1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/CodeGen/LiveVariables.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineInstr.h"
25 #include "llvm/CodeGen/MachineLoopInfo.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/Passes.h"
28 #include "llvm/Target/TargetRegisterInfo.h"
29 #include "llvm/Target/TargetInstrInfo.h"
30 #include "llvm/Target/TargetMachine.h"
31 #include "llvm/Support/CommandLine.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/ADT/Statistic.h"
34 #include "llvm/ADT/STLExtras.h"
39 // Hidden options for help debugging.
// -disable-rematerialization: globally turn off remat of defs during spilling.
40 static cl::opt<bool> DisableReMat("disable-rematerialization",
41 cl::init(false), cl::Hidden);
// -split-intervals-at-bb: allow splitting live intervals at basic-block
// boundaries (on by default).
43 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
44 cl::init(true), cl::Hidden);
// -split-limit: cap on the number of interval splits; -1 means unlimited.
45 static cl::opt<int> SplitLimit("split-limit",
46 cl::init(-1), cl::Hidden);
// Pass statistics (reported with -stats).
48 STATISTIC(numIntervals, "Number of original intervals");
49 STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
50 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
51 STATISTIC(numSplits , "Number of intervals split");
// Unique pass identity and static registration under the name "liveintervals".
53 char LiveIntervals::ID = 0;
54 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
/// getAnalysisUsage - Declare the passes this analysis depends on and the
/// analyses it preserves. LiveVariables is both required and preserved;
/// PHI elimination and two-address lowering must run first.
56 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
57 AU.addPreserved<LiveVariables>();
58 AU.addRequired<LiveVariables>();
59 AU.addPreservedID(MachineLoopInfoID);
60 AU.addPreservedID(MachineDominatorsID);
61 AU.addPreservedID(PHIEliminationID);
62 AU.addRequiredID(PHIEliminationID);
63 AU.addRequiredID(TwoAddressInstructionPassID);
// Chain to the base class so common machine-function requirements are added.
64 MachineFunctionPass::getAnalysisUsage(AU);
/// releaseMemory - Free per-function state between runs of the pass.
/// NOTE(review): interior lines appear to be missing from this view
/// (embedded numbering skips) — confirm against the upstream file.
67 void LiveIntervals::releaseMemory() {
72 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
73 VNInfoAllocator.Reset();
// Delete any instructions cloned for rematerialization bookkeeping.
74 for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
78 /// runOnMachineFunction - Register allocate the whole function
// Entry point: caches target hooks, numbers every MachineInstr with a slot
// index (InstrSlots::NUM apart), records per-MBB index ranges, then computes
// the live intervals. NOTE(review): interior lines appear to be missing from
// this view (embedded numbering skips) — confirm against the upstream file.
80 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
82 mri_ = &mf_->getRegInfo();
83 tm_ = &fn.getTarget();
84 tri_ = tm_->getRegisterInfo();
85 tii_ = tm_->getInstrInfo();
86 lv_ = &getAnalysis<LiveVariables>();
87 allocatableRegs_ = tri_->getAllocatableSet(fn);
89 // Number MachineInstrs and MachineBasicBlocks.
90 // Initialize MBB indexes to a sentinel.
91 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
94 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
96 unsigned StartIdx = MIIndex;
// Assign each instruction a unique, strictly increasing index.
98 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
100 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
101 assert(inserted && "multiple MachineInstr -> index mappings");
102 i2miMap_.push_back(I);
103 MIIndex += InstrSlots::NUM;
106 // Set the MBB2IdxMap entry for this MBB.
107 MBB2IdxMap[MBB->getNumber()] = (StartIdx == MIIndex)
108 ? std::make_pair(StartIdx, StartIdx) // Empty MBB
109 : std::make_pair(StartIdx, MIIndex - 1);
110 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
// Keep Idx2MBBMap sorted so findLiveInMBBs can binary-search it.
112 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
116 numIntervals += getNumIntervals();
118 DOUT << "********** INTERVALS **********\n";
119 for (iterator I = begin(), E = end(); I != E; ++I) {
120 I->second.print(DOUT, tri_);
124 numIntervalsAfter += getNumIntervals();
129 /// print - Implement the dump method.
// Prints every live interval followed by all machine instructions with their
// slot indexes. NOTE(review): intervals are printed to DOUT rather than the
// supplied stream O — presumably intentional debug plumbing; verify upstream.
130 void LiveIntervals::print(std::ostream &O, const Module* ) const {
131 O << "********** INTERVALS **********\n";
132 for (const_iterator I = begin(), E = end(); I != E; ++I) {
133 I->second.print(DOUT, tri_);
137 O << "********** MACHINEINSTRS **********\n";
138 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
139 mbbi != mbbe; ++mbbi) {
140 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
141 for (MachineBasicBlock::iterator mii = mbbi->begin(),
142 mie = mbbi->end(); mii != mie; ++mii) {
143 O << getInstructionIndex(mii) << '\t' << *mii;
148 /// conflictsWithPhysRegDef - Returns true if the specified register
149 /// is defined during the duration of the specified interval.
// Walks every instruction slot covered by li's ranges and checks whether any
// register operand (mapped through the VirtRegMap for virtual registers)
// overlaps 'reg'. Copies involving li.reg itself are skipped.
150 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
151 VirtRegMap &vrm, unsigned reg) {
152 for (LiveInterval::Ranges::const_iterator
153 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
154 for (unsigned index = getBaseIndex(I->start),
155 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
156 index += InstrSlots::NUM) {
157 // skip deleted instructions
158 while (index != end && !getInstructionFromIndex(index))
159 index += InstrSlots::NUM;
160 if (index == end) break;
162 MachineInstr *MI = getInstructionFromIndex(index);
// A copy to/from li.reg is not a conflict with li itself.
163 unsigned SrcReg, DstReg;
164 if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
165 if (SrcReg == li.reg || DstReg == li.reg)
167 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
168 MachineOperand& mop = MI->getOperand(i);
169 if (!mop.isRegister())
171 unsigned PhysReg = mop.getReg();
172 if (PhysReg == 0 || PhysReg == li.reg)
// Virtual operands only conflict through their assigned physreg.
174 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
175 if (!vrm.hasPhys(PhysReg))
177 PhysReg = vrm.getPhys(PhysReg);
179 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
/// printRegName - Debug helper: print a physical register by its target name,
/// or a virtual register as "%reg<N>".
188 void LiveIntervals::printRegName(unsigned reg) const {
189 if (TargetRegisterInfo::isPhysicalRegister(reg))
190 cerr << tri_->getName(reg);
192 cerr << "%reg" << reg;
/// handleVirtualRegisterDef - Extend the live interval for a virtual register
/// at a definition site. Three cases: (1) the first def of the vreg (empty
/// interval) — build ranges from LiveVariables kill/alive-block info; (2) a
/// redef by a two-address instruction — split the existing single value into
/// two value numbers; (3) a redef from PHI elimination — the def is live only
/// to the end of its block. NOTE(review): interior lines appear to be missing
/// from this view (embedded numbering skips) — confirm against upstream.
195 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
196 MachineBasicBlock::iterator mi,
198 LiveInterval &interval) {
199 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
200 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
202 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
203 DOUT << "is a implicit_def\n";
207 // Virtual registers may be defined multiple times (due to phi
208 // elimination and 2-addr elimination). Much of what we do only has to be
209 // done once for the vreg. We use an empty interval to detect the first
210 // time we see a vreg.
211 if (interval.empty()) {
212 // Get the Idx of the defining instructions.
213 unsigned defIndex = getDefIndex(MIIdx);
// Remember the defining copy (if any) on the value number for coalescing.
215 MachineInstr *CopyMI = NULL;
216 unsigned SrcReg, DstReg;
217 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
218 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
219 tii_->isMoveInstr(*mi, SrcReg, DstReg))
221 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
223 assert(ValNo->id == 0 && "First value in interval is not 0?");
225 // Loop over all of the blocks that the vreg is defined in. There are
226 // two cases we have to handle here. The most common case is a vreg
227 // whose lifetime is contained within a basic block. In this case there
228 // will be a single kill, in MBB, which comes after the definition.
229 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
230 // FIXME: what about dead vars?
232 if (vi.Kills[0] != mi)
233 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
235 killIdx = defIndex+1;
237 // If the kill happens after the definition, we have an intra-block
239 if (killIdx > defIndex) {
240 assert(vi.AliveBlocks.none() &&
241 "Shouldn't be alive across any blocks!");
242 LiveRange LR(defIndex, killIdx, ValNo);
243 interval.addRange(LR);
244 DOUT << " +" << LR << "\n";
245 interval.addKill(ValNo, killIdx);
250 // The other case we handle is when a virtual register lives to the end
251 // of the defining block, potentially live across some blocks, then is
252 // live into some number of blocks, but gets killed. Start by adding a
253 // range that goes from this definition to the end of the defining block.
254 LiveRange NewLR(defIndex,
255 getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
257 DOUT << " +" << NewLR;
258 interval.addRange(NewLR);
260 // Iterate over all of the blocks that the variable is completely
261 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
263 for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
264 if (vi.AliveBlocks[i]) {
265 MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
267 LiveRange LR(getMBBStartIdx(i),
268 getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
270 interval.addRange(LR);
276 // Finally, this virtual register is live from the start of any killing
277 // block to the 'use' slot of the killing instruction.
278 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
279 MachineInstr *Kill = vi.Kills[i];
280 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
281 LiveRange LR(getMBBStartIdx(Kill->getParent()),
283 interval.addRange(LR);
284 interval.addKill(ValNo, killIdx);
289 // If this is the second time we see a virtual register definition, it
290 // must be due to phi elimination or two addr elimination. If this is
291 // the result of two address elimination, then the vreg is one of the
292 // def-and-use register operand.
293 if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
294 // If this is a two-address definition, then we have already processed
295 // the live range. The only problem is that we didn't realize there
296 // are actually two values in the live interval. Because of this we
297 // need to take the LiveRegion that defines this register and split it
299 assert(interval.containsOneValue());
300 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
301 unsigned RedefIndex = getDefIndex(MIIdx);
303 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
304 VNInfo *OldValNo = OldLR->valno;
306 // Delete the initial value, which should be short and continuous,
307 // because the 2-addr copy must be in the same MBB as the redef.
308 interval.removeRange(DefIndex, RedefIndex);
310 // Two-address vregs should always only be redefined once. This means
311 // that at this point, there should be exactly one value number in it.
312 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
314 // The new value number (#1) is defined by the instruction we claimed
316 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
319 // Value#0 is now defined by the 2-addr instruction.
320 OldValNo->def = RedefIndex;
323 // Add the new live interval which replaces the range for the input copy.
324 LiveRange LR(DefIndex, RedefIndex, ValNo);
325 DOUT << " replace range with " << LR;
326 interval.addRange(LR);
327 interval.addKill(ValNo, RedefIndex);
329 // If this redefinition is dead, we need to add a dummy unit live
330 // range covering the def slot.
331 if (mi->registerDefIsDead(interval.reg, tri_))
332 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
335 interval.print(DOUT, tri_);
338 // Otherwise, this must be because of phi elimination. If this is the
339 // first redefinition of the vreg that we have seen, go back and change
340 // the live range in the PHI block to be a different value number.
341 if (interval.containsOneValue()) {
342 assert(vi.Kills.size() == 1 &&
343 "PHI elimination vreg should have one kill, the PHI itself!");
345 // Remove the old range that we now know has an incorrect number.
346 VNInfo *VNI = interval.getValNumInfo(0);
347 MachineInstr *Killer = vi.Kills[0];
348 unsigned Start = getMBBStartIdx(Killer->getParent());
349 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
350 DOUT << " Removing [" << Start << "," << End << "] from: ";
351 interval.print(DOUT, tri_); DOUT << "\n";
352 interval.removeRange(Start, End);
353 VNI->hasPHIKill = true;
354 DOUT << " RESULT: "; interval.print(DOUT, tri_);
356 // Replace the interval with one of a NEW value number. Note that this
357 // value number isn't actually defined by an instruction, weird huh? :)
358 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
359 DOUT << " replace range with " << LR;
360 interval.addRange(LR);
361 interval.addKill(LR.valno, End);
362 DOUT << " RESULT: "; interval.print(DOUT, tri_);
365 // In the case of PHI elimination, each variable definition is only
366 // live until the end of the block. We've already taken care of the
367 // rest of the live range.
368 unsigned defIndex = getDefIndex(MIIdx);
// As above, record a defining copy on the new value number if applicable.
371 MachineInstr *CopyMI = NULL;
372 unsigned SrcReg, DstReg;
373 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
374 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
375 tii_->isMoveInstr(*mi, SrcReg, DstReg))
377 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
379 unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
380 LiveRange LR(defIndex, killIndex, ValNo);
381 interval.addRange(LR);
382 interval.addKill(ValNo, killIndex);
383 ValNo->hasPHIKill = true;
/// handlePhysicalRegisterDef - Add a live range for a physical register def.
/// Physregs never live across blocks here, so the range ends at a kill, a
/// redef, or (if neither is found) the def slot itself (dead def).
/// NOTE(review): interior lines appear missing from this view — confirm
/// against the upstream file.
391 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
392 MachineBasicBlock::iterator mi,
394 LiveInterval &interval,
395 MachineInstr *CopyMI) {
396 // A physical register cannot be live across basic block, so its
397 // lifetime must end somewhere in its defining basic block.
398 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
400 unsigned baseIndex = MIIdx;
401 unsigned start = getDefIndex(baseIndex);
402 unsigned end = start;
404 // If it is not used after definition, it is considered dead at
405 // the instruction defining it. Hence its interval is:
406 // [defSlot(def), defSlot(def)+1)
407 if (mi->registerDefIsDead(interval.reg, tri_)) {
409 end = getDefIndex(start) + 1;
413 // If it is not dead on definition, it must be killed by a
414 // subsequent instruction. Hence its interval is:
415 // [defSlot(def), useSlot(kill)+1)
416 while (++mi != MBB->end()) {
417 baseIndex += InstrSlots::NUM;
418 if (mi->killsRegister(interval.reg, tri_)) {
420 end = getUseIndex(baseIndex) + 1;
422 } else if (mi->modifiesRegister(interval.reg, tri_)) {
423 // Another instruction redefines the register before it is ever read.
424 // Then the register is essentially dead at the instruction that defines
425 // it. Hence its interval is:
426 // [defSlot(def), defSlot(def)+1)
428 end = getDefIndex(start) + 1;
433 // The only case we should have a dead physreg here without a killing or
434 // instruction where we know it's dead is if it is live-in to the function
436 assert(!CopyMI && "physreg was not killed in defining block!");
437 end = getDefIndex(start) + 1; // It's dead.
440 assert(start < end && "did not find end of interval?");
442 // Already exists? Extend old live interval.
443 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
444 VNInfo *ValNo = (OldLR != interval.end())
445 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
446 LiveRange LR(start, end, ValNo);
447 interval.addRange(LR);
448 interval.addKill(LR.valno, end);
449 DOUT << " +" << LR << '\n';
/// handleRegisterDef - Dispatch a register definition: virtual registers go
/// to handleVirtualRegisterDef; allocatable physical registers go to
/// handlePhysicalRegisterDef, along with each of their sub-registers.
452 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
453 MachineBasicBlock::iterator MI,
456 if (TargetRegisterInfo::isVirtualRegister(reg))
457 handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
458 else if (allocatableRegs_[reg]) {
// Track the copy instruction (if this def is a copy) on the value number.
459 MachineInstr *CopyMI = NULL;
460 unsigned SrcReg, DstReg;
461 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
462 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
463 tii_->isMoveInstr(*MI, SrcReg, DstReg))
465 handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), CopyMI);
466 // Def of a register also defines its sub-registers.
467 for (const unsigned* AS = tri_->getSubRegisters(reg); *AS; ++AS)
468 // If MI also modifies the sub-register explicitly, avoid processing it
469 // more than once. Do not pass in TRI here so it checks for exact match.
470 if (!MI->modifiesRegister(*AS))
471 handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
/// handleLiveInRegister - Create a live range for a register that is live-in
/// to MBB. Scans forward from the block start to the first kill or redef;
/// if neither is found, the register is treated as live through (or dead,
/// per the truncated branch below). 'isAlias' marks live-ins created for
/// sub-register aliases. NOTE(review): interior lines appear missing from
/// this view — confirm against the upstream file.
475 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
477 LiveInterval &interval, bool isAlias) {
478 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
480 // Look for kills, if it reaches a def before it's killed, then it shouldn't
481 // be considered a livein.
482 MachineBasicBlock::iterator mi = MBB->begin();
483 unsigned baseIndex = MIIdx;
484 unsigned start = baseIndex;
485 unsigned end = start;
486 while (mi != MBB->end()) {
487 if (mi->killsRegister(interval.reg, tri_)) {
489 end = getUseIndex(baseIndex) + 1;
491 } else if (mi->modifiesRegister(interval.reg, tri_)) {
492 // Another instruction redefines the register before it is ever read.
493 // Then the register is essentially dead at the instruction that defines
494 // it. Hence its interval is:
495 // [defSlot(def), defSlot(def)+1)
497 end = getDefIndex(start) + 1;
501 baseIndex += InstrSlots::NUM;
506 // Live-in register might not be used at all.
510 end = getDefIndex(MIIdx) + 1;
512 DOUT << " live through";
// The value number for a live-in has no defining instruction (copy == 0).
517 LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
518 interval.addRange(LR);
519 interval.addKill(LR.valno, end);
520 DOUT << " +" << LR << '\n';
523 /// computeIntervals - computes the live intervals for virtual
524 /// registers. for some ordering of the machine instructions [1,N] a
525 /// live interval is an interval [i, j) where 1 <= i <= j < N for
526 /// which a variable is live
// For each block: first create intervals for live-in registers (and their
// sub-register aliases), then visit each instruction's register defs.
527 void LiveIntervals::computeIntervals() {
528 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
529 << "********** Function: "
530 << ((Value*)mf_->getFunction())->getName() << '\n';
531 // Track the index of the current machine instr.
532 unsigned MIIndex = 0;
533 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
535 MachineBasicBlock *MBB = MBBI;
536 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
538 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
540 // Create intervals for live-ins to this BB first.
541 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
542 LE = MBB->livein_end(); LI != LE; ++LI) {
543 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
544 // Multiple live-ins can alias the same register.
545 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
546 if (!hasInterval(*AS))
547 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
551 for (; MI != miEnd; ++MI) {
552 DOUT << MIIndex << "\t" << *MI;
// Walk operands in reverse; build an interval for every register def.
555 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
556 MachineOperand &MO = MI->getOperand(i);
557 // handle register defs - build intervals
558 if (MO.isRegister() && MO.getReg() && MO.isDef())
559 handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
562 MIIndex += InstrSlots::NUM;
/// findLiveInMBBs - Collect into MBBs every basic block whose start index
/// falls inside the live range LR, using a binary search over the sorted
/// Idx2MBBMap. Return value semantics are not visible in this view.
567 bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
568 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
569 std::vector<IdxMBBPair>::const_iterator I =
570 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
573 while (I != Idx2MBBMap.end()) {
// Stop once the next block starts at or after the end of the range.
574 if (LR.end <= I->first)
576 MBBs.push_back(I->second);
/// createInterval - Construct a fresh LiveInterval for 'reg'. Physical
/// registers get a distinguished spill weight (value truncated from view).
584 LiveInterval LiveIntervals::createInterval(unsigned reg) {
585 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
587 return LiveInterval(reg, Weight);
590 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
591 /// copy field and returns the source register that defines it.
// EXTRACT_SUBREG's source is operand 1; INSERT_SUBREG's inserted value is
// operand 2; otherwise defer to the target's move-recognition hook.
592 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
596 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
597 return VNI->copy->getOperand(1).getReg();
598 if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
599 return VNI->copy->getOperand(2).getReg();
600 unsigned SrcReg, DstReg;
601 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
603 assert(0 && "Unrecognized copy instruction!");
607 //===----------------------------------------------------------------------===//
608 // Register allocator hooks.
611 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
612 /// allow one) virtual register operand, then its uses are implicitly using
613 /// the register. Returns the virtual register.
614 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
615 MachineInstr *MI) const {
617 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
618 MachineOperand &MO = MI->getOperand(i);
619 if (!MO.isRegister() || !MO.isUse())
621 unsigned Reg = MO.getReg();
// Skip operands that are li.reg itself or unset.
622 if (Reg == 0 || Reg == li.reg)
624 // FIXME: For now, only remat MI with at most one register operand.
626 "Can't rematerialize instruction with multiple register operand!");
633 /// isValNoAvailableAt - Return true if the val# of the specified interval
634 /// which reaches the given instruction also reaches the specified use index.
635 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
636 unsigned UseIdx) const {
637 unsigned Index = getInstructionIndex(MI);
// NOTE(review): FindLiveRangeContaining(Index) is dereferenced without a
// null/end check — presumably MI is guaranteed to be inside li; verify.
638 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
639 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
640 return UI != li.end() && UI->valno == ValNo;
643 /// isReMaterializable - Returns true if the definition MI of the specified
644 /// val# of the specified interval is re-materializable.
// Accepts IMPLICIT_DEF, loads from immutable stack slots, and target-declared
// trivially rematerializable instructions. When the remat def has an implicit
// virtual-register use, every use of li must see that register's value.
// NOTE(review): interior lines appear missing from this view.
645 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
646 const VNInfo *ValNo, MachineInstr *MI,
652 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
656 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
657 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
658 // FIXME: Let target specific isReallyTriviallyReMaterializable determines
659 // this but remember this is not safe to fold into a two-address
661 // This is a load from fixed stack slot. It can be rematerialized.
664 if (tii_->isTriviallyReMaterializable(MI)) {
665 const TargetInstrDesc &TID = MI->getDesc();
666 isLoad = TID.isSimpleLoad();
668 unsigned ImpUse = getReMatImplicitUse(li, MI);
670 const LiveInterval &ImpLi = getInterval(ImpUse);
671 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
672 re = mri_->use_end(); ri != re; ++ri) {
673 MachineInstr *UseMI = &*ri;
674 unsigned UseIdx = getInstructionIndex(UseMI);
// Only uses reached by this particular val# matter.
675 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
677 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
687 /// isReMaterializable - Returns true if every definition of MI of every
688 /// val# of the specified interval is re-materializable.
689 bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
691 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
693 const VNInfo *VNI = *i;
694 unsigned DefIdx = VNI->def;
696 continue; // Dead val#.
697 // Is the def for the val# rematerializable?
700 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
701 bool DefIsLoad = false;
// Any non-rematerializable def makes the whole interval non-remat.
703 !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
710 /// FilterFoldedOps - Filter out two-address use operands. Return
711 /// true if it finds any issue with the operands that ought to prevent
713 static bool FilterFoldedOps(MachineInstr *MI,
714 SmallVector<unsigned, 2> &Ops,
716 SmallVector<unsigned, 2> &FoldOps) {
717 const TargetInstrDesc &TID = MI->getDesc();
// Classify each candidate operand as mod and/or ref; tied (two-address)
// uses force ModRef and are excluded from FoldOps.
720 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
721 unsigned OpIdx = Ops[i];
722 MachineOperand &MO = MI->getOperand(OpIdx);
723 // FIXME: fold subreg use.
727 MRInfo |= (unsigned)VirtRegMap::isMod;
729 // Filter out two-address use operand(s).
730 if (!MO.isImplicit() &&
731 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
732 MRInfo = VirtRegMap::isModRef;
735 MRInfo |= (unsigned)VirtRegMap::isRef;
737 FoldOps.push_back(OpIdx);
743 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
744 /// slot / to reg or any rematerialized load into ith operand of specified
745 /// MI. If it is successul, MI is updated with the newly created MI and
// On success the folded instruction replaces MI in place and all index maps
// (i2miMap_/mi2iMap_) and VirtRegMap bookkeeping are transferred to it.
// NOTE(review): interior lines appear missing from this view.
747 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
748 VirtRegMap &vrm, MachineInstr *DefMI,
750 SmallVector<unsigned, 2> &Ops,
751 bool isSS, int Slot, unsigned Reg) {
752 // If it is an implicit def instruction, just delete it.
753 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
754 RemoveMachineInstrFromMaps(MI);
755 vrm.RemoveMachineInstrFromMaps(MI);
756 MI->eraseFromParent();
761 // Filter the list of operand indexes that are to be folded. Abort if
762 // any operand will prevent folding.
764 SmallVector<unsigned, 2> FoldOps;
765 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
768 // The only time it's safe to fold into a two address instruction is when
769 // it's folding reload and spill from / into a spill stack slot.
770 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Fold either a stack slot (isSS) or the rematerialized def instruction.
773 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
774 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
776 // Remember this instruction uses the spill slot.
777 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
779 // Attempt to fold the memory reference into the instruction. If
780 // we can do this, we don't need to insert spill code.
782 lv_->instructionChanged(MI, fmi);
784 fmi->copyKillDeadInfo(MI, tri_);
785 MachineBasicBlock &MBB = *MI->getParent();
786 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
787 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
788 vrm.transferSpillPts(MI, fmi);
789 vrm.transferRestorePts(MI, fmi);
790 vrm.transferEmergencySpills(MI, fmi);
// Rewire the index maps so the folded instruction owns MI's slot index.
792 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
793 mi2iMap_[fmi] = InstrIdx;
794 MI = MBB.insert(MBB.erase(MI), fmi);
801 /// canFoldMemoryOperand - Returns true if the specified load / store
802 /// folding is possible.
803 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
804 SmallVector<unsigned, 2> &Ops,
806 // Filter the list of operand indexes that are to be folded. Abort if
807 // any operand will prevent folding.
809 SmallVector<unsigned, 2> FoldOps;
810 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
813 // It's only legal to remat for a use, not a def.
814 if (ReMat && (MRInfo & VirtRegMap::isMod))
// Final say belongs to the target hook.
817 return tii_->canFoldMemoryOperand(MI, FoldOps);
/// intervalIsInOneMBB - Return whether every range of li lies inside a single
/// basic block, by mapping each range start to its block via Idx2MBBMap and
/// rejecting ranges that cross a block boundary.
820 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
821 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
822 for (LiveInterval::Ranges::const_iterator
823 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
824 std::vector<IdxMBBPair>::const_iterator II =
825 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
826 if (II == Idx2MBBMap.end())
828 if (I->end > II->first) // crossing a MBB.
830 MBBs.insert(II->second);
837 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
838 /// interval on to-be re-materialized operands of MI) with new register.
839 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
840 MachineInstr *MI, unsigned NewVReg,
842 // There is an implicit use. That means one of the other operand is
843 // being remat'ed and the remat'ed instruction has li.reg as an
844 // use operand. Make sure we rewrite that as well.
845 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
846 MachineOperand &MO = MI->getOperand(i);
847 if (!MO.isRegister())
849 unsigned Reg = MO.getReg();
850 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
851 // Only operands whose defs are being rematerialized need rewriting.
852 if (!vrm.isReMaterialized(Reg))
854 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
855 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
857 UseMO->setReg(NewVReg);
861 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
862 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// Rewrites one instruction MI that touches li: tries folding the memory
// access first, otherwise creates (or reuses) a spill vreg NewVReg, rewrites
// the operands, records remat / stack-slot assignments in the VirtRegMap,
// and builds the new spill interval nI. NOTE(review): many interior lines
// are missing from this view — confirm details against the upstream file.
864 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
865 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
866 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
867 unsigned Slot, int LdSlot,
868 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
870 const TargetRegisterClass* rc,
871 SmallVector<int, 4> &ReMatIds,
872 const MachineLoopInfo *loopInfo,
873 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
874 std::map<unsigned,unsigned> &MBBVRegsMap,
875 std::vector<LiveInterval*> &NewLIs) {
876 bool CanFold = false;
878 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
879 MachineOperand& mop = MI->getOperand(i);
880 if (!mop.isRegister())
882 unsigned Reg = mop.getReg();
884 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
889 bool TryFold = !DefIsReMat;
890 bool FoldSS = true; // Default behavior unless it's a remat.
893 // If this is the rematerializable definition MI itself and
894 // all of its uses are rematerialized, simply delete it.
895 if (MI == ReMatOrigDefMI && CanDelete) {
896 DOUT << "\t\t\t\tErasing re-materlizable def: ";
898 RemoveMachineInstrFromMaps(MI);
899 vrm.RemoveMachineInstrFromMaps(MI);
900 MI->eraseFromParent();
904 // If def for this use can't be rematerialized, then try folding.
905 // If def is rematerializable and it's a load, also try folding.
906 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
908 // Try fold loads (from stack slot, constant pool, etc.) into uses.
914 // Scan all of the operands of this instruction rewriting operands
915 // to use NewVReg instead of li.reg as appropriate. We do this for
918 // 1. If the instr reads the same spilled vreg multiple times, we
919 // want to reuse the NewVReg.
920 // 2. If the instr is a two-addr instruction, we are required to
921 // keep the src/dst regs pinned.
923 // Keep track of whether we replace a use and/or def so that we can
924 // create the spill interval with the appropriate range.
926 HasUse = mop.isUse();
927 HasDef = mop.isDef();
928 SmallVector<unsigned, 2> Ops;
// Gather the other operands of MI that also reference li.reg.
930 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
931 const MachineOperand &MOj = MI->getOperand(j);
932 if (!MOj.isRegister())
934 unsigned RegJ = MOj.getReg();
935 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
939 HasUse |= MOj.isUse();
940 HasDef |= MOj.isDef();
945 // Do not fold load / store here if we are splitting. We'll find an
946 // optimal point to insert a load / store later.
948 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
949 Ops, FoldSS, FoldSlot, Reg)) {
950 // Folding the load/store can completely change the instruction in
951 // unpredictable ways, rescan it from the beginning.
957 goto RestartInstruction;
960 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
965 // Create a new virtual register for the spill interval.
966 bool CreatedNewVReg = false;
968 NewVReg = mri_->createVirtualRegister(rc);
970 CreatedNewVReg = true;
973 if (mop.isImplicit())
974 rewriteImplicitOps(li, MI, NewVReg, vrm);
976 // Reuse NewVReg for other reads.
977 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
978 MachineOperand &mopj = MI->getOperand(Ops[j]);
979 mopj.setReg(NewVReg);
980 if (mopj.isImplicit())
981 rewriteImplicitOps(li, MI, NewVReg, vrm);
984 if (CreatedNewVReg) {
986 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
987 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
988 // Each valnum may have its own remat id.
989 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
991 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
993 if (!CanDelete || (HasUse && HasDef)) {
994 // If this is a two-addr instruction then its use operands are
995 // rematerializable but its def is not. It should be assigned a
997 vrm.assignVirt2StackSlot(NewVReg, Slot);
1000 vrm.assignVirt2StackSlot(NewVReg, Slot);
1002 } else if (HasUse && HasDef &&
1003 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1004 // If this interval hasn't been assigned a stack slot (because earlier
1005 // def is a deleted remat def), do it now.
1006 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1007 vrm.assignVirt2StackSlot(NewVReg, Slot);
1010 // Re-matting an instruction with virtual register use. Add the
1011 // register as an implicit use on the use MI.
1012 if (DefIsReMat && ImpUse)
1013 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1015 // create a new register interval for this spill / remat.
1016 LiveInterval &nI = getOrCreateInterval(NewVReg);
1017 if (CreatedNewVReg) {
1018 NewLIs.push_back(&nI);
1019 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1021 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Extend nI to cover the use and/or def slots of this instruction.
1025 if (CreatedNewVReg) {
1026 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1027 nI.getNextValue(~0U, 0, VNInfoAllocator));
1031 // Extend the split live interval to this def / use.
1032 unsigned End = getUseIndex(index)+1;
1033 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1034 nI.getValNumInfo(nI.getNumValNums()-1));
1040 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1041 nI.getNextValue(~0U, 0, VNInfoAllocator));
1046 DOUT << "\t\t\t\tAdded new interval: ";
1047 nI.print(DOUT, tri_);
// anyKillInMBBAfterIdx - Return true if the value number has a recorded kill
// inside MBB at an instruction index strictly greater than Idx (and before the
// end of the MBB). Used to decide whether a def is live past a given point in
// its block. NOTE(review): this dump elides the `const VNInfo *VNI` parameter
// line and the return statements; only the scan loop is visible here.
1052 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1054 MachineBasicBlock *MBB, unsigned Idx) const {
// Kills at or past the MBB end index belong to later blocks.
1055 unsigned End = getMBBEndIdx(MBB);
1056 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1057 unsigned KillIdx = VNI->kills[j];
// A kill after Idx but still within this MBB means the value is used later.
1058 if (KillIdx > Idx && KillIdx < End)
// findDefinedVNInfo - Return the value number of interval li whose def index
// is exactly DefIdx, or NULL if no value number is defined there. Linear scan
// over the interval's value numbers; intervals have few val#s so this is cheap.
1064 static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
1065 const VNInfo *VNI = NULL;
1066 for (LiveInterval::const_vni_iterator i = li.vni_begin(),
1067 e = li.vni_end(); i != e; ++i)
// Match on the def slot recorded in the VNInfo.
1068 if ((*i)->def == DefIdx) {
1075 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1076 /// during spilling.
1078 struct RewriteInfo {
// Index: instruction index of MI. HasUse / HasDef: whether the operand being
// rewritten reads and/or writes the spilled register. One RewriteInfo is
// created per (instruction, operand) occurrence, so an instruction that
// mentions the register several times yields several entries.
1083 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1084 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// RewriteInfoCompare - Strict weak ordering by instruction index, used to
// sort the collected RewriteInfos into program order before rewriting.
1087 struct RewriteInfoCompare {
1088 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1089 return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Rewrite every def/use of li.reg inside the
// live range [I->start, I->end) to use new (possibly per-MBB, when splitting)
// virtual registers, and record where spills (SpillIdxes/SpillMBBs) and
// restores (RestoreIdxes/RestoreMBBs) must later be inserted. Outputs:
// MBBVRegsMap maps MBB number -> the vreg currently reused in that block;
// NewLIs collects the freshly created spill intervals.
1094 void LiveIntervals::
1095 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1096 LiveInterval::Ranges::const_iterator &I,
1097 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1098 unsigned Slot, int LdSlot,
1099 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1101 const TargetRegisterClass* rc,
1102 SmallVector<int, 4> &ReMatIds,
1103 const MachineLoopInfo *loopInfo,
1104 BitVector &SpillMBBs,
1105 std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
1106 BitVector &RestoreMBBs,
1107 std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1108 std::map<unsigned,unsigned> &MBBVRegsMap,
1109 std::vector<LiveInterval*> &NewLIs) {
1110 bool AllCanFold = true;
1111 unsigned NewVReg = 0;
// Round the range endpoints out to whole-instruction boundaries.
1112 unsigned start = getBaseIndex(I->start);
1113 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1115 // First collect all the def / use in this live range that will be rewritten.
1116 // Make sure they are sorted according to instruction index.
1117 std::vector<RewriteInfo> RewriteMIs;
1118 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1119 re = mri_->reg_end(); ri != re; ) {
1120 MachineInstr *MI = &*ri;
1121 MachineOperand &O = ri.getOperand();
1123 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1124 unsigned index = getInstructionIndex(MI);
// Skip references that fall outside the live range being rewritten.
1125 if (index < start || index >= end)
1127 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1129 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
// If the def is rematerializable and reads another virtual register, that
// register must be kept alive wherever we re-mat.
1131 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1132 // Now rewrite the defs and uses.
1133 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1134 RewriteInfo &rwi = RewriteMIs[i];
1136 unsigned index = rwi.Index;
1137 bool MIHasUse = rwi.HasUse;
1138 bool MIHasDef = rwi.HasDef;
1139 MachineInstr *MI = rwi.MI;
1140 // If MI def and/or use the same register multiple times, then there
1141 // are multiple entries.
1142 unsigned NumUses = MIHasUse;
// Coalesce all entries for the same instruction into one rewrite step.
1143 while (i != e && RewriteMIs[i].MI == MI) {
1144 assert(RewriteMIs[i].Index == index);
1145 bool isUse = RewriteMIs[i].HasUse;
1146 if (isUse) ++NumUses;
1148 MIHasDef |= RewriteMIs[i].HasDef;
1151 MachineBasicBlock *MBB = MI->getParent();
1153 // ReMatDefMI is a clone and not in the IR at all, so check
1154 // RefMatOrigDefMI too.
1155 if (ImpUse && MI != ReMatDefMI && MI != ReMatOrigDefMI) {
1156 // Re-matting an instruction with virtual register use. Update the
1157 // register interval's spill weight to HUGE_VALF to prevent it from
1159 LiveInterval &ImpLi = getInterval(ImpUse);
1160 ImpLi.weight = HUGE_VALF;
// When splitting, try to reuse the vreg already introduced for this MBB.
1163 unsigned MBBId = MBB->getNumber();
1164 unsigned ThisVReg = 0;
1166 std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
1167 if (NVI != MBBVRegsMap.end()) {
1168 ThisVReg = NVI->second;
1175 // It's better to start a new interval to avoid artifically
1176 // extend the new interval.
1177 if (MIHasDef && !MIHasUse) {
1178 MBBVRegsMap.erase(MBB->getNumber());
1184 bool IsNew = ThisVReg == 0;
1186 // This ends the previous live interval. If all of its def / use
1187 // can be folded, give it a low spill weight.
1188 if (NewVReg && TrySplit && AllCanFold) {
1189 LiveInterval &nI = getOrCreateInterval(NewVReg);
// Rewrite this instruction; rewriteInstructionForSpills reports back whether
// the reference survived (HasDef/HasUse) and whether it could be folded.
1196 bool HasDef = false;
1197 bool HasUse = false;
1198 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1199 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1200 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1201 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1202 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1203 if (!HasDef && !HasUse)
1206 AllCanFold &= CanFold;
1208 // Update weight of spill interval.
1209 LiveInterval &nI = getOrCreateInterval(NewVReg);
1211 // The spill weight is now infinity as it cannot be spilled again.
1212 nI.weight = HUGE_VALF;
1216 // Keep track of the last def and first use in each MBB.
1218 if (MI != ReMatOrigDefMI || !CanDelete) {
1219 bool HasKill = false;
1221 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1223 // If this is a two-address code, then this index starts a new VNInfo.
1224 const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
1226 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
// Record (or merge) a pending spill for this def in its MBB.
1228 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1229 SpillIdxes.find(MBBId);
1231 if (SII == SpillIdxes.end()) {
1232 std::vector<SRInfo> S;
1233 S.push_back(SRInfo(index, NewVReg, true));
1234 SpillIdxes.insert(std::make_pair(MBBId, S));
1235 } else if (SII->second.back().vreg != NewVReg) {
1236 SII->second.push_back(SRInfo(index, NewVReg, true));
1237 } else if ((int)index > SII->second.back().index) {
1238 // If there is an earlier def and this is a two-address
1239 // instruction, then it's not possible to fold the store (which
1240 // would also fold the load).
1241 SRInfo &Info = SII->second.back();
1243 Info.canFold = !HasUse;
1245 SpillMBBs.set(MBBId);
1246 } else if (SII != SpillIdxes.end() &&
1247 SII->second.back().vreg == NewVReg &&
1248 (int)index > SII->second.back().index) {
1249 // There is an earlier def that's not killed (must be two-address).
1250 // The spill is no longer needed.
1251 SII->second.pop_back();
1252 if (SII->second.empty()) {
1253 SpillIdxes.erase(MBBId);
1254 SpillMBBs.reset(MBBId);
// Uses: decide whether the earlier spill / restore in this MBB stays foldable,
// then record a restore point if no earlier def/use exists in the block.
1261 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1262 SpillIdxes.find(MBBId);
1263 if (SII != SpillIdxes.end() &&
1264 SII->second.back().vreg == NewVReg &&
1265 (int)index > SII->second.back().index)
1266 // Use(s) following the last def, it's not safe to fold the spill.
1267 SII->second.back().canFold = false;
1268 std::map<unsigned, std::vector<SRInfo> >::iterator RII =
1269 RestoreIdxes.find(MBBId);
1270 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1271 // If we are splitting live intervals, only fold if it's the first
1272 // use and there isn't another use later in the MBB.
1273 RII->second.back().canFold = false;
1275 // Only need a reload if there isn't an earlier def / use.
1276 if (RII == RestoreIdxes.end()) {
1277 std::vector<SRInfo> Infos;
1278 Infos.push_back(SRInfo(index, NewVReg, true));
1279 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1281 RII->second.push_back(SRInfo(index, NewVReg, true));
1283 RestoreMBBs.set(MBBId);
1287 // Update spill weight.
1288 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1289 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
// Give the final interval a low weight too if everything folded.
1292 if (NewVReg && TrySplit && AllCanFold) {
1293 // If all of its def / use can be folded, give it a low spill weight.
1294 LiveInterval &nI = getOrCreateInterval(NewVReg);
// alsoFoldARestore - Return true if there is a still-foldable restore recorded
// for virtual register vr at instruction index 'index' in basic block Id.
// Used when folding a spill: if the restore at the same point can be folded
// too, both memory operations collapse into the instruction.
1299 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1300 BitVector &RestoreMBBs,
1301 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores were recorded for this MBB at all.
1302 if (!RestoreMBBs[Id])
1304 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
// Look for an entry matching both the index and the vreg that is marked
// foldable.
1305 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1306 if (Restores[i].index == index &&
1307 Restores[i].vreg == vr &&
1308 Restores[i].canFold)
1313 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1314 BitVector &RestoreMBBs,
1315 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1316 if (!RestoreMBBs[Id])
1318 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1319 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1320 if (Restores[i].index == index && Restores[i].vreg)
1321 Restores[i].index = -1;
1324 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1325 /// spilled and create empty intervals for their uses.
1327 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1328 const TargetRegisterClass* rc,
1329 std::vector<LiveInterval*> &NewLIs) {
// Walk every remaining reference to li.reg. By the time this runs, all real
// defs/uses have been rewritten; what is left are IMPLICIT_DEFs and their
// uses.
1330 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1331 re = mri_->reg_end(); ri != re; ) {
1332 MachineOperand &O = ri.getOperand();
1333 MachineInstr *MI = &*ri;
// A leftover def must be an IMPLICIT_DEF; delete it from the function and
// from the index/map bookkeeping.
1336 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1337 "Register def was not rewritten?");
1338 RemoveMachineInstrFromMaps(MI);
1339 vrm.RemoveMachineInstrFromMaps(MI);
1340 MI->eraseFromParent();
1342 // This must be an use of an implicit_def so it's not part of the live
1343 // interval. Create a new empty live interval for it.
1344 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1345 unsigned NewVReg = mri_->createVirtualRegister(rc);
1347 vrm.setIsImplicitlyDefined(NewVReg);
1348 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Redirect every operand of this instruction that referenced li.reg to the
// fresh implicitly-defined vreg.
1349 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1350 MachineOperand &MO = MI->getOperand(i);
1351 if (MO.isReg() && MO.getReg() == li.reg)
// addIntervalsForSpills - Spill (or split) interval li. Returns the new
// intervals introduced for the spill: one per rewritten reference (or per MBB
// when splitting). Handles rematerialization of the def where legal, decides
// which spills/restores can be folded into the instructions themselves, and
// tells the VirtRegMap where the spiller must insert the remaining loads and
// stores.
1359 std::vector<LiveInterval*> LiveIntervals::
1360 addIntervalsForSpills(const LiveInterval &li,
1361 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1362 // Since this is called after the analysis is done we don't know if
1363 // LiveVariables is available
1364 lv_ = getAnalysisToUpdate<LiveVariables>();
1366 assert(li.weight != HUGE_VALF &&
1367 "attempt to spill already spilled interval!");
1369 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1370 li.print(DOUT, tri_);
1373 // Each bit specify whether it a spill is required in the MBB.
1374 BitVector SpillMBBs(mf_->getNumBlockIDs());
1375 std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1376 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1377 std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1378 std::map<unsigned,unsigned> MBBVRegsMap;
1379 std::vector<LiveInterval*> NewLIs;
1380 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-value-number rematerialization state: the (cloned) def instruction,
// its original, a remat id from the VirtRegMap, and whether the original def
// can be deleted outright.
1382 unsigned NumValNums = li.getNumValNums();
1383 SmallVector<MachineInstr*, 4> ReMatDefs;
1384 ReMatDefs.resize(NumValNums, NULL);
1385 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1386 ReMatOrigDefs.resize(NumValNums, NULL);
1387 SmallVector<int, 4> ReMatIds;
1388 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1389 BitVector ReMatDelete(NumValNums);
1390 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1392 // Spilling a split live interval. It cannot be split any further. Also,
1393 // it's also guaranteed to be a single val# / range interval.
1394 if (vrm.getPreSplitReg(li.reg)) {
1395 vrm.setIsSplitFromReg(li.reg, 0);
1396 // Unset the split kill marker on the last use.
1397 unsigned KillIdx = vrm.getKillPoint(li.reg);
1399 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1400 assert(KillMI && "Last use disappeared?");
1401 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1402 assert(KillOp != -1 && "Last use disappeared?");
1403 KillMI->getOperand(KillOp).setIsKill(false);
1405 vrm.removeKillPoint(li.reg);
// The split interval already has a stack slot / remat info assigned; reuse it.
1406 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1407 Slot = vrm.getStackSlot(li.reg);
1408 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1409 MachineInstr *ReMatDefMI = DefIsReMat ?
1410 vrm.getReMaterializedMI(li.reg) : NULL;
1412 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1413 bool isLoad = isLoadSS ||
1414 (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1415 bool IsFirstRange = true;
1416 for (LiveInterval::Ranges::const_iterator
1417 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1418 // If this is a split live interval with multiple ranges, it means there
1419 // are two-address instructions that re-defined the value. Only the
1420 // first def can be rematerialized!
1422 // Note ReMatOrigDefMI has already been deleted.
1423 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1424 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1425 false, vrm, rc, ReMatIds, loopInfo,
1426 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1427 MBBVRegsMap, NewLIs);
// Later ranges (two-address redefs): plain spill, no remat.
1429 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1430 Slot, 0, false, false, false,
1431 false, vrm, rc, ReMatIds, loopInfo,
1432 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1433 MBBVRegsMap, NewLIs);
1435 IsFirstRange = false;
1438 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Normal (non-pre-split) path: decide whether to split per-MBB, honoring the
// -split-intervals-at-bb / -split-limit options.
1442 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1443 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1447 bool NeedStackSlot = false;
// Pass 1: for each value number, work out if its def is rematerializable and
// whether a stack slot is needed at all.
1448 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1450 const VNInfo *VNI = *i;
1451 unsigned VN = VNI->id;
1452 unsigned DefIdx = VNI->def;
1454 continue; // Dead val#.
1455 // Is the def for the val# rematerializable?
1456 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1457 ? 0 : getInstructionFromIndex(DefIdx);
1459 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1460 // Remember how to remat the def of this val#.
1461 ReMatOrigDefs[VN] = ReMatDefMI;
1462 // Original def may be modified so we have to make a copy here. vrm must
1464 ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
1466 bool CanDelete = true;
1467 if (VNI->hasPHIKill) {
1468 // A kill is a phi node, not all of its uses can be rematerialized.
1469 // It must not be deleted.
1471 // Need a stack slot if there is any live range where uses cannot be
1473 NeedStackSlot = true;
1476 ReMatDelete.set(VN);
1478 // Need a stack slot if there is any live range where uses cannot be
1480 NeedStackSlot = true;
1484 // One stack slot per live interval.
1485 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1486 Slot = vrm.assignVirt2StackSlot(li.reg);
1488 // Create new intervals and rewrite defs and uses.
1489 for (LiveInterval::Ranges::const_iterator
1490 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1491 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1492 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1493 bool DefIsReMat = ReMatDefMI != NULL;
1494 bool CanDelete = ReMatDelete[I->valno->id];
1496 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1497 bool isLoad = isLoadSS ||
1498 (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1499 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1500 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1501 CanDelete, vrm, rc, ReMatIds, loopInfo,
1502 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1503 MBBVRegsMap, NewLIs);
1506 // Insert spills / restores if we are splitting.
1508 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Pass 2: place the spills recorded per MBB, folding the store into the
// defining instruction where possible.
1512 SmallPtrSet<LiveInterval*, 4> AddedKill;
1513 SmallVector<unsigned, 2> Ops;
1514 if (NeedStackSlot) {
1515 int Id = SpillMBBs.find_first();
1517 std::vector<SRInfo> &spills = SpillIdxes[Id];
1518 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1519 int index = spills[i].index;
1520 unsigned VReg = spills[i].vreg;
1521 LiveInterval &nI = getOrCreateInterval(VReg);
1522 bool isReMat = vrm.isReMaterialized(VReg);
1523 MachineInstr *MI = getInstructionFromIndex(index);
1524 bool CanFold = false;
1525 bool FoundUse = false;
1527 if (spills[i].canFold) {
// Collect the operand indices referencing VReg to attempt store folding.
1529 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1530 MachineOperand &MO = MI->getOperand(j);
1531 if (!MO.isRegister() || MO.getReg() != VReg)
1538 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1539 RestoreMBBs, RestoreIdxes))) {
1540 // MI has two-address uses of the same register. If the use
1541 // isn't the first and only use in the BB, then we can't fold
1542 // it. FIXME: Move this to rewriteInstructionsForSpills.
1549 // Fold the store into the def if possible.
1550 bool Folded = false;
1551 if (CanFold && !Ops.empty()) {
1552 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1555 // Also folded uses, do not issue a load.
1556 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1557 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1559 nI.removeRange(getDefIndex(index), getStoreIndex(index));
1563 // Otherwise tell the spiller to issue a spill.
1565 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1566 bool isKill = LR->end == getStoreIndex(index);
1567 if (!MI->registerDefIsDead(nI.reg))
1568 // No need to spill a dead def.
1569 vrm.addSpillPoint(VReg, isKill, MI);
1571 AddedKill.insert(&nI);
1574 Id = SpillMBBs.find_next(Id);
// Pass 3: place the restores, folding the load (or the rematerialized def,
// if it is itself a load) into the using instruction where possible.
1578 int Id = RestoreMBBs.find_first();
1580 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1581 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1582 int index = restores[i].index;
1585 unsigned VReg = restores[i].vreg;
1586 LiveInterval &nI = getOrCreateInterval(VReg);
1587 MachineInstr *MI = getInstructionFromIndex(index);
1588 bool CanFold = false;
1590 if (restores[i].canFold) {
1592 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1593 MachineOperand &MO = MI->getOperand(j);
1594 if (!MO.isRegister() || MO.getReg() != VReg)
1598 // If this restore were to be folded, it would have been folded
1607 // Fold the load into the use if possible.
1608 bool Folded = false;
1609 if (CanFold && !Ops.empty()) {
1610 if (!vrm.isReMaterialized(VReg))
1611 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1613 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1615 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1616 // If the rematerializable def is a load, also try to fold it.
1617 if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1618 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1619 Ops, isLoadSS, LdSlot, VReg);
1620 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1622 // Re-matting an instruction with virtual register use. Add the
1623 // register as an implicit use on the use MI and update the register
1624 // interval's spill weight to HUGE_VALF to prevent it from being
1626 LiveInterval &ImpLi = getInterval(ImpUse);
1627 ImpLi.weight = HUGE_VALF;
1628 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1632 // If folding is not possible / failed, then tell the spiller to issue a
1633 // load / rematerialization for us.
1635 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1637 vrm.addRestorePoint(VReg, MI);
1639 Id = RestoreMBBs.find_next(Id);
1642 // Finalize intervals: add kills, finalize spill weights, and filter out
// Dead entries. Mark the last use of each new interval as a kill unless it
// was already handled as a spill point.
1644 std::vector<LiveInterval*> RetNewLIs;
1645 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1646 LiveInterval *LI = NewLIs[i];
// Normalize the accumulated weight by the interval's size.
1648 LI->weight /= LI->getSize();
1649 if (!AddedKill.count(LI)) {
1650 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1651 unsigned LastUseIdx = getBaseIndex(LR->end);
1652 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1653 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1654 assert(UseIdx != -1);
1655 if (LastUse->getOperand(UseIdx).isImplicit() ||
1656 LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
1657 LastUse->getOperand(UseIdx).setIsKill();
1658 vrm.addKillPoint(LI->reg, LastUseIdx);
1661 RetNewLIs.push_back(LI);
1665 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1669 /// hasAllocatableSuperReg - Return true if the specified physical register has
1670 /// any super register that's allocatable.
1671 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// Walk the null-terminated super-register list from TargetRegisterInfo; a
// super-register only counts if it is allocatable and tracked by an interval.
1672 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1673 if (allocatableRegs_[*AS] && hasInterval(*AS))
1678 /// getRepresentativeReg - Find the largest super register of the specified
1679 /// physical register.
1680 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1681 // Find the largest super-register that is allocatable.
// Default to Reg itself when no better representative is found.
1682 unsigned BestReg = Reg;
1683 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1684 unsigned SuperReg = *AS;
// Pick a super-register that has an interval but itself has no allocatable
// super-register, i.e. the top of the allocatable register hierarchy.
1685 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1693 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1694 /// specified interval that conflicts with the specified physical register.
1695 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1696 unsigned PhysReg) const {
1697 unsigned NumConflicts = 0;
// Conflicts are measured against the representative (largest allocatable
// super-register) interval of PhysReg.
1698 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
// Count each reference of li.reg that occurs while the physreg is live.
1699 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1700 E = mri_->reg_end(); I != E; ++I) {
1701 MachineOperand &O = I.getOperand();
1702 MachineInstr *MI = O.getParent();
1703 unsigned Index = getInstructionIndex(MI);
1704 if (pli.liveAt(Index))
1707 return NumConflicts;
1710 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1711 /// around all defs and uses of the specified interval.
1712 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1713 unsigned PhysReg, VirtRegMap &vrm) {
1714 unsigned SpillReg = getRepresentativeReg(PhysReg);
1716 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1717 // If there are registers which alias PhysReg, but which are not a
1718 // sub-register of the chosen representative super register. Assert
1719 // since we can't handle it yet.
1720 assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
1721 tri_->isSuperRegister(*AS, SpillReg));
1723 LiveInterval &pli = getInterval(SpillReg);
1724 SmallPtrSet<MachineInstr*, 8> SeenMIs;
1725 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1726 E = mri_->reg_end(); I != E; ++I) {
1727 MachineOperand &O = I.getOperand();
1728 MachineInstr *MI = O.getParent();
1729 if (SeenMIs.count(MI))
1732 unsigned Index = getInstructionIndex(MI);
1733 if (pli.liveAt(Index)) {
1734 vrm.addEmergencySpill(SpillReg, MI);
1735 pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1736 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
1737 if (!hasInterval(*AS))
1739 LiveInterval &spli = getInterval(*AS);
1740 if (spli.liveAt(Index))
1741 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);