1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/CodeGen/LiveVariables.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineInstr.h"
25 #include "llvm/CodeGen/MachineLoopInfo.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/Passes.h"
28 #include "llvm/Target/TargetRegisterInfo.h"
29 #include "llvm/Target/TargetInstrInfo.h"
30 #include "llvm/Target/TargetMachine.h"
31 #include "llvm/Support/CommandLine.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/ADT/Statistic.h"
34 #include "llvm/ADT/STLExtras.h"
40 // Hidden options for help debugging.
// Command-line knobs (hidden from -help output) controlling rematerialization
// and interval splitting, followed by -stats counters and the static pass
// registration for LiveIntervals.
41 cl::opt<bool> DisableReMat("disable-rematerialization",
42 cl::init(false), cl::Hidden);
44 cl::opt<bool> SplitAtBB("split-intervals-at-bb",
45 cl::init(true), cl::Hidden);
// SplitLimit defaults to -1; presumably a negative value means "no limit" —
// TODO(review): confirm at the use site (not visible in this chunk).
46 cl::opt<int> SplitLimit("split-limit",
47 cl::init(-1), cl::Hidden);
// Counters reported when -stats is enabled.
50 STATISTIC(numIntervals, "Number of original intervals");
51 STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
52 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
53 STATISTIC(numSplits , "Number of intervals split");
// Pass identification (address of ID is the unique key) and registration.
55 char LiveIntervals::ID = 0;
57 RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// getAnalysisUsage - Declare the passes this analysis requires and preserves.
// LiveVariables is both required (we read its VarInfo) and preserved; PHI
// elimination and two-address lowering must run first so every instruction
// we see is in post-SSA form.
60 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
61 AU.addPreserved<LiveVariables>();
62 AU.addRequired<LiveVariables>();
63 AU.addPreservedID(MachineLoopInfoID);
64 AU.addPreservedID(MachineDominatorsID);
65 AU.addPreservedID(PHIEliminationID);
66 AU.addRequiredID(PHIEliminationID);
67 AU.addRequiredID(TwoAddressInstructionPassID);
68 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Drop all per-function state between runs of the pass.
71 void LiveIntervals::releaseMemory() {
76 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
77 VNInfoAllocator.Reset();
// Free instructions cloned during splitting/rematerialization (loop body
// elided in this view).
78 for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
82 /// runOnMachineFunction - Register allocate the whole function
84 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
// Cache pointers to the register info, target hooks and LiveVariables used
// throughout the pass.
86 mri_ = &mf_->getRegInfo();
87 tm_ = &fn.getTarget();
88 tri_ = tm_->getRegisterInfo();
89 tii_ = tm_->getInstrInfo();
90 lv_ = &getAnalysis<LiveVariables>();
91 allocatableRegs_ = tri_->getAllocatableSet(fn);
93 // Number MachineInstrs and MachineBasicBlocks.
94 // Initialize MBB indexes to a sentinel.
95 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
// Assign each instruction an index, spaced InstrSlots::NUM apart so that
// load/use/def/store sub-slots fit between consecutive instructions.
98 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
100 unsigned StartIdx = MIIndex;
102 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
104 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
105 assert(inserted && "multiple MachineInstr -> index mappings");
106 i2miMap_.push_back(I);
107 MIIndex += InstrSlots::NUM;
110 // Set the MBB2IdxMap entry for this MBB.
111 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
112 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
// Keep Idx2MBBMap sorted by start index so findLiveInMBBs can binary search.
114 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
118 numIntervals += getNumIntervals();
120 DOUT << "********** INTERVALS **********\n";
121 for (iterator I = begin(), E = end(); I != E; ++I) {
122 I->second.print(DOUT, tri_);
126 numIntervalsAfter += getNumIntervals();
131 /// print - Implement the dump method.
132 void LiveIntervals::print(std::ostream &O, const Module* ) const {
133 O << "********** INTERVALS **********\n";
134 for (const_iterator I = begin(), E = end(); I != E; ++I) {
// NOTE(review): this prints to the debug stream DOUT rather than the
// caller-supplied stream O — likely should be O; confirm before changing.
135 I->second.print(DOUT, tri_);
139 O << "********** MACHINEINSTRS **********\n";
// Dump every instruction with its assigned index, grouped per basic block.
140 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
141 mbbi != mbbe; ++mbbi) {
142 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
143 for (MachineBasicBlock::iterator mii = mbbi->begin(),
144 mie = mbbi->end(); mii != mie; ++mii) {
145 O << getInstructionIndex(mii) << '\t' << *mii;
150 /// conflictsWithPhysRegDef - Returns true if the specified register
151 /// is defined during the duration of the specified interval.
152 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
153 VirtRegMap &vrm, unsigned reg) {
// Walk every instruction index covered by any range of li.
154 for (LiveInterval::Ranges::const_iterator
155 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
156 for (unsigned index = getBaseIndex(I->start),
157 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
158 index += InstrSlots::NUM) {
159 // skip deleted instructions
160 while (index != end && !getInstructionFromIndex(index))
161 index += InstrSlots::NUM;
162 if (index == end) break;
164 MachineInstr *MI = getInstructionFromIndex(index);
// Copies of li.reg itself cannot conflict; skip them (skip statement
// elided in this view).
165 unsigned SrcReg, DstReg;
166 if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
167 if (SrcReg == li.reg || DstReg == li.reg)
// Check every register operand; map virtuals through the VirtRegMap to
// their assigned physical register before testing for overlap with reg.
169 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
170 MachineOperand& mop = MI->getOperand(i);
171 if (!mop.isRegister())
173 unsigned PhysReg = mop.getReg();
174 if (PhysReg == 0 || PhysReg == li.reg)
176 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
177 if (!vrm.hasPhys(PhysReg))
179 PhysReg = vrm.getPhys(PhysReg);
181 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
// printRegName - Print a register for debug output: the target name for a
// physical register, "%reg<N>" for a virtual one.
190 void LiveIntervals::printRegName(unsigned reg) const {
191 if (TargetRegisterInfo::isPhysicalRegister(reg))
192 cerr << tri_->getName(reg);
194 cerr << "%reg" << reg;
// handleVirtualRegisterDef - Extend 'interval' for a definition of a virtual
// register by instruction 'mi' at index MIIdx, using LiveVariables kill/alive
// information to build the live ranges.
197 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
198 MachineBasicBlock::iterator mi,
200 LiveInterval &interval) {
201 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
202 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
204 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
205 DOUT << "is a implicit_def\n";
209 // Virtual registers may be defined multiple times (due to phi
210 // elimination and 2-addr elimination). Much of what we do only has to be
211 // done once for the vreg. We use an empty interval to detect the first
212 // time we see a vreg.
213 if (interval.empty()) {
214 // Get the Idx of the defining instructions.
215 unsigned defIndex = getDefIndex(MIIdx);
// Remember the defining copy instruction (if this def is a copy or
// extract_subreg) so coalescing can later identify the source.
217 MachineInstr *CopyMI = NULL;
218 unsigned SrcReg, DstReg;
219 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
220 tii_->isMoveInstr(*mi, SrcReg, DstReg))
222 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
224 assert(ValNo->id == 0 && "First value in interval is not 0?");
226 // Loop over all of the blocks that the vreg is defined in. There are
227 // two cases we have to handle here. The most common case is a vreg
228 // whose lifetime is contained within a basic block. In this case there
229 // will be a single kill, in MBB, which comes after the definition.
230 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
231 // FIXME: what about dead vars?
233 if (vi.Kills[0] != mi)
234 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
236 killIdx = defIndex+1;
238 // If the kill happens after the definition, we have an intra-block
240 if (killIdx > defIndex) {
241 assert(vi.AliveBlocks.none() &&
242 "Shouldn't be alive across any blocks!");
243 LiveRange LR(defIndex, killIdx, ValNo);
244 interval.addRange(LR);
245 DOUT << " +" << LR << "\n";
246 interval.addKill(ValNo, killIdx);
251 // The other case we handle is when a virtual register lives to the end
252 // of the defining block, potentially live across some blocks, then is
253 // live into some number of blocks, but gets killed. Start by adding a
254 // range that goes from this definition to the end of the defining block.
255 LiveRange NewLR(defIndex,
256 getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
258 DOUT << " +" << NewLR;
259 interval.addRange(NewLR);
261 // Iterate over all of the blocks that the variable is completely
262 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
264 for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
265 if (vi.AliveBlocks[i]) {
266 MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
268 LiveRange LR(getMBBStartIdx(i),
269 getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
271 interval.addRange(LR);
277 // Finally, this virtual register is live from the start of any killing
278 // block to the 'use' slot of the killing instruction.
279 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
280 MachineInstr *Kill = vi.Kills[i];
281 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
282 LiveRange LR(getMBBStartIdx(Kill->getParent()),
284 interval.addRange(LR);
285 interval.addKill(ValNo, killIdx);
290 // If this is the second time we see a virtual register definition, it
291 // must be due to phi elimination or two addr elimination. If this is
292 // the result of two address elimination, then the vreg is one of the
293 // def-and-use register operand.
294 if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
295 // If this is a two-address definition, then we have already processed
296 // the live range. The only problem is that we didn't realize there
297 // are actually two values in the live interval. Because of this we
298 // need to take the LiveRegion that defines this register and split it
300 assert(interval.containsOneValue());
301 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
302 unsigned RedefIndex = getDefIndex(MIIdx);
// The live range reaching the slot just before the redefinition holds
// the old value number that we must split in two.
304 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
305 VNInfo *OldValNo = OldLR->valno;
307 // Delete the initial value, which should be short and continuous,
308 // because the 2-addr copy must be in the same MBB as the redef.
309 interval.removeRange(DefIndex, RedefIndex);
311 // Two-address vregs should always only be redefined once. This means
312 // that at this point, there should be exactly one value number in it.
313 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
315 // The new value number (#1) is defined by the instruction we claimed
317 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
320 // Value#0 is now defined by the 2-addr instruction.
321 OldValNo->def = RedefIndex;
324 // Add the new live interval which replaces the range for the input copy.
325 LiveRange LR(DefIndex, RedefIndex, ValNo);
326 DOUT << " replace range with " << LR;
327 interval.addRange(LR);
328 interval.addKill(ValNo, RedefIndex);
330 // If this redefinition is dead, we need to add a dummy unit live
331 // range covering the def slot.
332 if (mi->registerDefIsDead(interval.reg, tri_))
333 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
336 interval.print(DOUT, tri_);
339 // Otherwise, this must be because of phi elimination. If this is the
340 // first redefinition of the vreg that we have seen, go back and change
341 // the live range in the PHI block to be a different value number.
342 if (interval.containsOneValue()) {
343 assert(vi.Kills.size() == 1 &&
344 "PHI elimination vreg should have one kill, the PHI itself!");
346 // Remove the old range that we now know has an incorrect number.
347 VNInfo *VNI = interval.getValNumInfo(0);
348 MachineInstr *Killer = vi.Kills[0];
349 unsigned Start = getMBBStartIdx(Killer->getParent());
350 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
351 DOUT << " Removing [" << Start << "," << End << "] from: ";
352 interval.print(DOUT, tri_); DOUT << "\n";
353 interval.removeRange(Start, End);
354 VNI->hasPHIKill = true;
355 DOUT << " RESULT: "; interval.print(DOUT, tri_);
357 // Replace the interval with one of a NEW value number. Note that this
358 // value number isn't actually defined by an instruction, weird huh? :)
359 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
360 DOUT << " replace range with " << LR;
361 interval.addRange(LR);
362 interval.addKill(LR.valno, End);
363 DOUT << " RESULT: "; interval.print(DOUT, tri_);
366 // In the case of PHI elimination, each variable definition is only
367 // live until the end of the block. We've already taken care of the
368 // rest of the live range.
369 unsigned defIndex = getDefIndex(MIIdx);
// As in the first-def case, record the copy instruction (if any) that
// produces this value for later coalescing.
372 MachineInstr *CopyMI = NULL;
373 unsigned SrcReg, DstReg;
374 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
375 tii_->isMoveInstr(*mi, SrcReg, DstReg))
377 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
379 unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
380 LiveRange LR(defIndex, killIndex, ValNo);
381 interval.addRange(LR);
382 interval.addKill(ValNo, killIndex);
383 ValNo->hasPHIKill = true;
// handlePhysicalRegisterDef - Extend 'interval' for a definition of a
// physical register at index MIIdx. CopyMI is the defining copy instruction,
// if any, recorded on the value number for coalescing.
391 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
392 MachineBasicBlock::iterator mi,
394 LiveInterval &interval,
395 MachineInstr *CopyMI) {
396 // A physical register cannot be live across basic block, so its
397 // lifetime must end somewhere in its defining basic block.
398 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
400 unsigned baseIndex = MIIdx;
401 unsigned start = getDefIndex(baseIndex);
402 unsigned end = start;
404 // If it is not used after definition, it is considered dead at
405 // the instruction defining it. Hence its interval is:
406 // [defSlot(def), defSlot(def)+1)
407 if (mi->registerDefIsDead(interval.reg, tri_)) {
409 end = getDefIndex(start) + 1;
413 // If it is not dead on definition, it must be killed by a
414 // subsequent instruction. Hence its interval is:
415 // [defSlot(def), useSlot(kill)+1)
// Scan forward within the block for the kill or the next redefinition.
416 while (++mi != MBB->end()) {
417 baseIndex += InstrSlots::NUM;
418 if (mi->killsRegister(interval.reg, tri_)) {
420 end = getUseIndex(baseIndex) + 1;
422 } else if (mi->modifiesRegister(interval.reg, tri_)) {
423 // Another instruction redefines the register before it is ever read.
424 // Then the register is essentially dead at the instruction that defines
425 // it. Hence its interval is:
426 // [defSlot(def), defSlot(def)+1)
428 end = getDefIndex(start) + 1;
433 // The only case we should have a dead physreg here without a killing or
434 // instruction where we know it's dead is if it is live-in to the function
436 assert(!CopyMI && "physreg was not killed in defining block!");
437 end = getDefIndex(start) + 1; // It's dead.
440 assert(start < end && "did not find end of interval?");
442 // Already exists? Extend old live interval.
443 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
444 VNInfo *ValNo = (OldLR != interval.end())
445 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
446 LiveRange LR(start, end, ValNo);
447 interval.addRange(LR);
448 interval.addKill(LR.valno, end);
449 DOUT << " +" << LR << '\n';
// handleRegisterDef - Dispatch a register definition to the virtual- or
// physical-register handler; for an allocatable physreg, also process every
// sub-register it implicitly defines.
452 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
453 MachineBasicBlock::iterator MI,
456 if (TargetRegisterInfo::isVirtualRegister(reg))
457 handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
458 else if (allocatableRegs_[reg]) {
// Record the defining copy (move or extract_subreg) so the physreg value
// number remembers its source.
459 MachineInstr *CopyMI = NULL;
460 unsigned SrcReg, DstReg;
461 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
462 tii_->isMoveInstr(*MI, SrcReg, DstReg))
464 handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), CopyMI);
465 // Def of a register also defines its sub-registers.
466 for (const unsigned* AS = tri_->getSubRegisters(reg); *AS; ++AS)
467 // If MI also modifies the sub-register explicitly, avoid processing it
468 // more than once. Do not pass in TRI here so it checks for exact match.
469 if (!MI->modifiesRegister(*AS))
470 handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Build the live range for a register that is live
// into MBB, from the block start (MIIdx) to its kill, redefinition, or —
// if never touched — through the block.
474 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
476 LiveInterval &interval, bool isAlias) {
477 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
479 // Look for kills, if it reaches a def before it's killed, then it shouldn't
480 // be considered a livein.
481 MachineBasicBlock::iterator mi = MBB->begin();
482 unsigned baseIndex = MIIdx;
483 unsigned start = baseIndex;
484 unsigned end = start;
485 while (mi != MBB->end()) {
486 if (mi->killsRegister(interval.reg, tri_)) {
488 end = getUseIndex(baseIndex) + 1;
490 } else if (mi->modifiesRegister(interval.reg, tri_)) {
491 // Another instruction redefines the register before it is ever read.
492 // Then the register is essentially dead at the instruction that defines
493 // it. Hence its interval is:
494 // [defSlot(def), defSlot(def)+1)
496 end = getDefIndex(start) + 1;
500 baseIndex += InstrSlots::NUM;
505 // Live-in register might not be used at all.
509 end = getDefIndex(MIIdx) + 1;
511 DOUT << " live through";
// The new range gets its own value number; a live-in value has no defining
// copy instruction.
516 LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
517 interval.addRange(LR);
518 interval.addKill(LR.valno, end);
519 DOUT << " +" << LR << '\n';
522 /// computeIntervals - computes the live intervals for virtual
523 /// registers. for some ordering of the machine instructions [1,N] a
524 /// live interval is an interval [i, j) where 1 <= i <= j < N for
525 /// which a variable is live
526 void LiveIntervals::computeIntervals() {
527 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
528 << "********** Function: "
529 << ((Value*)mf_->getFunction())->getName() << '\n';
530 // Track the index of the current machine instr.
531 unsigned MIIndex = 0;
532 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
534 MachineBasicBlock *MBB = MBBI;
535 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
537 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
539 // Create intervals for live-ins to this BB first.
540 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
541 LE = MBB->livein_end(); LI != LE; ++LI) {
542 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
543 // Multiple live-ins can alias the same register.
544 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
545 if (!hasInterval(*AS))
546 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
// Process each instruction's register defs to grow the intervals.
550 for (; MI != miEnd; ++MI) {
551 DOUT << MIIndex << "\t" << *MI;
// Iterate operands in reverse so implicit defs appended by earlier
// handling do not disturb the walk — TODO(review): confirm rationale.
554 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
555 MachineOperand &MO = MI->getOperand(i);
556 // handle register defs - build intervals
557 if (MO.isRegister() && MO.getReg() && MO.isDef())
558 handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
561 MIIndex += InstrSlots::NUM;
// findLiveInMBBs - Collect into MBBs every basic block whose start index
// falls inside live range LR, using binary search over the sorted
// Idx2MBBMap.
566 bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
567 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
568 std::vector<IdxMBBPair>::const_iterator I =
569 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
572 while (I != Idx2MBBMap.end()) {
573 if (LR.end <= I->first)
575 MBBs.push_back(I->second);
// createInterval - Construct a fresh LiveInterval for 'reg', giving physical
// registers a different initial spill weight than virtual ones (the physreg
// weight expression is elided in this view).
583 LiveInterval LiveIntervals::createInterval(unsigned reg) {
584 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
586 return LiveInterval(reg, Weight);
589 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
590 /// copy field and returns the source register that defines it.
591 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
// For extract_subreg the source is operand 1; otherwise ask the target
// whether this is a plain register move.
595 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
596 return VNI->copy->getOperand(1).getReg();
597 unsigned SrcReg, DstReg;
598 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
600 assert(0 && "Unrecognized copy instruction!");
604 //===----------------------------------------------------------------------===//
605 // Register allocator hooks.
608 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
609 /// allow one) virtual register operand, then its uses are implicitly using
610 /// the register. Returns the virtual register.
611 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
612 MachineInstr *MI) const {
// Scan the use operands for a virtual register other than li.reg itself.
614 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
615 MachineOperand &MO = MI->getOperand(i);
616 if (!MO.isRegister() || !MO.isUse())
618 unsigned Reg = MO.getReg();
619 if (Reg == 0 || Reg == li.reg)
621 // FIXME: For now, only remat MI with at most one register operand.
623 "Can't rematerialize instruction with multiple register operand!");
630 /// isValNoAvailableAt - Return true if the val# of the specified interval
631 /// which reaches the given instruction also reaches the specified use index.
632 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
633 unsigned UseIdx) const {
634 unsigned Index = getInstructionIndex(MI);
// The value number live at MI must be the same one live at UseIdx.
635 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
636 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
637 return UI != li.end() && UI->valno == ValNo;
640 /// isReMaterializable - Returns true if the definition MI of the specified
641 /// val# of the specified interval is re-materializable.
642 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
643 const VNInfo *ValNo, MachineInstr *MI,
// Implicit defs are trivially rematerializable.
649 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
// Loads from immutable (fixed) stack slots can always be rematerialized.
653 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
654 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
655 // FIXME: Let target specific isReallyTriviallyReMaterializable determines
656 // this but remember this is not safe to fold into a two-address
658 // This is a load from fixed stack slot. It can be rematerialized.
661 if (tii_->isTriviallyReMaterializable(MI)) {
662 const TargetInstrDesc &TID = MI->getDesc();
663 isLoad = TID.isSimpleLoad();
// If the remat def implicitly uses another virtual register, that
// register's value must reach every use of li.reg as well.
665 unsigned ImpUse = getReMatImplicitUse(li, MI);
667 const LiveInterval &ImpLi = getInterval(ImpUse);
668 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
669 re = mri_->use_end(); ri != re; ++ri) {
670 MachineInstr *UseMI = &*ri;
671 unsigned UseIdx = getInstructionIndex(UseMI);
672 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
674 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
684 /// isReMaterializable - Returns true if every definition of MI of every
685 /// val# of the specified interval is re-materializable.
686 bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
// Every (non-dead) value number must have a rematerializable def.
688 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
690 const VNInfo *VNI = *i;
691 unsigned DefIdx = VNI->def;
693 continue; // Dead val#.
694 // Is the def for the val# rematerializable?
697 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
698 bool DefIsLoad = false;
700 !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
707 /// FilterFoldedOps - Filter out two-address use operands. Return
708 /// true if it finds any issue with the operands that ought to prevent
710 static bool FilterFoldedOps(MachineInstr *MI,
711 SmallVector<unsigned, 2> &Ops,
713 SmallVector<unsigned, 2> &FoldOps) {
714 const TargetInstrDesc &TID = MI->getDesc();
// Classify each candidate operand as mod and/or ref, accumulating the
// combined VirtRegMap::ModRef info in MRInfo.
717 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
718 unsigned OpIdx = Ops[i];
719 MachineOperand &MO = MI->getOperand(OpIdx);
720 // FIXME: fold subreg use.
724 MRInfo |= (unsigned)VirtRegMap::isMod;
726 // Filter out two-address use operand(s).
727 if (!MO.isImplicit() &&
728 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
729 MRInfo = VirtRegMap::isModRef;
732 MRInfo |= (unsigned)VirtRegMap::isRef;
734 FoldOps.push_back(OpIdx);
740 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
741 /// slot / to reg or any rematerialized load into ith operand of specified
742 /// MI. If it is successful, MI is updated with the newly created MI and
744 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
745 VirtRegMap &vrm, MachineInstr *DefMI,
747 SmallVector<unsigned, 2> &Ops,
748 bool isSS, int Slot, unsigned Reg) {
749 // If it is an implicit def instruction, just delete it.
750 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
751 RemoveMachineInstrFromMaps(MI);
752 vrm.RemoveMachineInstrFromMaps(MI);
753 MI->eraseFromParent();
758 // Filter the list of operand indexes that are to be folded. Abort if
759 // any operand will prevent folding.
761 SmallVector<unsigned, 2> FoldOps;
762 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
765 // The only time it's safe to fold into a two address instruction is when
766 // it's folding reload and spill from / into a spill stack slot.
767 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Ask the target to fold either the stack slot (isSS) or the remat def
// instruction into MI.
770 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
771 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
773 // Remember this instruction uses the spill slot.
774 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
776 // Attempt to fold the memory reference into the instruction. If
777 // we can do this, we don't need to insert spill code.
779 lv_->instructionChanged(MI, fmi);
781 fmi->copyKillDeadInfo(MI, tri_);
782 MachineBasicBlock &MBB = *MI->getParent();
783 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
784 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
785 vrm.transferSpillPts(MI, fmi);
786 vrm.transferRestorePts(MI, fmi);
787 vrm.transferEmergencySpills(MI, fmi);
// Replace MI with the folded instruction in both index maps and in the
// basic block itself.
789 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
790 mi2iMap_[fmi] = InstrIdx;
791 MI = MBB.insert(MBB.erase(MI), fmi);
798 /// canFoldMemoryOperand - Returns true if the specified load / store
799 /// folding is possible.
800 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
801 SmallVector<unsigned, 2> &Ops,
803 // Filter the list of operand indexes that are to be folded. Abort if
804 // any operand will prevent folding.
806 SmallVector<unsigned, 2> FoldOps;
807 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
810 // It's only legal to remat for a use, not a def.
811 if (ReMat && (MRInfo & VirtRegMap::isMod))
// Final say belongs to the target hook.
814 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return whether every range of 'li' is contained
// within a single basic block (return statement elided in this view).
817 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
818 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
819 for (LiveInterval::Ranges::const_iterator
820 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// Locate the block containing the range start via binary search.
821 std::vector<IdxMBBPair>::const_iterator II =
822 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
823 if (II == Idx2MBBMap.end())
825 if (I->end > II->first) // crossing a MBB.
827 MBBs.insert(II->second);
834 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
835 /// interval on to-be re-materialized operands of MI) with new register.
836 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
837 MachineInstr *MI, unsigned NewVReg,
839 // There is an implicit use. That means one of the other operand is
840 // being remat'ed and the remat'ed instruction has li.reg as an
841 // use operand. Make sure we rewrite that as well.
842 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
843 MachineOperand &MO = MI->getOperand(i);
844 if (!MO.isRegister())
846 unsigned Reg = MO.getReg();
847 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
849 if (!vrm.isReMaterialized(Reg))
// Patch the use of li.reg inside the rematerialized def to NewVReg.
851 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
852 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
854 UseMO->setReg(NewVReg);
858 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
859 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
861 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
862 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
863 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
864 unsigned Slot, int LdSlot,
865 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
867 const TargetRegisterClass* rc,
868 SmallVector<int, 4> &ReMatIds,
869 const MachineLoopInfo *loopInfo,
870 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
871 std::map<unsigned,unsigned> &MBBVRegsMap,
872 std::vector<LiveInterval*> &NewLIs) {
873 bool CanFold = false;
// Walk every operand of MI looking for references to li.reg.
875 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
876 MachineOperand& mop = MI->getOperand(i);
877 if (!mop.isRegister())
879 unsigned Reg = mop.getReg();
881 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
886 bool TryFold = !DefIsReMat;
887 bool FoldSS = true; // Default behavior unless it's a remat.
890 // If this is the rematerializable definition MI itself and
891 // all of its uses are rematerialized, simply delete it.
892 if (MI == ReMatOrigDefMI && CanDelete) {
893 DOUT << "\t\t\t\tErasing re-materlizable def: ";
895 RemoveMachineInstrFromMaps(MI);
896 vrm.RemoveMachineInstrFromMaps(MI);
897 MI->eraseFromParent();
901 // If def for this use can't be rematerialized, then try folding.
902 // If def is rematerializable and it's a load, also try folding.
903 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
905 // Try fold loads (from stack slot, constant pool, etc.) into uses.
911 // Scan all of the operands of this instruction rewriting operands
912 // to use NewVReg instead of li.reg as appropriate. We do this for
915 // 1. If the instr reads the same spilled vreg multiple times, we
916 // want to reuse the NewVReg.
917 // 2. If the instr is a two-addr instruction, we are required to
918 // keep the src/dst regs pinned.
920 // Keep track of whether we replace a use and/or def so that we can
921 // create the spill interval with the appropriate range.
923 HasUse = mop.isUse();
924 HasDef = mop.isDef();
925 SmallVector<unsigned, 2> Ops;
// Gather all later operands that also reference this register so they
// are rewritten together.
927 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
928 const MachineOperand &MOj = MI->getOperand(j);
929 if (!MOj.isRegister())
931 unsigned RegJ = MOj.getReg();
932 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
936 HasUse |= MOj.isUse();
937 HasDef |= MOj.isDef();
942 // Do not fold load / store here if we are splitting. We'll find an
943 // optimal point to insert a load / store later.
945 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
946 Ops, FoldSS, FoldSlot, Reg)) {
947 // Folding the load/store can completely change the instruction in
948 // unpredictable ways, rescan it from the beginning.
952 goto RestartInstruction;
955 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
960 // Create a new virtual register for the spill interval.
961 bool CreatedNewVReg = false;
963 NewVReg = mri_->createVirtualRegister(rc);
965 CreatedNewVReg = true;
968 if (mop.isImplicit())
969 rewriteImplicitOps(li, MI, NewVReg, vrm);
971 // Reuse NewVReg for other reads.
972 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
973 MachineOperand &mopj = MI->getOperand(Ops[j]);
974 mopj.setReg(NewVReg);
975 if (mopj.isImplicit())
976 rewriteImplicitOps(li, MI, NewVReg, vrm);
979 if (CreatedNewVReg) {
// Record the remat mapping; each val# gets its own remat id, assigned
// lazily on first use.
981 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
982 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
983 // Each valnum may have its own remat id.
984 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
986 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
988 if (!CanDelete || (HasUse && HasDef)) {
989 // If this is a two-addr instruction then its use operands are
990 // rematerializable but its def is not. It should be assigned a
992 vrm.assignVirt2StackSlot(NewVReg, Slot);
995 vrm.assignVirt2StackSlot(NewVReg, Slot);
997 } else if (HasUse && HasDef &&
998 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
999 // If this interval hasn't been assigned a stack slot (because earlier
1000 // def is a deleted remat def), do it now.
1001 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1002 vrm.assignVirt2StackSlot(NewVReg, Slot);
1005 // Re-matting an instruction with virtual register use. Add the
1006 // register as an implicit use on the use MI.
1007 if (DefIsReMat && ImpUse)
1008 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1010 // create a new register interval for this spill / remat.
1011 LiveInterval &nI = getOrCreateInterval(NewVReg);
1012 if (CreatedNewVReg) {
1013 NewLIs.push_back(&nI);
1014 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1016 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Build the tiny live range(s) for the spill/restore vreg: use ranges
// run [load, use+1), def ranges run [def, store).
1020 if (CreatedNewVReg) {
1021 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1022 nI.getNextValue(~0U, 0, VNInfoAllocator));
1026 // Extend the split live interval to this def / use.
1027 unsigned End = getUseIndex(index)+1;
1028 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1029 nI.getValNumInfo(nI.getNumValNums()-1));
1035 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1036 nI.getNextValue(~0U, 0, VNInfoAllocator));
1041 DOUT << "\t\t\t\tAdded new interval: ";
1042 nI.print(DOUT, tri_);
/// anyKillInMBBAfterIdx - Return true if the val# being examined has a kill
/// inside MBB at an instruction index strictly after Idx (and before the
/// block's end index). Used to decide whether a def's value is still needed
/// later in the same block.
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  // Scan the val#'s recorded kill indices for one in the window (Idx, End).
  // NOTE(review): VNI comes from a parameter elided in this view — callers
  // pass the val# whose kills are being queried; confirm against full source.
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
/// findDefinedVNInfo - Return the val# of 'li' whose def is at the exact
/// instruction index DefIdx, or the scan's fall-through result if none
/// matches. File-local helper used when a two-address redef starts a new
/// VNInfo inside a range being rewritten.
static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
  const VNInfo *VNI = NULL;
  // Linear scan over all val#s of the interval; intervals here are small.
  for (LiveInterval::const_vni_iterator i = li.vni_begin(),
         e = li.vni_end(); i != e; ++i)
    if ((*i)->def == DefIdx) {
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
struct RewriteInfo {
  // Bundles the instruction's index, the instruction itself, and whether the
  // register being spilled is read (HasUse) and/or written (HasDef) by it.
  RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
    : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// RewriteInfoCompare - Strict weak ordering on instruction index. Used with
// std::sort to put RewriteInfo records into program order before rewriting.
struct RewriteInfoCompare {
  bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
    return LHS.Index < RHS.Index;
/// rewriteInstructionsForSpills - Rewrite every def/use of li.reg inside the
/// live range [I->start, I->end), replacing the register with new spill /
/// remat virtual registers (one per MBB when splitting). Side effects: fills
/// SpillIdxes/SpillMBBs and RestoreIdxes/RestoreMBBs with per-block spill and
/// reload points for the caller to materialize, records the MBB -> new-vreg
/// mapping in MBBVRegsMap, and appends created intervals to NewLIs.
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                 LiveInterval::Ranges::const_iterator &I,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 BitVector &SpillMBBs,
                 std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
                 BitVector &RestoreMBBs,
                 std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                 std::map<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  // Stays true only while every rewritten def/use of the current NewVReg
  // could be folded into a memory operand; drives the low-weight heuristic
  // at interval boundaries below.
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  // Round the range to whole-instruction slot boundaries.
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    // Skip operands that fall outside the range being rewritten.
    if (index < start || index >= end)
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  // If the remat def reads another virtual register, it must be kept live
  // wherever we re-materialize; ImpUse names that register (0 if none).
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    // Coalesce all RewriteInfo entries belonging to the same instruction.
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasDef |= RewriteMIs[i].HasDef;
    MachineBasicBlock *MBB = MI->getParent();
    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    // When splitting, reuse the vreg already created for this MBB, if any.
    std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
    if (NVI != MBBVRegsMap.end()) {
      ThisVReg = NVI->second;
    // It's better to start a new interval to avoid artifically
    // extend the new interval.
    if (MIHasDef && !MIHasUse) {
      MBBVRegsMap.erase(MBB->getNumber());
    bool IsNew = ThisVReg == 0;
    // This ends the previous live interval. If all of its def / use
    // can be folded, give it a low spill weight.
    if (NewVReg && TrySplit && AllCanFold) {
      LiveInterval &nI = getOrCreateInterval(NewVReg);
    bool HasDef = false;
    bool HasUse = false;
    // Rewrite this single instruction; HasDef/HasUse report what it actually
    // does to the register, CanFold whether a memory fold is still possible.
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    // Instruction no longer touches the register: nothing more to record.
    if (!HasDef && !HasUse)
    AllCanFold &= CanFold;
    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    // The spill weight is now infinity as it cannot be spilled again.
    nI.weight = HUGE_VALF;
    // Keep track of the last def and first use in each MBB.
    if (MI != ReMatOrigDefMI || !CanDelete) {
      bool HasKill = false;
      HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
      // If this is a two-address code, then this index starts a new VNInfo.
      const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
      HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII == SpillIdxes.end()) {
        // First spill point recorded for this block.
        std::vector<SRInfo> S;
        S.push_back(SRInfo(index, NewVReg, true));
        SpillIdxes.insert(std::make_pair(MBBId, S));
      } else if (SII->second.back().vreg != NewVReg) {
        // Different vreg than the last record: append a new spill point.
        SII->second.push_back(SRInfo(index, NewVReg, true));
      } else if ((int)index > SII->second.back().index) {
        // If there is an earlier def and this is a two-address
        // instruction, then it's not possible to fold the store (which
        // would also fold the load).
        SRInfo &Info = SII->second.back();
        Info.canFold = !HasUse;
      SpillMBBs.set(MBBId);
    } else if (SII != SpillIdxes.end() &&
               SII->second.back().vreg == NewVReg &&
               (int)index > SII->second.back().index) {
      // There is an earlier def that's not killed (must be two-address).
      // The spill is no longer needed.
      SII->second.pop_back();
      if (SII->second.empty()) {
        SpillIdxes.erase(MBBId);
        SpillMBBs.reset(MBBId);
      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      // Only need a reload if there isn't an earlier def / use.
      if (RII == RestoreIdxes.end()) {
        std::vector<SRInfo> Infos;
        Infos.push_back(SRInfo(index, NewVReg, true));
        RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        RII->second.push_back(SRInfo(index, NewVReg, true));
      RestoreMBBs.set(MBBId);
    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  // Close out the last interval created above.
  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
/// alsoFoldARestore - Return true if there is a restore of register vr
/// recorded at instruction index 'index' in basic block Id that is still
/// marked as foldable. Used when folding a store to decide whether the
/// matching reload can be folded away as well.
bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
                                     BitVector &RestoreMBBs,
                                     std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  // No restores recorded for this block at all.
  if (!RestoreMBBs[Id])
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  // Look for a record matching both the index and the register that is
  // still flagged foldable.
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
1304 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1305 BitVector &RestoreMBBs,
1306 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1307 if (!RestoreMBBs[Id])
1309 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1310 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1311 if (Restores[i].index == index && Restores[i].vreg)
1312 Restores[i].index = -1;
/// removeSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// Deletes each IMPLICIT_DEF of li.reg from its block and drops it from both
/// this analysis' and the VirtRegMap's instruction-index maps, since the
/// value is meaningless once the interval is spilled.
void LiveIntervals::removeSpilledImpDefs(const LiveInterval &li,
  // Walk every def/use of li.reg; iterator is advanced inside the loop
  // because instructions may be erased while iterating.
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    // Only IMPLICIT_DEFs are removed; real instructions are left alone.
    if (MI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF)
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
/// addIntervalsForSpills - Spill (or split) the given live interval. Decides
/// per-val# whether the def is rematerializable, assigns one stack slot per
/// interval when needed, rewrites all defs/uses via
/// rewriteInstructionsForSpills, then inserts/folds spill and restore points
/// and finalizes kill flags and spill weights. Returns the newly created
/// intervals that replace 'li'.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  // Since this is called after the analysis is done we don't know if
  // LiveVariables is available
  lv_ = getAnalysisToUpdate<LiveVariables>();
  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");
  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  // Each bit specify whether it a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
  std::map<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
  // Per-val# remat bookkeeping, indexed by val# id.
  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
  // Spilling a split live interval. It cannot be split any further. Also,
  // it's also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
    MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
    assert(KillMI && "Last use disappeared?");
    int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
    assert(KillOp != -1 && "Last use disappeared?");
    KillMI->getOperand(KillOp).setIsKill(false);
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      // Note ReMatOrigDefMI has already been deleted.
      rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                                   Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                   false, vrm, rc, ReMatIds, loopInfo,
                                   SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                   MBBVRegsMap, NewLIs);
      // Later ranges: plain spill, no remat.
      rewriteInstructionsForSpills(li, false, I, NULL, 0,
                                   Slot, 0, false, false, false,
                                   false, vrm, rc, ReMatIds, loopInfo,
                                   SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                   MBBVRegsMap, NewLIs);
      IsFirstRange = false;
    removeSpilledImpDefs(li, vrm);

  // Splitting is only attempted when the interval spans multiple blocks and
  // the -split-limit cap (if any) has not been reached.
  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
  bool NeedStackSlot = false;
  // Classify each val#: rematerializable (and deletable?) or needing a slot.
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    unsigned DefIdx = VNI->def;
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
      ? 0 : getInstructionFromIndex(DefIdx);
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here. vrm must
      ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
      bool CanDelete = true;
      if (VNI->hasPHIKill) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        // Need a stack slot if there is any live range where uses cannot be
        NeedStackSlot = true;
        ReMatDelete.set(VN);
      // Need a stack slot if there is any live range where uses cannot be
      NeedStackSlot = true;

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
    Slot = vrm.assignVirt2StackSlot(li.reg);

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                                 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                 CanDelete, vrm, rc, ReMatIds, loopInfo,
                                 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                 MBBVRegsMap, NewLIs);
  // Insert spills / restores if we are splitting.
  removeSpilledImpDefs(li, vrm);

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    // Walk each block flagged as needing a spill and either fold the store
    // into the defining instruction or tell the spiller to insert one.
    int Id = SpillMBBs.find_first();
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        if (spills[i].canFold) {
          // Collect the operand indices of VReg in MI for the fold attempt.
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isRegister() || MO.getReg() != VReg)
            (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                            RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            // Also folded uses, do not issue a load.
            eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
            nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
        // Else tell the spiller to issue a spill.
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          // The store is the last point of the interval => the def is killed.
          bool isKill = LR->end == getStoreIndex(index);
          vrm.addSpillPoint(VReg, isKill, MI);
          AddedKill.insert(&nI);
      Id = SpillMBBs.find_next(Id);

  // Walk each block flagged as needing a restore and either fold the reload
  // (or the remat'ed load) into the use or record a restore point.
  int Id = RestoreMBBs.find_first();
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      if (restores[i].canFold) {
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isRegister() || MO.getReg() != VReg)
          // If this restore were to be folded, it would have been folded
      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!vrm.isReMaterialized(VReg))
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the register
            // interval's spill weight to HUGE_VALF to prevent it from being
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
        vrm.addRestorePoint(VReg, MI);
    Id = RestoreMBBs.find_next(Id);

  // Finalize intervals: add kills, finalize spill weights, and filter out
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
      // Normalize weight by interval size so short intervals rank fairly.
      LI->weight /= LI->getSize();
      if (!AddedKill.count(LI)) {
        // No spill-store marked the kill: mark the last use as the kill point.
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        // Two-address tied uses cannot carry a kill flag; skip those.
        if (LastUse->getOperand(UseIdx).isImplicit() ||
            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
      RetNewLIs.push_back(LI);
  removeSpilledImpDefs(li, vrm);
/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  // Scan Reg's super-register list for one that is both allocatable and has
  // a computed live interval.
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  // Defaults to Reg itself when no suitable super-register exists.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    // A super-register with no allocatable super of its own (and with an
    // interval) is maximal in the hierarchy.
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflicts with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  // Conflicts are measured against the representative (largest allocatable
  // super-register) interval of PhysReg.
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  // Count each operand of li.reg whose instruction index falls inside the
  // physical register's live interval.
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
  return NumConflicts;
1681 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1682 /// around all defs and uses of the specified interval.
1683 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1684 unsigned PhysReg, VirtRegMap &vrm) {
1685 unsigned SpillReg = getRepresentativeReg(PhysReg);
1687 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1688 // If there are registers which alias PhysReg, but which are not a
1689 // sub-register of the chosen representative super register. Assert
1690 // since we can't handle it yet.
1691 assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
1692 tri_->isSuperRegister(*AS, SpillReg));
1694 LiveInterval &pli = getInterval(SpillReg);
1695 SmallPtrSet<MachineInstr*, 8> SeenMIs;
1696 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1697 E = mri_->reg_end(); I != E; ++I) {
1698 MachineOperand &O = I.getOperand();
1699 MachineInstr *MI = O.getParent();
1700 if (SeenMIs.count(MI))
1703 unsigned Index = getInstructionIndex(MI);
1704 if (pli.liveAt(Index)) {
1705 vrm.addEmergencySpill(SpillReg, MI);
1706 pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1707 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
1708 if (!hasInterval(*AS))
1710 LiveInterval &spli = getInterval(*AS);
1711 if (spli.liveAt(Index))
1712 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);