1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/CodeGen/LiveVariables.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineInstr.h"
25 #include "llvm/CodeGen/MachineLoopInfo.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/Passes.h"
28 #include "llvm/Target/TargetRegisterInfo.h"
29 #include "llvm/Target/TargetInstrInfo.h"
30 #include "llvm/Target/TargetMachine.h"
31 #include "llvm/Support/CommandLine.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/ADT/Statistic.h"
34 #include "llvm/ADT/STLExtras.h"
// File-scope state for the pass: hidden cl::opt debugging knobs
// (rematerialization / interval-splitting controls), the pass's
// STATISTIC counters, and the static RegisterPass registration that
// makes -liveintervals available to the pass manager.
39 // Hidden options for help debugging.
40 static cl::opt<bool> DisableReMat("disable-rematerialization",
41 cl::init(false), cl::Hidden);
43 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
44 cl::init(true), cl::Hidden);
45 static cl::opt<int> SplitLimit("split-limit",
46 cl::init(-1), cl::Hidden);
48 STATISTIC(numIntervals, "Number of original intervals");
49 STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
50 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
51 STATISTIC(numSplits , "Number of intervals split");
53 char LiveIntervals::ID = 0;
54 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// getAnalysisUsage - Declare the passes this analysis requires
// (LiveVariables, PHI elimination, two-address lowering) and the
// analyses it preserves, so the pass manager schedules and caches
// them correctly around this pass.
56 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
57 AU.addPreserved<LiveVariables>();
58 AU.addRequired<LiveVariables>();
59 AU.addPreservedID(MachineLoopInfoID);
60 AU.addPreservedID(MachineDominatorsID);
61 AU.addPreservedID(PHIEliminationID);
62 AU.addRequiredID(PHIEliminationID);
63 AU.addRequiredID(TwoAddressInstructionPassID);
64 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Free per-function state between runs of the pass.
// NOTE(review): the map/interval clearing statements are not visible
// in this chunk; only the allocator reset and ClonedMIs loop remain.
67 void LiveIntervals::releaseMemory() {
72 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
73 VNInfoAllocator.Reset();
74 for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
// computeNumbering - Assign a slot index to every MachineInstr and
// record per-MBB index ranges. If an old index->MI map exists, remap
// the start/end of every live range to the new numbering.
78 void LiveIntervals::computeNumbering() {
79 Index2MiMap OldI2MI = i2miMap_;
86 // Number MachineInstrs and MachineBasicBlocks.
87 // Initialize MBB indexes to a sentinel.
88 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
91 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
93 unsigned StartIdx = MIIndex;
95 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
97 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
98 assert(inserted && "multiple MachineInstr -> index mappings");
99 i2miMap_.push_back(I);
// Each instruction occupies InstrSlots::NUM slots (load/use/def/store).
100 MIIndex += InstrSlots::NUM;
103 // Set the MBB2IdxMap entry for this MBB.
104 MBB2IdxMap[MBB->getNumber()] = (StartIdx == MIIndex)
105 ? std::make_pair(StartIdx, StartIdx) // Empty MBB
106 : std::make_pair(StartIdx, MIIndex - 1);
107 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
// Keep Idx2MBBMap sorted by start index so lower_bound lookups work.
109 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
111 if (!OldI2MI.empty())
112 for (iterator I = begin(), E = end(); I != E; ++I)
113 for (LiveInterval::iterator LI = I->second.begin(), LE = I->second.end();
// Translate old slot indices to new ones via the old index->MI map.
115 LI->start = mi2iMap_[OldI2MI[LI->start]];
116 LI->end = mi2iMap_[OldI2MI[LI->end]];
120 /// runOnMachineFunction - Register allocate the whole function
// Caches target/register info and LiveVariables, then computes the
// live intervals and updates the interval statistics.
// NOTE(review): the `mf_ = &fn;` initialization is not visible in this
// chunk but mf_ is used below — confirm it is set before use.
122 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
124 mri_ = &mf_->getRegInfo();
125 tm_ = &fn.getTarget();
126 tri_ = tm_->getRegisterInfo();
127 tii_ = tm_->getInstrInfo();
128 lv_ = &getAnalysis<LiveVariables>();
129 allocatableRegs_ = tri_->getAllocatableSet(fn);
134 numIntervals += getNumIntervals();
136 DOUT << "********** INTERVALS **********\n";
137 for (iterator I = begin(), E = end(); I != E; ++I) {
138 I->second.print(DOUT, tri_);
142 numIntervalsAfter += getNumIntervals();
147 /// print - Implement the dump method.
// Dumps every live interval followed by the numbered machine
// instructions of each basic block.
148 void LiveIntervals::print(std::ostream &O, const Module* ) const {
149 O << "********** INTERVALS **********\n";
150 for (const_iterator I = begin(), E = end(); I != E; ++I) {
// NOTE(review): intervals print to DOUT here rather than the passed
// stream O — confirm this asymmetry is intentional.
151 I->second.print(DOUT, tri_);
155 O << "********** MACHINEINSTRS **********\n";
156 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
157 mbbi != mbbe; ++mbbi) {
158 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
159 for (MachineBasicBlock::iterator mii = mbbi->begin(),
160 mie = mbbi->end(); mii != mie; ++mii) {
161 O << getInstructionIndex(mii) << '\t' << *mii;
166 /// conflictsWithPhysRegDef - Returns true if the specified register
167 /// is defined during the duration of the specified interval.
// Walks every instruction slot covered by li's ranges; copies of li.reg
// itself are skipped, and virtual-register operands are mapped through
// the VirtRegMap to their assigned physical registers before checking
// for overlap with 'reg'.
168 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
169 VirtRegMap &vrm, unsigned reg) {
170 for (LiveInterval::Ranges::const_iterator
171 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
172 for (unsigned index = getBaseIndex(I->start),
173 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
174 index += InstrSlots::NUM) {
175 // skip deleted instructions
176 while (index != end && !getInstructionFromIndex(index))
177 index += InstrSlots::NUM;
178 if (index == end) break;
180 MachineInstr *MI = getInstructionFromIndex(index);
181 unsigned SrcReg, DstReg;
// A copy of the interval's own register is not a conflict.
182 if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
183 if (SrcReg == li.reg || DstReg == li.reg)
185 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
186 MachineOperand& mop = MI->getOperand(i);
187 if (!mop.isRegister())
189 unsigned PhysReg = mop.getReg();
190 if (PhysReg == 0 || PhysReg == li.reg)
192 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
// Unassigned virtual registers cannot conflict yet.
193 if (!vrm.hasPhys(PhysReg))
195 PhysReg = vrm.getPhys(PhysReg);
197 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
// printRegName - Debug helper: print a physical register by its target
// name, or a virtual register as "%reg<N>".
206 void LiveIntervals::printRegName(unsigned reg) const {
207 if (TargetRegisterInfo::isPhysicalRegister(reg))
208 cerr << tri_->getName(reg);
210 cerr << "%reg" << reg;
// handleVirtualRegisterDef - Extend 'interval' for a definition of a
// virtual register by instruction 'mi'. The first def of a vreg builds
// the whole interval from LiveVariables kill/alive-block info; later
// defs are either two-address redefinitions or PHI-elimination copies
// and split / extend the existing value numbers accordingly.
213 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
214 MachineBasicBlock::iterator mi,
216 LiveInterval &interval) {
217 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
218 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
220 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
221 DOUT << "is a implicit_def\n";
225 // Virtual registers may be defined multiple times (due to phi
226 // elimination and 2-addr elimination). Much of what we do only has to be
227 // done once for the vreg. We use an empty interval to detect the first
228 // time we see a vreg.
229 if (interval.empty()) {
230 // Get the Idx of the defining instructions.
231 unsigned defIndex = getDefIndex(MIIdx);
// Remember the defining copy (subreg ops count as copies) on the val#.
233 MachineInstr *CopyMI = NULL;
234 unsigned SrcReg, DstReg;
235 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
236 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
237 tii_->isMoveInstr(*mi, SrcReg, DstReg))
239 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
241 assert(ValNo->id == 0 && "First value in interval is not 0?");
243 // Loop over all of the blocks that the vreg is defined in. There are
244 // two cases we have to handle here. The most common case is a vreg
245 // whose lifetime is contained within a basic block. In this case there
246 // will be a single kill, in MBB, which comes after the definition.
247 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
248 // FIXME: what about dead vars?
250 if (vi.Kills[0] != mi)
251 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
253 killIdx = defIndex+1;
255 // If the kill happens after the definition, we have an intra-block
257 if (killIdx > defIndex) {
258 assert(vi.AliveBlocks.none() &&
259 "Shouldn't be alive across any blocks!");
260 LiveRange LR(defIndex, killIdx, ValNo);
261 interval.addRange(LR);
262 DOUT << " +" << LR << "\n";
263 interval.addKill(ValNo, killIdx);
268 // The other case we handle is when a virtual register lives to the end
269 // of the defining block, potentially live across some blocks, then is
270 // live into some number of blocks, but gets killed. Start by adding a
271 // range that goes from this definition to the end of the defining block.
272 LiveRange NewLR(defIndex,
273 getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
275 DOUT << " +" << NewLR;
276 interval.addRange(NewLR);
278 // Iterate over all of the blocks that the variable is completely
279 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
281 for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
282 if (vi.AliveBlocks[i]) {
283 MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
285 LiveRange LR(getMBBStartIdx(i),
286 getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
288 interval.addRange(LR);
294 // Finally, this virtual register is live from the start of any killing
295 // block to the 'use' slot of the killing instruction.
296 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
297 MachineInstr *Kill = vi.Kills[i];
298 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
299 LiveRange LR(getMBBStartIdx(Kill->getParent()),
301 interval.addRange(LR);
302 interval.addKill(ValNo, killIdx);
307 // If this is the second time we see a virtual register definition, it
308 // must be due to phi elimination or two addr elimination. If this is
309 // the result of two address elimination, then the vreg is one of the
310 // def-and-use register operand.
311 if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
312 // If this is a two-address definition, then we have already processed
313 // the live range. The only problem is that we didn't realize there
314 // are actually two values in the live interval. Because of this we
315 // need to take the LiveRegion that defines this register and split it
317 assert(interval.containsOneValue());
318 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
319 unsigned RedefIndex = getDefIndex(MIIdx);
321 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
322 VNInfo *OldValNo = OldLR->valno;
324 // Delete the initial value, which should be short and continuous,
325 // because the 2-addr copy must be in the same MBB as the redef.
326 interval.removeRange(DefIndex, RedefIndex);
328 // Two-address vregs should always only be redefined once. This means
329 // that at this point, there should be exactly one value number in it.
330 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
332 // The new value number (#1) is defined by the instruction we claimed
334 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
337 // Value#0 is now defined by the 2-addr instruction.
338 OldValNo->def = RedefIndex;
341 // Add the new live interval which replaces the range for the input copy.
342 LiveRange LR(DefIndex, RedefIndex, ValNo);
343 DOUT << " replace range with " << LR;
344 interval.addRange(LR);
345 interval.addKill(ValNo, RedefIndex);
347 // If this redefinition is dead, we need to add a dummy unit live
348 // range covering the def slot.
349 if (mi->registerDefIsDead(interval.reg, tri_))
350 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
353 interval.print(DOUT, tri_);
356 // Otherwise, this must be because of phi elimination. If this is the
357 // first redefinition of the vreg that we have seen, go back and change
358 // the live range in the PHI block to be a different value number.
359 if (interval.containsOneValue()) {
360 assert(vi.Kills.size() == 1 &&
361 "PHI elimination vreg should have one kill, the PHI itself!");
363 // Remove the old range that we now know has an incorrect number.
364 VNInfo *VNI = interval.getValNumInfo(0);
365 MachineInstr *Killer = vi.Kills[0];
366 unsigned Start = getMBBStartIdx(Killer->getParent());
367 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
368 DOUT << " Removing [" << Start << "," << End << "] from: ";
369 interval.print(DOUT, tri_); DOUT << "\n";
370 interval.removeRange(Start, End);
371 VNI->hasPHIKill = true;
372 DOUT << " RESULT: "; interval.print(DOUT, tri_);
374 // Replace the interval with one of a NEW value number. Note that this
375 // value number isn't actually defined by an instruction, weird huh? :)
376 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
377 DOUT << " replace range with " << LR;
378 interval.addRange(LR);
379 interval.addKill(LR.valno, End);
380 DOUT << " RESULT: "; interval.print(DOUT, tri_);
383 // In the case of PHI elimination, each variable definition is only
384 // live until the end of the block. We've already taken care of the
385 // rest of the live range.
386 unsigned defIndex = getDefIndex(MIIdx);
// As above: remember the defining copy instruction on the new val#.
389 MachineInstr *CopyMI = NULL;
390 unsigned SrcReg, DstReg;
391 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
392 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
393 tii_->isMoveInstr(*mi, SrcReg, DstReg))
395 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
397 unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
398 LiveRange LR(defIndex, killIndex, ValNo);
399 interval.addRange(LR);
400 interval.addKill(ValNo, killIndex);
401 ValNo->hasPHIKill = true;
// handlePhysicalRegisterDef - Add the live range for a physical
// register def. Physreg intervals never cross block boundaries, so the
// range ends at the kill, at a redefinition, or (dead def / live-in
// fall-through) one slot past the def.
409 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
410 MachineBasicBlock::iterator mi,
412 LiveInterval &interval,
413 MachineInstr *CopyMI) {
414 // A physical register cannot be live across basic blocks, so its
415 // lifetime must end somewhere in its defining basic block.
416 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
418 unsigned baseIndex = MIIdx;
419 unsigned start = getDefIndex(baseIndex);
420 unsigned end = start;
422 // If it is not used after definition, it is considered dead at
423 // the instruction defining it. Hence its interval is:
424 // [defSlot(def), defSlot(def)+1)
425 if (mi->registerDefIsDead(interval.reg, tri_)) {
427 end = getDefIndex(start) + 1;
431 // If it is not dead on definition, it must be killed by a
432 // subsequent instruction. Hence its interval is:
433 // [defSlot(def), useSlot(kill)+1)
434 while (++mi != MBB->end()) {
435 baseIndex += InstrSlots::NUM;
436 if (mi->killsRegister(interval.reg, tri_)) {
438 end = getUseIndex(baseIndex) + 1;
440 } else if (mi->modifiesRegister(interval.reg, tri_)) {
441 // Another instruction redefines the register before it is ever read.
442 // Then the register is essentially dead at the instruction that defines
443 // it. Hence its interval is:
444 // [defSlot(def), defSlot(def)+1)
446 end = getDefIndex(start) + 1;
451 // The only case we should have a dead physreg here without a killing or
452 // instruction where we know it's dead is if it is live-in to the function
454 assert(!CopyMI && "physreg was not killed in defining block!");
455 end = getDefIndex(start) + 1; // It's dead.
458 assert(start < end && "did not find end of interval?");
460 // Already exists? Extend old live interval.
461 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
462 VNInfo *ValNo = (OldLR != interval.end())
463 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
464 LiveRange LR(start, end, ValNo);
465 interval.addRange(LR);
466 interval.addKill(LR.valno, end);
467 DOUT << " +" << LR << '\n';
// handleRegisterDef - Dispatch a register definition: virtual
// registers go to handleVirtualRegisterDef; allocatable physical
// registers go to handlePhysicalRegisterDef, along with each of their
// sub-registers that MI does not define explicitly itself.
470 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
471 MachineBasicBlock::iterator MI,
474 if (TargetRegisterInfo::isVirtualRegister(reg))
475 handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
476 else if (allocatableRegs_[reg]) {
477 MachineInstr *CopyMI = NULL;
478 unsigned SrcReg, DstReg;
479 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
480 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
481 tii_->isMoveInstr(*MI, SrcReg, DstReg))
483 handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), CopyMI);
484 // Def of a register also defines its sub-registers.
485 for (const unsigned* AS = tri_->getSubRegisters(reg); *AS; ++AS)
486 // If MI also modifies the sub-register explicitly, avoid processing it
487 // more than once. Do not pass in TRI here so it checks for exact match.
488 if (!MI->modifiesRegister(*AS))
489 handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Build the range for a register that is live
// into MBB: scan forward from the block start until the register is
// killed or redefined; if neither happens the register is treated as
// live through (or dead at entry for the live-in-to-function case).
493 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
495 LiveInterval &interval, bool isAlias) {
496 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
498 // Look for kills, if it reaches a def before it's killed, then it shouldn't
499 // be considered a livein.
500 MachineBasicBlock::iterator mi = MBB->begin();
501 unsigned baseIndex = MIIdx;
502 unsigned start = baseIndex;
503 unsigned end = start;
504 while (mi != MBB->end()) {
505 if (mi->killsRegister(interval.reg, tri_)) {
507 end = getUseIndex(baseIndex) + 1;
509 } else if (mi->modifiesRegister(interval.reg, tri_)) {
510 // Another instruction redefines the register before it is ever read.
511 // Then the register is essentially dead at the instruction that defines
512 // it. Hence its interval is:
513 // [defSlot(def), defSlot(def)+1)
515 end = getDefIndex(start) + 1;
519 baseIndex += InstrSlots::NUM;
524 // Live-in register might not be used at all.
528 end = getDefIndex(MIIdx) + 1;
530 DOUT << " live through";
// The live-in value has no defining instruction inside the function,
// so the value number is created with a null copy MI.
535 LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
536 interval.addRange(LR);
537 interval.addKill(LR.valno, end);
538 DOUT << " +" << LR << '\n';
541 /// computeIntervals - computes the live intervals for virtual
542 /// registers. for some ordering of the machine instructions [1,N] a
543 /// live interval is an interval [i, j) where 1 <= i <= j < N for
544 /// which a variable is live
545 void LiveIntervals::computeIntervals() {
546 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
547 << "********** Function: "
548 << ((Value*)mf_->getFunction())->getName() << '\n';
549 // Track the index of the current machine instr.
550 unsigned MIIndex = 0;
551 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
553 MachineBasicBlock *MBB = MBBI;
554 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
556 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
558 // Create intervals for live-ins to this BB first.
559 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
560 LE = MBB->livein_end(); LI != LE; ++LI) {
561 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
562 // Multiple live-ins can alias the same register.
563 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
564 if (!hasInterval(*AS))
565 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
569 for (; MI != miEnd; ++MI) {
570 DOUT << MIIndex << "\t" << *MI;
// Walk operands in reverse; signed index because it counts down to 0.
573 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
574 MachineOperand &MO = MI->getOperand(i);
575 // handle register defs - build intervals
576 if (MO.isRegister() && MO.getReg() && MO.isDef())
577 handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
580 MIIndex += InstrSlots::NUM;
// findLiveInMBBs - Collect into MBBs the basic blocks whose start
// index falls inside [LR.start, LR.end), using binary search over the
// sorted Idx2MBBMap.
585 bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
586 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
587 std::vector<IdxMBBPair>::const_iterator I =
588 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
591 while (I != Idx2MBBMap.end()) {
592 if (LR.end <= I->first)
594 MBBs.push_back(I->second);
// createInterval - Construct a fresh LiveInterval for 'reg'. The spill
// weight differs for physical vs. virtual registers (the physical
// weight constant is on a line not visible in this chunk).
602 LiveInterval LiveIntervals::createInterval(unsigned reg) {
603 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
605 return LiveInterval(reg, Weight);
608 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
609 /// copy field and returns the source register that defines it.
// For EXTRACT_SUBREG the source is operand 1; for INSERT_SUBREG the
// inserted value is operand 2; otherwise the target hook decodes the
// move. Unrecognized copies hit the assert.
610 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
614 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
615 return VNI->copy->getOperand(1).getReg();
616 if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
617 return VNI->copy->getOperand(2).getReg();
618 unsigned SrcReg, DstReg;
619 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
621 assert(0 && "Unrecognized copy instruction!");
625 //===----------------------------------------------------------------------===//
626 // Register allocator hooks.
629 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
630 /// allow one) virtual register operand, then its uses are implicitly using
631 /// the register. Returns the virtual register.
632 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
633 MachineInstr *MI) const {
635 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
636 MachineOperand &MO = MI->getOperand(i);
637 if (!MO.isRegister() || !MO.isUse())
639 unsigned Reg = MO.getReg();
// li.reg itself doesn't count — we're after *other* register operands.
640 if (Reg == 0 || Reg == li.reg)
642 // FIXME: For now, only remat MI with at most one register operand.
644 "Can't rematerialize instruction with multiple register operand!");
651 /// isValNoAvailableAt - Return true if the val# of the specified interval
652 /// which reaches the given instruction also reaches the specified use index.
653 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
654 unsigned UseIdx) const {
655 unsigned Index = getInstructionIndex(MI);
// NOTE(review): FindLiveRangeContaining's result is dereferenced
// unchecked here — MI is assumed to lie inside li.
656 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
657 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
658 return UI != li.end() && UI->valno == ValNo;
661 /// isReMaterializable - Returns true if the definition MI of the specified
662 /// val# of the specified interval is re-materializable.
663 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
664 const VNInfo *ValNo, MachineInstr *MI,
// IMPLICIT_DEF is trivially rematerializable.
670 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
674 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
675 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
676 // FIXME: Let target specific isReallyTriviallyReMaterializable determines
677 // this but remember this is not safe to fold into a two-address
679 // This is a load from fixed stack slot. It can be rematerialized.
682 if (tii_->isTriviallyReMaterializable(MI)) {
683 const TargetInstrDesc &TID = MI->getDesc();
684 isLoad = TID.isSimpleLoad();
// If the remat def implicitly uses another vreg, that vreg's value
// must be available at every use of li reached by this val#.
686 unsigned ImpUse = getReMatImplicitUse(li, MI);
688 const LiveInterval &ImpLi = getInterval(ImpUse);
689 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
690 re = mri_->use_end(); ri != re; ++ri) {
691 MachineInstr *UseMI = &*ri;
692 unsigned UseIdx = getInstructionIndex(UseMI);
// Only uses reached by this particular val# matter.
693 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
695 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
705 /// isReMaterializable - Returns true if every definition of MI of every
706 /// val# of the specified interval is re-materializable.
707 bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
709 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
711 const VNInfo *VNI = *i;
712 unsigned DefIdx = VNI->def;
714 continue; // Dead val#.
715 // Is the def for the val# rematerializable?
718 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
719 bool DefIsLoad = false;
721 !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
728 /// FilterFoldedOps - Filter out two-address use operands. Return
729 /// true if it finds any issue with the operands that ought to prevent
731 static bool FilterFoldedOps(MachineInstr *MI,
732 SmallVector<unsigned, 2> &Ops,
734 SmallVector<unsigned, 2> &FoldOps) {
735 const TargetInstrDesc &TID = MI->getDesc();
738 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
739 unsigned OpIdx = Ops[i];
740 MachineOperand &MO = MI->getOperand(OpIdx);
741 // FIXME: fold subreg use.
// Accumulate the mod/ref kind of the folded operands into MRInfo.
745 MRInfo |= (unsigned)VirtRegMap::isMod;
747 // Filter out two-address use operand(s).
748 if (!MO.isImplicit() &&
749 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
750 MRInfo = VirtRegMap::isModRef;
753 MRInfo |= (unsigned)VirtRegMap::isRef;
755 FoldOps.push_back(OpIdx);
761 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
762 /// slot / to reg or any rematerialized load into ith operand of specified
763 /// MI. If it is successful, MI is updated with the newly created MI and
765 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
766 VirtRegMap &vrm, MachineInstr *DefMI,
768 SmallVector<unsigned, 2> &Ops,
769 bool isSS, int Slot, unsigned Reg) {
770 // If it is an implicit def instruction, just delete it.
771 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
772 RemoveMachineInstrFromMaps(MI);
773 vrm.RemoveMachineInstrFromMaps(MI);
774 MI->eraseFromParent();
779 // Filter the list of operand indexes that are to be folded. Abort if
780 // any operand will prevent folding.
782 SmallVector<unsigned, 2> FoldOps;
783 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
786 // The only time it's safe to fold into a two address instruction is when
787 // it's folding reload and spill from / into a spill stack slot.
788 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Fold either the stack slot (spill/reload) or the remat def itself.
791 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
792 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
794 // Remember this instruction uses the spill slot.
795 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
797 // Attempt to fold the memory reference into the instruction. If
798 // we can do this, we don't need to insert spill code.
800 lv_->instructionChanged(MI, fmi);
802 fmi->copyKillDeadInfo(MI, tri_);
803 MachineBasicBlock &MBB = *MI->getParent();
804 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
805 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Transfer all VirtRegMap bookkeeping from the old MI to the new one.
806 vrm.transferSpillPts(MI, fmi);
807 vrm.transferRestorePts(MI, fmi);
808 vrm.transferEmergencySpills(MI, fmi);
// Patch the index maps so fmi takes over MI's slot index.
810 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
811 mi2iMap_[fmi] = InstrIdx;
812 MI = MBB.insert(MBB.erase(MI), fmi);
819 /// canFoldMemoryOperand - Returns true if the specified load / store
820 /// folding is possible.
821 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
822 SmallVector<unsigned, 2> &Ops,
824 // Filter the list of operand indexes that are to be folded. Abort if
825 // any operand will prevent folding.
827 SmallVector<unsigned, 2> FoldOps;
828 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
831 // It's only legal to remat for a use, not a def.
832 if (ReMat && (MRInfo & VirtRegMap::isMod))
835 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Check whether every range of li lies inside a
// single basic block, by mapping each range start to its MBB via
// binary search and rejecting ranges that cross an MBB boundary.
838 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
839 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
840 for (LiveInterval::Ranges::const_iterator
841 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
842 std::vector<IdxMBBPair>::const_iterator II =
843 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
844 if (II == Idx2MBBMap.end())
846 if (I->end > II->first) // crossing a MBB.
848 MBBs.insert(II->second);
855 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
856 /// interval on to-be re-materialized operands of MI) with new register.
857 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
858 MachineInstr *MI, unsigned NewVReg,
860 // There is an implicit use. That means one of the other operand is
861 // being remat'ed and the remat'ed instruction has li.reg as an
862 // use operand. Make sure we rewrite that as well.
863 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
864 MachineOperand &MO = MI->getOperand(i);
865 if (!MO.isRegister())
867 unsigned Reg = MO.getReg();
868 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
870 if (!vrm.isReMaterialized(Reg))
872 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
873 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
875 UseMO->setReg(NewVReg);
879 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
880 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// NOTE(review): the declaration line carrying the return type and
// class qualifier (original line 881) is not visible in this chunk.
882 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
883 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
884 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
885 unsigned Slot, int LdSlot,
886 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
888 const TargetRegisterClass* rc,
889 SmallVector<int, 4> &ReMatIds,
890 const MachineLoopInfo *loopInfo,
891 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
892 std::map<unsigned,unsigned> &MBBVRegsMap,
893 std::vector<LiveInterval*> &NewLIs) {
894 bool CanFold = false;
// Walk MI's operands, rewriting references to li.reg onto NewVReg and
// attempting to fold loads/stores or delete fully-remat'ed defs.
896 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
897 MachineOperand& mop = MI->getOperand(i);
898 if (!mop.isRegister())
900 unsigned Reg = mop.getReg();
902 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
907 bool TryFold = !DefIsReMat;
908 bool FoldSS = true; // Default behavior unless it's a remat.
911 // If this is the rematerializable definition MI itself and
912 // all of its uses are rematerialized, simply delete it.
913 if (MI == ReMatOrigDefMI && CanDelete) {
914 DOUT << "\t\t\t\tErasing re-materlizable def: ";
916 RemoveMachineInstrFromMaps(MI);
917 vrm.RemoveMachineInstrFromMaps(MI);
918 MI->eraseFromParent();
922 // If def for this use can't be rematerialized, then try folding.
923 // If def is rematerializable and it's a load, also try folding.
924 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
926 // Try fold loads (from stack slot, constant pool, etc.) into uses.
932 // Scan all of the operands of this instruction rewriting operands
933 // to use NewVReg instead of li.reg as appropriate. We do this for
936 // 1. If the instr reads the same spilled vreg multiple times, we
937 // want to reuse the NewVReg.
938 // 2. If the instr is a two-addr instruction, we are required to
939 // keep the src/dst regs pinned.
941 // Keep track of whether we replace a use and/or def so that we can
942 // create the spill interval with the appropriate range.
944 HasUse = mop.isUse();
945 HasDef = mop.isDef();
946 SmallVector<unsigned, 2> Ops;
// Gather every later operand that also references this register.
948 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
949 const MachineOperand &MOj = MI->getOperand(j);
950 if (!MOj.isRegister())
952 unsigned RegJ = MOj.getReg();
953 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
957 HasUse |= MOj.isUse();
958 HasDef |= MOj.isDef();
963 // Do not fold load / store here if we are splitting. We'll find an
964 // optimal point to insert a load / store later.
966 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
967 Ops, FoldSS, FoldSlot, Reg)) {
968 // Folding the load/store can completely change the instruction in
969 // unpredictable ways, rescan it from the beginning.
975 goto RestartInstruction;
978 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
983 // Create a new virtual register for the spill interval.
984 bool CreatedNewVReg = false;
986 NewVReg = mri_->createVirtualRegister(rc);
988 CreatedNewVReg = true;
991 if (mop.isImplicit())
992 rewriteImplicitOps(li, MI, NewVReg, vrm);
994 // Reuse NewVReg for other reads.
995 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
996 MachineOperand &mopj = MI->getOperand(Ops[j]);
997 mopj.setReg(NewVReg);
998 if (mopj.isImplicit())
999 rewriteImplicitOps(li, MI, NewVReg, vrm);
1002 if (CreatedNewVReg) {
1004 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1005 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1006 // Each valnum may have its own remat id.
1007 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1009 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1011 if (!CanDelete || (HasUse && HasDef)) {
1012 // If this is a two-addr instruction then its use operands are
1013 // rematerializable but its def is not. It should be assigned a
1015 vrm.assignVirt2StackSlot(NewVReg, Slot);
1018 vrm.assignVirt2StackSlot(NewVReg, Slot);
1020 } else if (HasUse && HasDef &&
1021 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1022 // If this interval hasn't been assigned a stack slot (because earlier
1023 // def is a deleted remat def), do it now.
1024 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1025 vrm.assignVirt2StackSlot(NewVReg, Slot);
1028 // Re-matting an instruction with virtual register use. Add the
1029 // register as an implicit use on the use MI.
1030 if (DefIsReMat && ImpUse)
1031 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1033 // create a new register interval for this spill / remat.
1034 LiveInterval &nI = getOrCreateInterval(NewVReg);
1035 if (CreatedNewVReg) {
1036 NewLIs.push_back(&nI);
1037 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1039 vrm.setIsSplitFromReg(NewVReg, li.reg);
1043 if (CreatedNewVReg) {
// Spill interval covers load slot through use slot of this index.
1044 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1045 nI.getNextValue(~0U, 0, VNInfoAllocator));
1049 // Extend the split live interval to this def / use.
1050 unsigned End = getUseIndex(index)+1;
1051 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1052 nI.getValNumInfo(nI.getNumValNums()-1));
1058 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1059 nI.getNextValue(~0U, 0, VNInfoAllocator));
1064 DOUT << "\t\t\t\tAdded new interval: ";
1065 nI.print(DOUT, tri_);
/// anyKillInMBBAfterIdx - Return true if the value number has a kill inside
/// MBB at an instruction index strictly greater than Idx.
/// NOTE(review): the VNInfo* parameter appears in an elided part of the
/// signature — callers pass it between `li` and `MBB`; confirm against the
/// header declaration.
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                  MachineBasicBlock *MBB, unsigned Idx) const {
  // Only kill points before the end of MBB count as "in this block".
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    // Kill must come after Idx but still fall inside this basic block.
    if (KillIdx > Idx && KillIdx < End)
/// findDefinedVNInfo - Find the VNInfo of the given live interval whose def
/// index matches DefIdx, or NULL if no value number is defined there.
static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
  const VNInfo *VNI = NULL;
  // Linear scan over all value numbers; intervals typically have few val#'s.
  for (LiveInterval::const_vni_iterator i = li.vni_begin(),
         e = li.vni_end(); i != e; ++i)
    if ((*i)->def == DefIdx) {
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling. One record is created per (instruction, operand)
/// reference of the register being spilled.
  struct RewriteInfo {
    // i = instruction index, mi = the instruction itself,
    // u/d = whether this operand reads / writes the register.
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}

  /// RewriteInfoCompare - Orders RewriteInfo records by instruction index so
  /// that defs and uses are rewritten in program order.
  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
/// rewriteInstructionsForSpills - Rewrite all defs and uses of the given live
/// range to use a new virtual register (per basic block when splitting), and
/// record per-MBB spill and restore points in SpillIdxes / RestoreIdxes so
/// the caller can place spill code optimally.
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    std::map<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  // [start, end) covers the whole instructions of this range, rounded to
  // instruction-slot boundaries.
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    // Skip references that fall outside this live range.
    if (index < start || index >= end)
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  // If the def is rematerializable, find the virtual register it implicitly
  // uses (if any); its interval must be kept alive for the remat to work.
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.  Coalesce them into one rewrite of MI.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasDef |= RewriteMIs[i].HasDef;
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
      // When splitting, reuse the replacement vreg already chosen for this MBB.
      std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
      // It's better to start a new interval to avoid artifically
      // extend the new interval.
      if (MIHasDef && !MIHasUse) {
        MBBVRegsMap.erase(MBB->getNumber());

    bool IsNew = ThisVReg == 0;
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    // Nothing was rewritten in this instruction; move on.
    if (!HasDef && !HasUse)

    // Splitting stays profitable only while every rewrite can be folded.
    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;

    // Keep track of the last def and first use in each MBB.
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (SII == SpillIdxes.end()) {
          // First spill recorded in this MBB.
          std::vector<SRInfo> S;
          S.push_back(SRInfo(index, NewVReg, true));
          SpillIdxes.insert(std::make_pair(MBBId, S));
        } else if (SII->second.back().vreg != NewVReg) {
          SII->second.push_back(SRInfo(index, NewVReg, true));
        } else if ((int)index > SII->second.back().index) {
          // If there is an earlier def and this is a two-address
          // instruction, then it's not possible to fold the store (which
          // would also fold the load).
          SRInfo &Info = SII->second.back();
          Info.canFold = !HasUse;
        SpillMBBs.set(MBBId);
      } else if (SII != SpillIdxes.end() &&
                 SII->second.back().vreg == NewVReg &&
                 (int)index > SII->second.back().index) {
        // There is an earlier def that's not killed (must be two-address).
        // The spill is no longer needed.
        SII->second.pop_back();
        if (SII->second.empty()) {
          SpillIdxes.erase(MBBId);
          SpillMBBs.reset(MBBId);

        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (SII != SpillIdxes.end() &&
            SII->second.back().vreg == NewVReg &&
            (int)index > SII->second.back().index)
          // Use(s) following the last def, it's not safe to fold the spill.
          SII->second.back().canFold = false;
        std::map<unsigned, std::vector<SRInfo> >::iterator RII =
          RestoreIdxes.find(MBBId);
        if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
          // If we are splitting live intervals, only fold if it's the first
          // use and there isn't another use later in the MBB.
          RII->second.back().canFold = false;
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
          RII->second.push_back(SRInfo(index, NewVReg, true));
        RestoreMBBs.set(MBBId);

    // Update spill weight.  Uses/defs in deeper loops weigh more.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
/// alsoFoldARestore - Return true if the restore of register vr recorded at
/// instruction index 'index' in basic block Id is marked as foldable.
bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
                                  BitVector &RestoreMBBs,
                  std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  // No restores recorded for this MBB at all.
  if (!RestoreMBBs[Id])
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  // Match on both instruction index and virtual register.
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
1329 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1330 BitVector &RestoreMBBs,
1331 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1332 if (!RestoreMBBs[Id])
1334 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1335 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1336 if (Restores[i].index == index && Restores[i].vreg)
1337 Restores[i].index = -1;
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  // Walk every remaining reference to the spilled register.
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
      // Any def still referencing li.reg at this point must be the
      // IMPLICIT_DEF itself; delete it and drop it from all index maps.
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      // This must be an use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      // Rewrite every operand of MI that referenced the spilled register.
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg)
/// addIntervalsForSpills - Spill the given live interval, creating new, short
/// replacement intervals (one per reference, or per basic block when
/// splitting).  Records spill/restore points in the VirtRegMap and returns
/// the newly created intervals for the register allocator to process.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  // Since this is called after the analysis is done we don't know if
  // LiveVariables is available
  lv_ = getAnalysisToUpdate<LiveVariables>();

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);

  // Each bit specify whether it a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
  std::map<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  // Per-value-number rematerialization bookkeeping.
  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      IsFirstRange = false;

    handleSpilledImpDefs(li, vrm, rc, NewLIs);

  // Only split intervals that span multiple basic blocks, subject to the
  // -split-limit command line cap.
  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)

  // Decide, per value number, whether its def is rematerializable and
  // whether a stack slot is required at all.
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    unsigned DefIdx = VNI->def;
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
      ? 0 : getInstructionFromIndex(DefIdx);
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here. vrm must
      ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
      bool CanDelete = true;
      if (VNI->hasPHIKill) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        // Need a stack slot if there is any live range where uses cannot be
        NeedStackSlot = true;
        ReMatDelete.set(VN);
      // Need a stack slot if there is any live range where uses cannot be
      NeedStackSlot = true;

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
    Slot = vrm.assignVirt2StackSlot(li.reg);

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo,
                         SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                         MBBVRegsMap, NewLIs);

  // Insert spills / restores if we are splitting.
    handleSpilledImpDefs(li, vrm, rc, NewLIs);

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    // Walk every MBB that has recorded spill points.
    int Id = SpillMBBs.find_first();
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        if (spills[i].canFold) {
          // Collect the operands of MI that reference VReg for folding.
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isRegister() || MO.getReg() != VReg)
              (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                              RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            // Also folded uses, do not issue a load.
            eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
            nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            nI.removeRange(getDefIndex(index), getStoreIndex(index));

        // Otherwise tell the spiller to issue a spill.
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
            AddedKill.insert(&nI);
      Id = SpillMBBs.find_next(Id);

  // Walk every MBB that has recorded restore points.
  int Id = RestoreMBBs.find_first();
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      if (restores[i].canFold) {
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isRegister() || MO.getReg() != VReg)
          // If this restore were to be folded, it would have been folded
      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!vrm.isReMaterialized(VReg))
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the register
            // interval's spill weight to HUGE_VALF to prevent it from being
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
        vrm.addRestorePoint(VReg, MI);
    Id = RestoreMBBs.find_next(Id);

  // Finalize intervals: add kills, finalize spill weights, and filter out
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
      // Normalize spill weight by interval size.
      LI->weight /= LI->getSize();
      if (!AddedKill.count(LI)) {
        // Mark the last use of this new interval as a kill, unless it is a
        // tied (two-address) operand whose value lives on in the def.
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (LastUse->getOperand(UseIdx).isImplicit() ||
            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
      RetNewLIs.push_back(LI);

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  // Walk the (null-terminated) super-register list of Reg.
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  // Walk the (null-terminated) super-register list of Reg.
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    // A super-register with no allocatable super of its own but with a live
    // interval is the widest candidate seen so far.
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflicts with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  // Conflicts are measured against the widest allocatable alias of PhysReg.
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  // Count every reference of li.reg that occurs while pli is live.
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
  return NumConflicts;
1726 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1727 /// around all defs and uses of the specified interval.
1728 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1729 unsigned PhysReg, VirtRegMap &vrm) {
1730 unsigned SpillReg = getRepresentativeReg(PhysReg);
1732 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1733 // If there are registers which alias PhysReg, but which are not a
1734 // sub-register of the chosen representative super register. Assert
1735 // since we can't handle it yet.
1736 assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
1737 tri_->isSuperRegister(*AS, SpillReg));
1739 LiveInterval &pli = getInterval(SpillReg);
1740 SmallPtrSet<MachineInstr*, 8> SeenMIs;
1741 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1742 E = mri_->reg_end(); I != E; ++I) {
1743 MachineOperand &O = I.getOperand();
1744 MachineInstr *MI = O.getParent();
1745 if (SeenMIs.count(MI))
1748 unsigned Index = getInstructionIndex(MI);
1749 if (pli.liveAt(Index)) {
1750 vrm.addEmergencySpill(SpillReg, MI);
1751 pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1752 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
1753 if (!hasInterval(*AS))
1755 LiveInterval &spli = getInterval(*AS);
1756 if (spli.liveAt(Index))
1757 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);