//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds,     "Number of loads/stores folded into instructions");
STATISTIC(numSplits,    "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);

  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U, ~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;

      // Insert an empty slot after every instruction.
      MIIndex += InstrSlots::NUM;
      i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
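
  // Illustrative sketch of the numbering assumed above (the actual constants
  // live in LiveIntervalAnalysis.h; the usual layout is InstrSlots::NUM == 4
  // with LOAD, USE, DEF and STORE sub-slots per instruction):
  //
  //   block-entry empty slot          indices 0..3
  //   first instruction               indices 4..7   (LOAD/USE/DEF/STORE)
  //   trailing empty slot             indices 8..11
  //   second instruction              indices 12..15
  //   ...
  //
  // Under that assumption getBaseIndex(i) == i - i % InstrSlots::NUM, and the
  // offsets tested below (e.g. InstrSlots::LOAD) select one sub-slot.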
  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased. This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index
          std::vector<IdxMBBPair>::const_iterator J =
                 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                   std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }
      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->def != ~0U && vni->def != ~1U) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index
            std::vector<IdxMBBPair>::const_iterator J =
                  (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          // PHI kills don't need to be remapped.
          if (!vni->kills[i]) continue;

          unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
          unsigned offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);

            vni->kills[i] = getMBBEndIdx(I->second);
          } else {
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          }
        }
      }
    }
}
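
// A small worked example of the remapping above (illustrative only): if an
// instruction that used to sit at old index 8 now sits at new index 20, any
// range endpoint 8 + offset is rewritten to mi2iMap_[OldI2MI[8 / NUM]] +
// offset == 20 + offset; endpoints that fall on a block-boundary LOAD slot
// are instead snapped to the start (or end) index of the surrounding MBB.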
/// runOnMachineFunction - Register allocate the whole function
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(DOUT, tri_);
  }

  numIntervalsAfter += getNumIntervals();

  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);

    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }
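
    // For instance (illustrative, using the 4-slot numbering assumed above):
    // a vreg defined at instruction index 12 and killed at index 16 gets a
    // single intra-block range [getDefIndex(12), getUseIndex(16)+1), roughly
    // [14, 18), with a kill recorded at the upper bound.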
    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        LiveRange LR(getMBBStartIdx(i),
                     getMBBEndIdx(i)+1,  // MBB ends at -1.
                     ValNo);
        interval.addRange(LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      // Earlyclobbers move back one.
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);
    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number. Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      // Earlyclobbers move back one.
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
    }
  }
}
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      end = getUseIndex(baseIndex) + 1;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      end = getDefIndex(baseIndex) + 1;
    }
    baseIndex += InstrSlots::NUM;
  }

  // The only case we should have a dead physreg here without a killing
  // instruction where we know it's dead is if it is live-in to the function
  // and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;

  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
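
// Illustrative example (not from the original source): under the 4-slot
// numbering assumed above, a physreg defined by the instruction at index 8
// and killed by the next instruction at index 12 gets the range
// [getDefIndex(8), getUseIndex(12)+1) == [10, 14); if it is never read, it
// only covers [defSlot(def), defSlot(def)+1) == [10, 11).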
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx, MachineOperand &MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      end = getUseIndex(baseIndex) + 1;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      end = getDefIndex(start) + 1;
    }

    baseIndex += InstrSlots::NUM;
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    ++mi;
  }

  // Live-in register might not be used at all.
  if (isAlias)
    end = getDefIndex(MIIdx) + 1;
  else {
    DOUT << " live through";
    end = baseIndex;
  }

  LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
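
// Note: live-in values are created above with a VNInfo def index of ~0U;
// that is the "defined before the first instruction" sentinel which
// computeNumbering deliberately leaves untouched when it remaps def indices.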
/// computeIntervals - computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // handle register defs - build intervals
        if (MO.isReg() && MO.getReg() && MO.isDef()) {
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        }
      }

      MIIndex += InstrSlots::NUM;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}
bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (LR.end <= I->first)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
                       HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}
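
// Physical-register intervals are given an effectively infinite weight
// (HUGE_VALF) so the spiller never tries to spill them, while virtual-register
// intervals start at weight zero and accumulate def/use weights later. The
// exact constants above are an assumption, inferred from how HUGE_VALF is
// treated as "cannot spill" elsewhere in this file.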
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
    return VNI->copy->getOperand(1).getReg();
  if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
    return VNI->copy->getOperand(2).getReg();
  unsigned SrcReg, DstReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determine
    // this but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // tests.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }
    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg())
        continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0)
        continue;
      if (TargetRegisterInfo::isPhysicalRegister(Reg))
        return false;

      // Only allow one def, and that in the first operand.
      if (MO.isDef() != (i == 0))
        return false;

      // Only allow constant-valued registers.
      bool IsLiveIn = mri_->isLiveIn(Reg);
      MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                        E = mri_->def_end();

      // For the def, it should be the only def.
      if (MO.isDef() && (next(I) != E || IsLiveIn))
        return false;

      if (MO.isUse()) {
        // Only allow one other register use, as that's all the
        // remat mechanisms currently support.
        if (Reg != li.reg) {
          if (ImpUse == 0)
            ImpUse = Reg;
          else if (Reg != ImpUse)
            return false;
        }

        // For uses, there should be only one associated def.
        if (I != E && (next(I) != E || IsLiveIn))
          return false;
      }
    }
  }
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }

  return true;
}
/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0u)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  const TargetInstrDesc &TID = MI->getDesc();

  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (!MO.isImplicit() &&
          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operand is
  // being remat'ed and the remat'ed instruction has li.reg as an
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  MachineBasicBlock *MBB = MI->getParent();
  unsigned loopDepth = loopInfo->getLoopDepth(MBB);
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materlizable def: ";
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reason. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;

    // Update stack slot spill weight if we are splitting.
    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
    if (!TrySplit)
      SSWeight += Weight;

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isRemoved(MI)) {
            SSWeight -= Weight;
            break;
          }
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }
    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
  }
  return CanFold;
}
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reason. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;

        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;
    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }
    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else
          RII->second.push_back(SRInfo(index, NewVReg, true));
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
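
// The spill weight accumulated above is, in the usual implementation, a
// simple function of loop depth, roughly (isDef + isUse) * 10^loopDepth, so a
// def-and-use inside a doubly nested loop counts far more than one in
// straight-line code. Treat the exact formula as an assumption here:
// getSpillWeight is declared in the header, not defined in this file.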
bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}
void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = -1;
}
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg)
          MO.setReg(NewVReg);
      }
    }
  }
}
namespace {
  struct LISorter {
    bool operator()(LiveInterval* A, LiveInterval* B) {
      return A->beginNumber() < B->beginNumber();
    }
  };
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm, float& SSWeight) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // create a new register for this spill
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // the spill weight is now infinity as it
      // cannot be spilled again
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
             E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      unsigned index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index),
                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(getDefIndex(index), getStoreIndex(index),
                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DOUT << "\t\t\t\tadded new interval: ";

      unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
      if (HasUse && HasDef)
        SSWeight += getSpillWeight(true, true, loopDepth);
      else if (HasUse)
        SSWeight += getSpillWeight(false, true, loopDepth);
      else
        SSWeight += getSpillWeight(true, false, loopDepth);
    }

    RI = mri_->reg_begin(li.reg);
  }

  // Clients expect the new intervals to be returned in sorted order.
  std::sort(added.begin(), added.end(), LISorter());

  return added;
}
1743 std::vector<LiveInterval*> LiveIntervals::
1744 addIntervalsForSpills(const LiveInterval &li,
1745 SmallVectorImpl<LiveInterval*> &SpillIs,
1746 const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1749 if (EnableFastSpilling)
1750 return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);
1752 assert(li.weight != HUGE_VALF &&
1753 "attempt to spill already spilled interval!");
1755 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1756 li.print(DOUT, tri_);
1759 // Spill slot weight.
1762 // Each bit specify whether it a spill is required in the MBB.
1763 BitVector SpillMBBs(mf_->getNumBlockIDs());
1764 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1765 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1766 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1767 DenseMap<unsigned,unsigned> MBBVRegsMap;
1768 std::vector<LiveInterval*> NewLIs;
1769 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1771 unsigned NumValNums = li.getNumValNums();
1772 SmallVector<MachineInstr*, 4> ReMatDefs;
1773 ReMatDefs.resize(NumValNums, NULL);
1774 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1775 ReMatOrigDefs.resize(NumValNums, NULL);
1776 SmallVector<int, 4> ReMatIds;
1777 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1778 BitVector ReMatDelete(NumValNums);
1779 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1781 // Spilling a split live interval. It cannot be split any further. Also,
1782 // it's also guaranteed to be a single val# / range interval.
1783 if (vrm.getPreSplitReg(li.reg)) {
1784 vrm.setIsSplitFromReg(li.reg, 0);
1785 // Unset the split kill marker on the last use.
1786 unsigned KillIdx = vrm.getKillPoint(li.reg);
1788 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1789 assert(KillMI && "Last use disappeared?");
1790 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1791 assert(KillOp != -1 && "Last use disappeared?");
1792 KillMI->getOperand(KillOp).setIsKill(false);
1794 vrm.removeKillPoint(li.reg);
1795 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1796 Slot = vrm.getStackSlot(li.reg);
1797 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1798 MachineInstr *ReMatDefMI = DefIsReMat ?
1799 vrm.getReMaterializedMI(li.reg) : NULL;
1801 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1802 bool isLoad = isLoadSS ||
1803 (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1804 bool IsFirstRange = true;
1805 for (LiveInterval::Ranges::const_iterator
1806 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1807 // If this is a split live interval with multiple ranges, it means there
1808 // are two-address instructions that re-defined the value. Only the
1809 // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs, SSWeight);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs, SSWeight);
      }
      IsFirstRange = false;
1827 SSWeight = 0.0f; // Already accounted for when split.
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
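  // The general case: decide per value number whether its def can be
  // rematerialized, figure out whether a stack slot is needed at all, and then
  // rewrite every def and use of the original interval.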
1832 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;
1837 bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
1840 const VNInfo *VNI = *i;
1841 unsigned VN = VNI->id;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
1845 // Is the def for the val# rematerializable?
1846 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1847 ? 0 : getInstructionFromIndex(DefIdx);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1850 // Remember how to remat the def of this val#.
1851 ReMatOrigDefs[VN] = ReMatDefMI;
1852 // Original def may be modified so we have to make a copy here.
1853 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1854 ClonedMIs.push_back(Clone);
1855 ReMatDefs[VN] = Clone;
      bool CanDelete = true;
      if (VNI->hasPHIKill) {
        // A kill is a phi node, so not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
1875 // One stack slot per live interval.
1876 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1877 Slot = vrm.assignVirt2StackSlot(li.reg);
1879 // Create new intervals and rewrite defs and uses.
1880 for (LiveInterval::Ranges::const_iterator
1881 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1882 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1883 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1884 bool DefIsReMat = ReMatDefMI != NULL;
1885 bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1888 bool isLoad = isLoadSS ||
1889 (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1890 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1891 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1892 CanDelete, vrm, rc, ReMatIds, loopInfo,
1893 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1894 MBBVRegsMap, NewLIs, SSWeight);
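  // At this point NewLIs holds the intervals created by the rewrite. When
  // splitting, SpillMBBs/SpillIdxes and RestoreMBBs/RestoreIdxes record the
  // blocks and indices where explicit spill / restore code may still be
  // needed; they are processed below.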
  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
  SmallPtrSet<LiveInterval*, 4> AddedKill;
1904 SmallVector<unsigned, 2> Ops;
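  // Walk the blocks that still need a spill of the value. Where possible the
  // store is folded into the defining instruction; otherwise a spill point is
  // recorded for the spiller. The get*Index helpers used here assume the usual
  // four-slot numbering (load, use, def, store) per instruction.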
1905 if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
1908 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1909 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1910 std::vector<SRInfo> &spills = SpillIdxes[Id];
1911 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1912 int index = spills[i].index;
1913 unsigned VReg = spills[i].vreg;
1914 LiveInterval &nI = getOrCreateInterval(VReg);
1915 bool isReMat = vrm.isReMaterialized(VReg);
1916 MachineInstr *MI = getInstructionFromIndex(index);
1917 bool CanFold = false;
1918 bool FoundUse = false;
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;
            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (MO.isUse() &&
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use isn't
              // the first and only use in the BB, then we can't fold it.
              // FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
1942 // Fold the store into the def if possible.
1943 bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg)) {
            Folded = true;
            if (FoundUse) {
1948 // Also folded uses, do not issue a load.
1949 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1950 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1959 bool isKill = LR->end == getStoreIndex(index);
1960 if (!MI->registerDefIsDead(nI.reg))
1961 // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
1967 // Update spill slot weight.
1969 SSWeight += getSpillWeight(true, false, loopDepth);
1971 Id = SpillMBBs.find_next(Id);
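  // Do the same for restores: try to fold the reload (or the rematerialized
  // def, if it happens to be a load) into the using instruction; otherwise
  // record a restore point so the spiller will insert a load / remat.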
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
1977 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1978 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1980 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1981 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      if (index == -1)
        continue;
      unsigned VReg = restores[i].vreg;
1986 LiveInterval &nI = getOrCreateInterval(VReg);
1987 bool isReMat = vrm.isReMaterialized(VReg);
1988 MachineInstr *MI = getInstructionFromIndex(index);
1989 bool CanFold = false;
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;
          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
2008 // Fold the load into the use if possible.
2009 bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
2016 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2017 // If the rematerializable def is a load, also try to fold it.
2018 if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
2019 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2020 Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the
            // register interval's spill weight to HUGE_VALF to prevent it
            // from being spilled.
2027 LiveInterval &ImpLi = getInterval(ImpUse);
2028 ImpLi.weight = HUGE_VALF;
2029 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);
2040 // Update spill slot weight.
2042 SSWeight += getSpillWeight(false, true, loopDepth);
2044 Id = RestoreMBBs.find_next(Id);
  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
2049 std::vector<LiveInterval*> RetNewLIs;
2050 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
2053 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2054 if (!AddedKill.count(LI)) {
2055 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2056 unsigned LastUseIdx = getBaseIndex(LR->end);
2057 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2058 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2059 assert(UseIdx != -1);
2060 if (LastUse->getOperand(UseIdx).isImplicit() ||
2061 LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
2062 LastUse->getOperand(UseIdx).setIsKill();
2063 vrm.addKillPoint(LI->reg, LastUseIdx);
2066 RetNewLIs.push_back(LI);
  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}
2074 /// hasAllocatableSuperReg - Return true if the specified physical register has
2075 /// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}
2083 /// getRepresentativeReg - Find the largest super register of the specified
2084 /// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
2098 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2099 /// specified interval that conflicts with the specified physical register.
2100 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2101 unsigned PhysReg) const {
2102 unsigned NumConflicts = 0;
2103 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2104 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2105 E = mri_->reg_end(); I != E; ++I) {
2106 MachineOperand &O = I.getOperand();
2107 MachineInstr *MI = O.getParent();
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}
2115 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2116 /// around all defs and uses of the specified interval.
2117 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2118 unsigned PhysReg, VirtRegMap &vrm) {
2119 unsigned SpillReg = getRepresentativeReg(PhysReg);
2121 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers that alias PhysReg but are not sub-registers of
    // the chosen representative super register, assert; we can't handle that
    // case yet.
2125 assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
2126 tri_->isSuperRegister(*AS, SpillReg));
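  // Visit every instruction that defines or uses the virtual register. If the
  // chosen physical register is live across such an instruction, record an
  // emergency spill there and remove the instruction's slots from its (and any
  // sub-register's) live interval.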
2128 LiveInterval &pli = getInterval(SpillReg);
2129 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2130 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2131 E = mri_->reg_end(); I != E; ++I) {
2132 MachineOperand &O = I.getOperand();
2133 MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    unsigned Index = getInstructionIndex(MI);
2138 if (pli.liveAt(Index)) {
2139 vrm.addEmergencySpill(SpillReg, MI);
2140 pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2141 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2142 if (!hasInterval(*AS))
2144 LiveInterval &spli = getInterval(*AS);
2145 if (spli.liveAt(Index))
2146 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
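/// addLiveRangeToEndOfBlock - Extend the live interval of the given register
/// from its def by startInst to the end of startInst's basic block.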
2152 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2153 MachineInstr* startInst) {
2154 LiveInterval& Interval = getOrCreateInterval(reg);
2155 VNInfo* VN = Interval.getNextValue(
2156 getInstructionIndex(startInst) + InstrSlots::DEF,
2157 startInst, getVNInfoAllocator());
2158 VN->hasPHIKill = true;
2159 VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2160 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2161 getMBBEndIdx(startInst->getParent()) + 1, VN);
  Interval.addRange(LR);
  return LR;
}