1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineLoopInfo.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/Passes.h"
29 #include "llvm/CodeGen/PseudoSourceValue.h"
30 #include "llvm/Target/TargetRegisterInfo.h"
31 #include "llvm/Target/TargetInstrInfo.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include "llvm/Support/CommandLine.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/ADT/Statistic.h"
36 #include "llvm/ADT/STLExtras.h"
41 // Hidden options for help debugging.
// Turn off rematerialization of trivially recomputable values entirely.
42 static cl::opt<bool> DisableReMat("disable-rematerialization",
43 cl::init(false), cl::Hidden);
// Allow live intervals to be split at basic-block boundaries (on by default).
45 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
46 cl::init(true), cl::Hidden);
// Upper bound on the number of interval splits; -1 means no limit.
47 static cl::opt<int> SplitLimit("split-limit",
48 cl::init(-1), cl::Hidden);
// Enable the extra target-independent rematerialization checks below.
50 static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
52 STATISTIC(numIntervals, "Number of original intervals");
53 STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
54 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
55 STATISTIC(numSplits , "Number of intervals split");
// Pass identification and registration boilerplate.
57 char LiveIntervals::ID = 0;
58 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// Declare the analyses this pass requires and the ones it keeps valid.
60 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
61 AU.addRequired<AliasAnalysis>();
62 AU.addPreserved<AliasAnalysis>();
// LiveVariables is both consumed here and preserved for later passes.
63 AU.addPreserved<LiveVariables>();
64 AU.addRequired<LiveVariables>();
65 AU.addPreservedID(MachineLoopInfoID);
66 AU.addPreservedID(MachineDominatorsID);
// PHI elimination and two-address lowering must run before this pass.
67 AU.addPreservedID(PHIEliminationID);
68 AU.addRequiredID(PHIEliminationID);
69 AU.addRequiredID(TwoAddressInstructionPassID);
70 MachineFunctionPass::getAnalysisUsage(AU);
// Free all per-function state owned by this analysis.
// NOTE(review): some lines of this function (the map clears, the
// ClonedMIs.pop_back(), and closing braces) are not visible in this excerpt.
73 void LiveIntervals::releaseMemory() {
79 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
80 VNInfoAllocator.Reset();
// Delete machine instructions that were cloned for rematerialization.
81 while (!ClonedMIs.empty()) {
82 MachineInstr *MI = ClonedMIs.back();
84 mf_->DeleteMachineInstr(MI);
// Renumber all machine instructions and basic blocks, then remap every
// existing live range and value number onto the new numbering.  Indices
// belonging to erased instructions are moved to the nearest surviving
// instruction (or the block boundary).
88 void LiveIntervals::computeNumbering() {
89 Index2MiMap OldI2MI = i2miMap_;
90 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
99 // Number MachineInstrs and MachineBasicBlocks.
100 // Initialize MBB indexes to a sentinel.
101 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
103 unsigned MIIndex = 0;
104 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
106 unsigned StartIdx = MIIndex;
108 // Insert an empty slot at the beginning of each block.
109 MIIndex += InstrSlots::NUM;
110 i2miMap_.push_back(0);
112 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
114 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
115 assert(inserted && "multiple MachineInstr -> index mappings");
116 i2miMap_.push_back(I);
117 MIIndex += InstrSlots::NUM;
120 // Insert an empty slot after every instruction.
121 MIIndex += InstrSlots::NUM;
122 i2miMap_.push_back(0);
125 // Set the MBB2IdxMap entry for this MBB.
126 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
127 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
129 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
// If there was an old numbering, remap all live ranges onto the new one.
131 if (!OldI2MI.empty())
132 for (iterator OI = begin(), OE = end(); OI != OE; ++OI)
133 for (LiveInterval::iterator LI = OI->second.begin(),
134 LE = OI->second.end(); LI != LE; ++LI) {
136 // Remap the start index of the live range to the corresponding new
137 // number, or our best guess at what it _should_ correspond to if the
138 // original instruction has been erased. This is either the following
139 // instruction or its predecessor.
140 unsigned index = LI->start / InstrSlots::NUM;
141 unsigned offset = LI->start % InstrSlots::NUM;
142 if (offset == InstrSlots::LOAD) {
143 std::vector<IdxMBBPair>::const_iterator I =
144 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
145 // Take the pair containing the index
146 std::vector<IdxMBBPair>::const_iterator J =
147 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
149 LI->start = getMBBStartIdx(J->second);
151 LI->start = mi2iMap_[OldI2MI[index]] + offset;
154 // Remap the ending index in the same way that we remapped the start,
155 // except for the final step where we always map to the immediately
156 // following instruction.
157 index = (LI->end - 1) / InstrSlots::NUM;
158 offset = LI->end % InstrSlots::NUM;
159 if (offset == InstrSlots::USE) {
160 std::vector<IdxMBBPair>::const_iterator I =
161 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
162 // Take the pair containing the index
163 std::vector<IdxMBBPair>::const_iterator J =
164 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
166 LI->end = getMBBEndIdx(J->second) + 1;
168 unsigned idx = index;
169 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
171 if (index != OldI2MI.size())
172 LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
174 LI->end = InstrSlots::NUM * i2miMap_.size();
177 // Remap the VNInfo def index, which works the same as the
178 // start indices above.
179 VNInfo* vni = LI->valno;
180 index = vni->def / InstrSlots::NUM;
181 offset = vni->def % InstrSlots::NUM;
182 if (offset == InstrSlots::LOAD) {
183 std::vector<IdxMBBPair>::const_iterator I =
184 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
185 // Take the pair containing the index
186 std::vector<IdxMBBPair>::const_iterator J =
187 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
189 vni->def = getMBBStartIdx(J->second);
192 vni->def = mi2iMap_[OldI2MI[index]] + offset;
195 // Remap the VNInfo kill indices, which works the same as
196 // the end indices above.
197 for (size_t i = 0; i < vni->kills.size(); ++i) {
198 index = (vni->kills[i]-1) / InstrSlots::NUM;
199 offset = vni->kills[i] % InstrSlots::NUM;
200 if (offset == InstrSlots::USE) {
201 std::vector<IdxMBBPair>::const_iterator I =
202 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
203 // Take the pair containing the index
204 std::vector<IdxMBBPair>::const_iterator J =
205 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
207 vni->kills[i] = getMBBEndIdx(J->second) + 1;
209 unsigned idx = index;
// BUGFIX: removed an unguarded `while (!OldI2MI[index]) ++index;` that
// preceded the bounded scan below.  It performed the identical skip of
// erased instructions but without the bounds check, so it could read
// past the end of OldI2MI (undefined behavior) when the tail of the old
// index map contains only erased entries.  The bounded loop below is
// sufficient on its own.
211 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
213 if (index != OldI2MI.size())
214 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
215 (idx == index ? offset : 0);
217 vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
223 /// runOnMachineFunction - Register allocate the whole function
// NOTE(review): several lines of this function are not visible in this
// excerpt — in particular the assignment of mf_ (presumably `mf_ = &fn;`
// before its first use below), the calls that compute the numbering and
// the intervals, and the final return.  Confirm against the full file.
225 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
// Cache pointers to the analyses and target hooks used throughout.
227 mri_ = &mf_->getRegInfo();
228 tm_ = &fn.getTarget();
229 tri_ = tm_->getRegisterInfo();
230 tii_ = tm_->getInstrInfo();
231 aa_ = &getAnalysis<AliasAnalysis>();
232 lv_ = &getAnalysis<LiveVariables>();
233 allocatableRegs_ = tri_->getAllocatableSet(fn);
238 numIntervals += getNumIntervals();
// Debug dump of the computed intervals.
240 DOUT << "********** INTERVALS **********\n";
241 for (iterator I = begin(), E = end(); I != E; ++I) {
242 I->second.print(DOUT, tri_);
246 numIntervalsAfter += getNumIntervals();
251 /// print - Implement the dump method.
252 void LiveIntervals::print(std::ostream &O, const Module* ) const {
253 O << "********** INTERVALS **********\n";
// Print every computed live interval.
254 for (const_iterator I = begin(), E = end(); I != E; ++I) {
255 I->second.print(O, tri_);
// Then print each machine instruction prefixed with its assigned index.
259 O << "********** MACHINEINSTRS **********\n";
260 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
261 mbbi != mbbe; ++mbbi) {
262 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
263 for (MachineBasicBlock::iterator mii = mbbi->begin(),
264 mie = mbbi->end(); mii != mie; ++mii) {
265 O << getInstructionIndex(mii) << '\t' << *mii;
270 /// conflictsWithPhysRegDef - Returns true if the specified register
271 /// is defined during the duration of the specified interval.
272 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
273 VirtRegMap &vrm, unsigned reg) {
// Walk every instruction index covered by each of the interval's ranges.
274 for (LiveInterval::Ranges::const_iterator
275 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
276 for (unsigned index = getBaseIndex(I->start),
277 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
278 index += InstrSlots::NUM) {
279 // skip deleted instructions
280 while (index != end && !getInstructionFromIndex(index))
281 index += InstrSlots::NUM;
282 if (index == end) break;
284 MachineInstr *MI = getInstructionFromIndex(index);
// Copies to/from the interval's own register are not treated as conflicts.
285 unsigned SrcReg, DstReg;
286 if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
287 if (SrcReg == li.reg || DstReg == li.reg)
// Examine each register operand, mapping virtual registers through the
// VirtRegMap to their assigned physical register, and test for overlap.
289 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
290 MachineOperand& mop = MI->getOperand(i);
291 if (!mop.isRegister())
293 unsigned PhysReg = mop.getReg();
294 if (PhysReg == 0 || PhysReg == li.reg)
296 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
297 if (!vrm.hasPhys(PhysReg))
299 PhysReg = vrm.getPhys(PhysReg);
301 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
// Print a register to cerr: physical registers by target name, virtual
// registers as "%reg<N>".
310 void LiveIntervals::printRegName(unsigned reg) const {
311 if (TargetRegisterInfo::isPhysicalRegister(reg))
312 cerr << tri_->getName(reg);
314 cerr << "%reg" << reg;
// Extend/create the live interval for a virtual register at a def.
// Handles three cases: the first def of a vreg, a redef by a two-address
// instruction, and redefs produced by PHI elimination.
// NOTE(review): the parameter list is split across missing lines — MOIdx
// (used below) is presumably declared on an elided signature line.
317 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
318 MachineBasicBlock::iterator mi,
319 unsigned MIIdx, MachineOperand& MO,
321 LiveInterval &interval) {
322 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
323 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
325 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
326 DOUT << "is a implicit_def\n";
330 // Virtual registers may be defined multiple times (due to phi
331 // elimination and 2-addr elimination). Much of what we do only has to be
332 // done once for the vreg. We use an empty interval to detect the first
333 // time we see a vreg.
334 if (interval.empty()) {
335 // Get the Idx of the defining instructions.
336 unsigned defIndex = getDefIndex(MIIdx);
// Record the copy instruction (if any) that defines this value, so the
// coalescer can later identify the copy source.
338 MachineInstr *CopyMI = NULL;
339 unsigned SrcReg, DstReg;
340 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
341 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
342 tii_->isMoveInstr(*mi, SrcReg, DstReg))
344 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
346 assert(ValNo->id == 0 && "First value in interval is not 0?");
348 // Loop over all of the blocks that the vreg is defined in. There are
349 // two cases we have to handle here. The most common case is a vreg
350 // whose lifetime is contained within a basic block. In this case there
351 // will be a single kill, in MBB, which comes after the definition.
352 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
353 // FIXME: what about dead vars?
355 if (vi.Kills[0] != mi)
356 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
358 killIdx = defIndex+1;
360 // If the kill happens after the definition, we have an intra-block
362 if (killIdx > defIndex) {
363 assert(vi.AliveBlocks.none() &&
364 "Shouldn't be alive across any blocks!");
365 LiveRange LR(defIndex, killIdx, ValNo);
366 interval.addRange(LR);
367 DOUT << " +" << LR << "\n";
368 interval.addKill(ValNo, killIdx);
373 // The other case we handle is when a virtual register lives to the end
374 // of the defining block, potentially live across some blocks, then is
375 // live into some number of blocks, but gets killed. Start by adding a
376 // range that goes from this definition to the end of the defining block.
377 LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
378 DOUT << " +" << NewLR;
379 interval.addRange(NewLR);
381 // Iterate over all of the blocks that the variable is completely
382 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
384 for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
385 if (vi.AliveBlocks[i]) {
386 LiveRange LR(getMBBStartIdx(i),
387 getMBBEndIdx(i)+1, // MBB ends at -1.
389 interval.addRange(LR);
394 // Finally, this virtual register is live from the start of any killing
395 // block to the 'use' slot of the killing instruction.
396 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
397 MachineInstr *Kill = vi.Kills[i];
398 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
399 LiveRange LR(getMBBStartIdx(Kill->getParent()),
401 interval.addRange(LR);
402 interval.addKill(ValNo, killIdx);
407 // If this is the second time we see a virtual register definition, it
408 // must be due to phi elimination or two addr elimination. If this is
409 // the result of two address elimination, then the vreg is one of the
410 // def-and-use register operand.
411 if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
412 // If this is a two-address definition, then we have already processed
413 // the live range. The only problem is that we didn't realize there
414 // are actually two values in the live interval. Because of this we
415 // need to take the LiveRegion that defines this register and split it
417 assert(interval.containsOneValue());
418 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
419 unsigned RedefIndex = getDefIndex(MIIdx);
421 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
422 VNInfo *OldValNo = OldLR->valno;
424 // Delete the initial value, which should be short and continuous,
425 // because the 2-addr copy must be in the same MBB as the redef.
426 interval.removeRange(DefIndex, RedefIndex);
428 // Two-address vregs should always only be redefined once. This means
429 // that at this point, there should be exactly one value number in it.
430 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
432 // The new value number (#1) is defined by the instruction we claimed
434 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
437 // Value#0 is now defined by the 2-addr instruction.
438 OldValNo->def = RedefIndex;
441 // Add the new live interval which replaces the range for the input copy.
442 LiveRange LR(DefIndex, RedefIndex, ValNo);
443 DOUT << " replace range with " << LR;
444 interval.addRange(LR);
445 interval.addKill(ValNo, RedefIndex);
447 // If this redefinition is dead, we need to add a dummy unit live
448 // range covering the def slot.
450 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
453 interval.print(DOUT, tri_);
456 // Otherwise, this must be because of phi elimination. If this is the
457 // first redefinition of the vreg that we have seen, go back and change
458 // the live range in the PHI block to be a different value number.
459 if (interval.containsOneValue()) {
460 assert(vi.Kills.size() == 1 &&
461 "PHI elimination vreg should have one kill, the PHI itself!");
463 // Remove the old range that we now know has an incorrect number.
464 VNInfo *VNI = interval.getValNumInfo(0);
465 MachineInstr *Killer = vi.Kills[0];
466 unsigned Start = getMBBStartIdx(Killer->getParent());
467 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
468 DOUT << " Removing [" << Start << "," << End << "] from: ";
469 interval.print(DOUT, tri_); DOUT << "\n";
470 interval.removeRange(Start, End);
471 VNI->hasPHIKill = true;
472 DOUT << " RESULT: "; interval.print(DOUT, tri_);
474 // Replace the interval with one of a NEW value number. Note that this
475 // value number isn't actually defined by an instruction, weird huh? :)
476 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
477 DOUT << " replace range with " << LR;
478 interval.addRange(LR);
479 interval.addKill(LR.valno, End);
480 DOUT << " RESULT: "; interval.print(DOUT, tri_);
483 // In the case of PHI elimination, each variable definition is only
484 // live until the end of the block. We've already taken care of the
485 // rest of the live range.
486 unsigned defIndex = getDefIndex(MIIdx);
489 MachineInstr *CopyMI = NULL;
490 unsigned SrcReg, DstReg;
491 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
492 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
493 tii_->isMoveInstr(*mi, SrcReg, DstReg))
495 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
// The PHI-eliminated def lives exactly to the end of its defining block.
497 unsigned killIndex = getMBBEndIdx(mbb) + 1;
498 LiveRange LR(defIndex, killIndex, ValNo);
499 interval.addRange(LR);
500 interval.addKill(ValNo, killIndex);
501 ValNo->hasPHIKill = true;
// Create/extend the live range for a physical-register def.  Physregs
// never live across blocks, so the range ends within the defining block.
// NOTE(review): the MIIdx parameter used below is presumably declared on
// an elided signature line.
509 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
510 MachineBasicBlock::iterator mi,
513 LiveInterval &interval,
514 MachineInstr *CopyMI) {
515 // A physical register cannot be live across basic block, so its
516 // lifetime must end somewhere in its defining basic block.
517 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
519 unsigned baseIndex = MIIdx;
520 unsigned start = getDefIndex(baseIndex);
521 unsigned end = start;
523 // If it is not used after definition, it is considered dead at
524 // the instruction defining it. Hence its interval is:
525 // [defSlot(def), defSlot(def)+1)
528 end = getDefIndex(start) + 1;
532 // If it is not dead on definition, it must be killed by a
533 // subsequent instruction. Hence its interval is:
534 // [defSlot(def), useSlot(kill)+1)
535 baseIndex += InstrSlots::NUM;
// Scan forward for the kill or the next redefinition, skipping index
// slots that have no instruction (erased instructions / block padding).
536 while (++mi != MBB->end()) {
537 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
538 getInstructionFromIndex(baseIndex) == 0)
539 baseIndex += InstrSlots::NUM;
540 if (mi->killsRegister(interval.reg, tri_)) {
542 end = getUseIndex(baseIndex) + 1;
544 } else if (mi->modifiesRegister(interval.reg, tri_)) {
545 // Another instruction redefines the register before it is ever read.
546 // Then the register is essentially dead at the instruction that defines
547 // it. Hence its interval is:
548 // [defSlot(def), defSlot(def)+1)
550 end = getDefIndex(start) + 1;
554 baseIndex += InstrSlots::NUM;
557 // The only case we should have a dead physreg here without a killing or
558 // instruction where we know it's dead is if it is live-in to the function
560 assert(!CopyMI && "physreg was not killed in defining block!");
561 end = getDefIndex(start) + 1; // It's dead.
564 assert(start < end && "did not find end of interval?");
566 // Already exists? Extend old live interval.
567 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
568 VNInfo *ValNo = (OldLR != interval.end())
569 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
570 LiveRange LR(start, end, ValNo);
571 interval.addRange(LR);
572 interval.addKill(LR.valno, end);
573 DOUT << " +" << LR << '\n';
// Dispatch a register def to the virtual- or physical-register handler.
// A physreg def also defines all of its sub-registers.
// NOTE(review): MIIdx, MO, and MOIdx used below are presumably declared
// on elided signature lines.
576 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
577 MachineBasicBlock::iterator MI,
581 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
582 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
583 getOrCreateInterval(MO.getReg()));
// Only allocatable physical registers get intervals.
584 else if (allocatableRegs_[MO.getReg()]) {
585 MachineInstr *CopyMI = NULL;
586 unsigned SrcReg, DstReg;
587 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
588 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
589 tii_->isMoveInstr(*MI, SrcReg, DstReg))
591 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
592 getOrCreateInterval(MO.getReg()), CopyMI);
593 // Def of a register also defines its sub-registers.
594 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
595 // If MI also modifies the sub-register explicitly, avoid processing it
596 // more than once. Do not pass in TRI here so it checks for exact match.
597 if (!MI->modifiesRegister(*AS))
598 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
599 getOrCreateInterval(*AS), 0);
// Create a live range for a register that is live-in to MBB, extending
// from the block start to its first kill (or redef) in the block.
// NOTE(review): the MIIdx parameter used below is presumably declared on
// an elided signature line.
603 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
605 LiveInterval &interval, bool isAlias) {
606 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
608 // Look for kills, if it reaches a def before it's killed, then it shouldn't
609 // be considered a livein.
610 MachineBasicBlock::iterator mi = MBB->begin();
611 unsigned baseIndex = MIIdx;
612 unsigned start = baseIndex;
613 unsigned end = start;
614 while (mi != MBB->end()) {
615 if (mi->killsRegister(interval.reg, tri_)) {
617 end = getUseIndex(baseIndex) + 1;
619 } else if (mi->modifiesRegister(interval.reg, tri_)) {
620 // Another instruction redefines the register before it is ever read.
621 // Then the register is essentially dead at the instruction that defines
622 // it. Hence its interval is:
623 // [defSlot(def), defSlot(def)+1)
625 end = getDefIndex(start) + 1;
// Advance to the next real instruction index, skipping empty slots.
629 baseIndex += InstrSlots::NUM;
630 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
631 getInstructionFromIndex(baseIndex) == 0)
632 baseIndex += InstrSlots::NUM;
637 // Live-in register might not be used at all.
641 end = getDefIndex(MIIdx) + 1;
643 DOUT << " live through";
// Live-in values have no defining instruction, hence the null CopyMI.
648 LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
649 interval.addRange(LR);
650 interval.addKill(LR.valno, end);
651 DOUT << " +" << LR << '\n';
654 /// computeIntervals - computes the live intervals for virtual
655 /// registers. for some ordering of the machine instructions [1,N] a
656 /// live interval is an interval [i, j) where 1 <= i <= j < N for
657 /// which a variable is live
658 void LiveIntervals::computeIntervals() {
659 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
660 << "********** Function: "
661 << ((Value*)mf_->getFunction())->getName() << '\n';
662 // Track the index of the current machine instr.
663 unsigned MIIndex = 0;
665 // Skip over empty initial indices.
666 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
667 getInstructionFromIndex(MIIndex) == 0)
668 MIIndex += InstrSlots::NUM;
670 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
672 MachineBasicBlock *MBB = MBBI;
673 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
675 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
677 // Create intervals for live-ins to this BB first.
678 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
679 LE = MBB->livein_end(); LI != LE; ++LI) {
680 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
681 // Multiple live-ins can alias the same register.
682 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
683 if (!hasInterval(*AS))
684 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
// Process each instruction's register defs, building intervals.
688 for (; MI != miEnd; ++MI) {
689 DOUT << MIIndex << "\t" << *MI;
// Iterate operands in reverse so implicit defs are seen last.
692 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
693 MachineOperand &MO = MI->getOperand(i);
694 // handle register defs - build intervals
695 if (MO.isRegister() && MO.getReg() && MO.isDef())
696 handleRegisterDef(MBB, MI, MIIndex, MO, i);
699 MIIndex += InstrSlots::NUM;
701 // Skip over empty indices.
702 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
703 getInstructionFromIndex(MIIndex) == 0)
704 MIIndex += InstrSlots::NUM;
// Collect the basic blocks whose start index falls inside the given live
// range (i.e. blocks the range is live into).  Returns whether any were
// found — NOTE(review): the return computation is on elided lines.
709 bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
710 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
// Binary-search for the first block starting at or after LR.start.
711 std::vector<IdxMBBPair>::const_iterator I =
712 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
715 while (I != Idx2MBBMap.end()) {
716 if (LR.end <= I->first)
718 MBBs.push_back(I->second);
// Construct a fresh LiveInterval for 'reg'; physical registers get a
// different initial spill weight than virtual registers (the weight
// values are on an elided line — confirm against the full file).
726 LiveInterval LiveIntervals::createInterval(unsigned reg) {
727 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
729 return LiveInterval(reg, Weight);
732 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
733 /// copy field and returns the source register that defines it.
734 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
// EXTRACT_SUBREG's source is operand 1; INSERT_SUBREG's inserted value
// is operand 2; otherwise ask the target to decompose the move.
738 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
739 return VNI->copy->getOperand(1).getReg();
740 if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
741 return VNI->copy->getOperand(2).getReg();
742 unsigned SrcReg, DstReg;
743 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
745 assert(0 && "Unrecognized copy instruction!");
749 //===----------------------------------------------------------------------===//
750 // Register allocator hooks.
753 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
754 /// allow one) virtual register operand, then its uses are implicitly using
755 /// the register. Returns the virtual register.
756 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
757 MachineInstr *MI) const {
// Scan the use operands for a register other than li.reg itself.
759 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
760 MachineOperand &MO = MI->getOperand(i);
761 if (!MO.isRegister() || !MO.isUse())
763 unsigned Reg = MO.getReg();
764 if (Reg == 0 || Reg == li.reg)
766 // FIXME: For now, only remat MI with at most one register operand.
768 "Can't rematerialize instruction with multiple register operand!");
777 /// isValNoAvailableAt - Return true if the val# of the specified interval
778 /// which reaches the given instruction also reaches the specified use index.
779 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
780 unsigned UseIdx) const {
781 unsigned Index = getInstructionIndex(MI);
// NOTE(review): FindLiveRangeContaining(Index) is dereferenced without a
// null/end check — callers apparently guarantee MI lies inside li; verify.
782 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
783 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
784 return UI != li.end() && UI->valno == ValNo;
787 /// isReMaterializable - Returns true if the definition MI of the specified
788 /// val# of the specified interval is re-materializable.
789 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
790 const VNInfo *ValNo, MachineInstr *MI,
// IMPLICIT_DEF is always trivially rematerializable.
795 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
// Loads from immutable (fixed) stack slots can always be rematerialized.
799 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
800 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
801 // FIXME: Let target specific isReallyTriviallyReMaterializable determines
802 // this but remember this is not safe to fold into a two-address
804 // This is a load from fixed stack slot. It can be rematerialized.
807 // If the target-specific rules don't identify an instruction as
808 // being trivially rematerializable, use some target-independent
810 if (!MI->getDesc().isRematerializable() ||
811 !tii_->isTriviallyReMaterializable(MI)) {
812 if (!EnableAggressiveRemat)
815 // If the instruction accesses memory but the memoperands have been lost,
816 // we can't analyze it.
817 const TargetInstrDesc &TID = MI->getDesc();
818 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
821 // Avoid instructions obviously unsafe for remat.
822 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
825 // If the instruction accesses memory and the memory could be non-constant,
826 // assume the instruction is not rematerializable.
827 for (std::list<MachineMemOperand>::const_iterator I = MI->memoperands_begin(),
828 E = MI->memoperands_end(); I != E; ++I) {
829 const MachineMemOperand &MMO = *I;
830 if (MMO.isVolatile() || MMO.isStore())
832 const Value *V = MMO.getValue();
// Pseudo source values (constant pool, fixed stack, etc.) are constant
// only if they say so; otherwise ask alias analysis about the pointer.
835 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
836 if (!PSV->isConstant(mf_->getFrameInfo()))
838 } else if (!aa_->pointsToConstantMemory(V))
842 // If any of the registers accessed are non-constant, conservatively assume
843 // the instruction is not rematerializable.
845 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
846 const MachineOperand &MO = MI->getOperand(i);
848 unsigned Reg = MO.getReg();
851 if (TargetRegisterInfo::isPhysicalRegister(Reg))
854 // Only allow one def, and that in the first operand.
855 if (MO.isDef() != (i == 0))
858 // Only allow constant-valued registers.
859 bool IsLiveIn = mri_->isLiveIn(Reg);
860 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
863 // For the def, it should be the only def.
864 if (MO.isDef() && (next(I) != E || IsLiveIn))
868 // Only allow one use other register use, as that's all the
869 // remat mechanisms support currently.
873 else if (Reg != ImpUse)
876 // For uses, there should be only one associate def.
877 if (I != E && (next(I) != E || IsLiveIn))
// Finally, if there is an implicit register use, every use of the remat'ed
// value must be reached by the same val# of the implicitly-used interval.
884 unsigned ImpUse = getReMatImplicitUse(li, MI);
886 const LiveInterval &ImpLi = getInterval(ImpUse);
887 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
888 re = mri_->use_end(); ri != re; ++ri) {
889 MachineInstr *UseMI = &*ri;
890 unsigned UseIdx = getInstructionIndex(UseMI);
891 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
893 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
900 /// isReMaterializable - Returns true if every definition of MI of every
901 /// val# of the specified interval is re-materializable.
902 bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
// Check each value number's defining instruction individually.
904 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
906 const VNInfo *VNI = *i;
907 unsigned DefIdx = VNI->def;
909 continue; // Dead val#.
910 // Is the def for the val# rematerializable?
913 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
914 bool DefIsLoad = false;
916 !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
923 /// FilterFoldedOps - Filter out two-address use operands. Return
924 /// true if it finds any issue with the operands that ought to prevent
// NOTE(review): the MRInfo output parameter used below is presumably
// declared on an elided signature line; it accumulates VirtRegMap
// mod/ref flags for the folded operands.
926 static bool FilterFoldedOps(MachineInstr *MI,
927 SmallVector<unsigned, 2> &Ops,
929 SmallVector<unsigned, 2> &FoldOps) {
930 const TargetInstrDesc &TID = MI->getDesc();
933 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
934 unsigned OpIdx = Ops[i];
935 MachineOperand &MO = MI->getOperand(OpIdx);
936 // FIXME: fold subreg use.
940 MRInfo |= (unsigned)VirtRegMap::isMod;
942 // Filter out two-address use operand(s).
943 if (!MO.isImplicit() &&
944 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
945 MRInfo = VirtRegMap::isModRef;
948 MRInfo |= (unsigned)VirtRegMap::isRef;
950 FoldOps.push_back(OpIdx);
956 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
957 /// slot / to reg or any rematerialized load into ith operand of specified
958 /// MI. If it is successful, MI is updated with the newly created MI and
960 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
961 VirtRegMap &vrm, MachineInstr *DefMI,
963 SmallVector<unsigned, 2> &Ops,
964 bool isSS, int Slot, unsigned Reg) {
965 // If it is an implicit def instruction, just delete it.
966 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
967 RemoveMachineInstrFromMaps(MI);
968 vrm.RemoveMachineInstrFromMaps(MI);
969 MI->eraseFromParent();
974 // Filter the list of operand indexes that are to be folded. Abort if
975 // any operand will prevent folding.
977 SmallVector<unsigned, 2> FoldOps;
978 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
981 // The only time it's safe to fold into a two address instruction is when
982 // it's folding reload and spill from / into a spill stack slot.
983 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Ask the target to fold either the stack slot or the remat'ed def.
986 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
987 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
989 // Remember this instruction uses the spill slot.
990 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
992 // Attempt to fold the memory reference into the instruction. If
993 // we can do this, we don't need to insert spill code.
994 MachineBasicBlock &MBB = *MI->getParent();
995 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
996 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
997 vrm.transferSpillPts(MI, fmi);
998 vrm.transferRestorePts(MI, fmi);
999 vrm.transferEmergencySpills(MI, fmi);
// Give the folded instruction the old instruction's index, and splice
// it into the block in place of the original.
1001 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
1002 mi2iMap_[fmi] = InstrIdx;
1003 MI = MBB.insert(MBB.erase(MI), fmi);
1010 /// canFoldMemoryOperand - Returns true if the specified load / store
1011 /// folding is possible.
1012 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1013 SmallVector<unsigned, 2> &Ops,
1015 // Filter the list of operand indexes that are to be folded. Abort if
1016 // any operand will prevent folding.
1017 unsigned MRInfo = 0;
1018 SmallVector<unsigned, 2> FoldOps;
1019 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1022 // It's only legal to remat for a use, not a def.
1023 if (ReMat && (MRInfo & VirtRegMap::isMod))
1026 return tii_->canFoldMemoryOperand(MI, FoldOps);
1029 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1030 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1031 for (LiveInterval::Ranges::const_iterator
1032 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1033 std::vector<IdxMBBPair>::const_iterator II =
1034 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1035 if (II == Idx2MBBMap.end())
1037 if (I->end > II->first) // crossing a MBB.
1039 MBBs.insert(II->second);
1040 if (MBBs.size() > 1)
1046 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1047 /// interval on to-be re-materialized operands of MI) with new register.
1048 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1049 MachineInstr *MI, unsigned NewVReg,
1051 // There is an implicit use. That means one of the other operand is
1052 // being remat'ed and the remat'ed instruction has li.reg as an
1053 // use operand. Make sure we rewrite that as well.
1054 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1055 MachineOperand &MO = MI->getOperand(i);
1056 if (!MO.isRegister())
1058 unsigned Reg = MO.getReg();
1059 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1061 if (!vrm.isReMaterialized(Reg))
1063 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1064 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1066 UseMO->setReg(NewVReg);
1070 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1071 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// NOTE(review): this function is an elided extraction -- the embedded
// original numbering jumps in dozens of places (1077->1079 drops the
// 'VirtRegMap &vrm,' parameter that is used below; 1087->1089, 1091->1093,
// 1095->1100, 1107->1109, 1111->1115, ... drop continues, early returns,
// the 'RestartInstruction:' label targeted by the goto at original 1189,
// else branches and closing braces). Treat it as read-only documentation of
// the upstream logic; restore the missing lines from upstream before use.
1072 bool LiveIntervals::
1073 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1074 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
1075 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1076 unsigned Slot, int LdSlot,
1077 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1079 const TargetRegisterClass* rc,
1080 SmallVector<int, 4> &ReMatIds,
1081 const MachineLoopInfo *loopInfo,
1082 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1083 std::map<unsigned,unsigned> &MBBVRegsMap,
1084 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1085 MachineBasicBlock *MBB = MI->getParent();
1086 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1087 bool CanFold = false;
// Walk every operand of MI, rewriting references to li.reg.
1089 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1090 MachineOperand& mop = MI->getOperand(i);
1091 if (!mop.isRegister())
1093 unsigned Reg = mop.getReg();
1094 unsigned RegI = Reg;
1095 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1100 bool TryFold = !DefIsReMat;
1101 bool FoldSS = true; // Default behavior unless it's a remat.
1102 int FoldSlot = Slot;
1104 // If this is the rematerializable definition MI itself and
1105 // all of its uses are rematerialized, simply delete it.
1106 if (MI == ReMatOrigDefMI && CanDelete) {
1107 DOUT << "\t\t\t\tErasing re-materlizable def: ";
1109 RemoveMachineInstrFromMaps(MI);
1110 vrm.RemoveMachineInstrFromMaps(MI);
1111 MI->eraseFromParent();
1115 // If def for this use can't be rematerialized, then try folding.
1116 // If def is rematerializable and it's a load, also try folding.
1117 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1119 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1125 // Scan all of the operands of this instruction rewriting operands
1126 // to use NewVReg instead of li.reg as appropriate. We do this for
1129 // 1. If the instr reads the same spilled vreg multiple times, we
1130 // want to reuse the NewVReg.
1131 // 2. If the instr is a two-addr instruction, we are required to
1132 // keep the src/dst regs pinned.
1134 // Keep track of whether we replace a use and/or def so that we can
1135 // create the spill interval with the appropriate range.
1137 HasUse = mop.isUse();
1138 HasDef = mop.isDef();
1139 SmallVector<unsigned, 2> Ops;
// Collect later operands of the same instruction that reference the same
// register so they can all be rewritten to the one NewVReg.
1141 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1142 const MachineOperand &MOj = MI->getOperand(j);
1143 if (!MOj.isRegister())
1145 unsigned RegJ = MOj.getReg();
1146 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1150 HasUse |= MOj.isUse();
1151 HasDef |= MOj.isDef();
1155 if (HasUse && !li.liveAt(getUseIndex(index)))
1156 // Must be defined by an implicit def. It should not be spilled. Note,
1157 // this is for correctness reason. e.g.
1158 // 8 %reg1024<def> = IMPLICIT_DEF
1159 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1160 // The live range [12, 14) are not part of the r1024 live interval since
1161 // it's defined by an implicit def. It will not conflicts with live
1162 // interval of r1025. Now suppose both registers are spilled, you can
1163 // easily see a situation where both registers are reloaded before
1164 // the INSERT_SUBREG and both target registers that would overlap.
1167 // Update stack slot spill weight if we are splitting.
1168 float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
1175 // Do not fold load / store here if we are splitting. We'll find an
1176 // optimal point to insert a load / store later.
1178 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1179 Ops, FoldSS, FoldSlot, Reg)) {
1180 // Folding the load/store can completely change the instruction in
1181 // unpredictable ways, rescan it from the beginning.
1185 if (isRemoved(MI)) {
// NOTE(review): the 'RestartInstruction:' label this targets is among the
// elided lines -- it precedes the operand loop upstream.
1189 goto RestartInstruction;
1192 // We'll try to fold it later if it's profitable.
1193 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1197 // Create a new virtual register for the spill interval.
1198 bool CreatedNewVReg = false;
1200 NewVReg = mri_->createVirtualRegister(rc);
1202 CreatedNewVReg = true;
// Rewrite this operand (and any duplicates gathered in Ops) to NewVReg.
1204 mop.setReg(NewVReg);
1205 if (mop.isImplicit())
1206 rewriteImplicitOps(li, MI, NewVReg, vrm);
1208 // Reuse NewVReg for other reads.
1209 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1210 MachineOperand &mopj = MI->getOperand(Ops[j]);
1211 mopj.setReg(NewVReg);
1212 if (mopj.isImplicit())
1213 rewriteImplicitOps(li, MI, NewVReg, vrm);
1216 if (CreatedNewVReg) {
1218 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1219 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1220 // Each valnum may have its own remat id.
1221 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1223 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1225 if (!CanDelete || (HasUse && HasDef)) {
1226 // If this is a two-addr instruction then its use operands are
1227 // rematerializable but its def is not. It should be assigned a
1229 vrm.assignVirt2StackSlot(NewVReg, Slot);
1232 vrm.assignVirt2StackSlot(NewVReg, Slot);
1234 } else if (HasUse && HasDef &&
1235 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1236 // If this interval hasn't been assigned a stack slot (because earlier
1237 // def is a deleted remat def), do it now.
1238 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1239 vrm.assignVirt2StackSlot(NewVReg, Slot);
1242 // Re-matting an instruction with virtual register use. Add the
1243 // register as an implicit use on the use MI.
1244 if (DefIsReMat && ImpUse)
1245 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1247 // create a new register interval for this spill / remat.
1248 LiveInterval &nI = getOrCreateInterval(NewVReg);
1249 if (CreatedNewVReg) {
1250 NewLIs.push_back(&nI);
1251 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1253 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Add live ranges on the new interval covering this use and/or def.
1257 if (CreatedNewVReg) {
1258 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1259 nI.getNextValue(~0U, 0, VNInfoAllocator));
1263 // Extend the split live interval to this def / use.
1264 unsigned End = getUseIndex(index)+1;
1265 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1266 nI.getValNumInfo(nI.getNumValNums()-1));
1272 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1273 nI.getNextValue(~0U, 0, VNInfoAllocator));
1278 DOUT << "\t\t\t\tAdded new interval: ";
1279 nI.print(DOUT, tri_);
1284 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1286 MachineBasicBlock *MBB, unsigned Idx) const {
1287 unsigned End = getMBBEndIdx(MBB);
1288 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1289 unsigned KillIdx = VNI->kills[j];
1290 if (KillIdx > Idx && KillIdx < End)
1296 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1297 /// during spilling.
1299 struct RewriteInfo {
1304 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1305 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1308 struct RewriteInfoCompare {
1309 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1310 return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Rewrite every def / use of li.reg inside
// the live range *I, driving rewriteInstructionForSpills per instruction and
// recording per-MBB spill points (SpillIdxes/SpillMBBs) and restore points
// (RestoreIdxes/RestoreMBBs) for the spiller.
// NOTE(review): elided extraction -- the embedded original numbering jumps
// in many places (1320->1322 drops the 'VirtRegMap &vrm,' parameter used
// below; 1342->1344, 1346->1348, 1378->1380, 1398->1405, 1408->1414,
// 1419->1426, 1433->1436, ... drop iterator advancement, continues, else
// branches and closing braces). Treat as read-only; restore the missing
// lines from upstream before use.
1315 void LiveIntervals::
1316 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1317 LiveInterval::Ranges::const_iterator &I,
1318 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1319 unsigned Slot, int LdSlot,
1320 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1322 const TargetRegisterClass* rc,
1323 SmallVector<int, 4> &ReMatIds,
1324 const MachineLoopInfo *loopInfo,
1325 BitVector &SpillMBBs,
1326 std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
1327 BitVector &RestoreMBBs,
1328 std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1329 std::map<unsigned,unsigned> &MBBVRegsMap,
1330 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1331 bool AllCanFold = true;
1332 unsigned NewVReg = 0;
1333 unsigned start = getBaseIndex(I->start);
1334 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1336 // First collect all the def / use in this live range that will be rewritten.
1337 // Make sure they are sorted according to instruction index.
1338 std::vector<RewriteInfo> RewriteMIs;
1339 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1340 re = mri_->reg_end(); ri != re; ) {
1341 MachineInstr *MI = &*ri;
1342 MachineOperand &O = ri.getOperand();
1344 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1345 unsigned index = getInstructionIndex(MI);
// Ignore references that fall outside this live range.
1346 if (index < start || index >= end)
1348 if (O.isUse() && !li.liveAt(getUseIndex(index)))
1349 // Must be defined by an implicit def. It should not be spilled. Note,
1350 // this is for correctness reason. e.g.
1351 // 8 %reg1024<def> = IMPLICIT_DEF
1352 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1353 // The live range [12, 14) are not part of the r1024 live interval since
1354 // it's defined by an implicit def. It will not conflicts with live
1355 // interval of r1025. Now suppose both registers are spilled, you can
1356 // easily see a situation where both registers are reloaded before
1357 // the INSERT_SUBREG and both target registers that would overlap.
1359 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1361 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1363 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1364 // Now rewrite the defs and uses.
1365 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1366 RewriteInfo &rwi = RewriteMIs[i];
1368 unsigned index = rwi.Index;
1369 bool MIHasUse = rwi.HasUse;
1370 bool MIHasDef = rwi.HasDef;
1371 MachineInstr *MI = rwi.MI;
1372 // If MI def and/or use the same register multiple times, then there
1373 // are multiple entries.
1374 unsigned NumUses = MIHasUse;
// Coalesce the consecutive entries that refer to the same instruction.
1375 while (i != e && RewriteMIs[i].MI == MI) {
1376 assert(RewriteMIs[i].Index == index);
1377 bool isUse = RewriteMIs[i].HasUse;
1378 if (isUse) ++NumUses;
1380 MIHasDef |= RewriteMIs[i].HasDef;
1383 MachineBasicBlock *MBB = MI->getParent();
1385 if (ImpUse && MI != ReMatDefMI) {
1386 // Re-matting an instruction with virtual register use. Update the
1387 // register interval's spill weight to HUGE_VALF to prevent it from
1389 LiveInterval &ImpLi = getInterval(ImpUse);
1390 ImpLi.weight = HUGE_VALF;
// Decide whether to reuse the vreg already created for this MBB.
1393 unsigned MBBId = MBB->getNumber();
1394 unsigned ThisVReg = 0;
1396 std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
1397 if (NVI != MBBVRegsMap.end()) {
1398 ThisVReg = NVI->second;
1405 // It's better to start a new interval to avoid artifically
1406 // extend the new interval.
1407 if (MIHasDef && !MIHasUse) {
1408 MBBVRegsMap.erase(MBB->getNumber());
1414 bool IsNew = ThisVReg == 0;
1416 // This ends the previous live interval. If all of its def / use
1417 // can be folded, give it a low spill weight.
1418 if (NewVReg && TrySplit && AllCanFold) {
1419 LiveInterval &nI = getOrCreateInterval(NewVReg);
1426 bool HasDef = false;
1427 bool HasUse = false;
1428 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1429 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1430 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1431 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1432 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
1433 if (!HasDef && !HasUse)
1436 AllCanFold &= CanFold;
1438 // Update weight of spill interval.
1439 LiveInterval &nI = getOrCreateInterval(NewVReg);
1441 // The spill weight is now infinity as it cannot be spilled again.
1442 nI.weight = HUGE_VALF;
1446 // Keep track of the last def and first use in each MBB.
1448 if (MI != ReMatOrigDefMI || !CanDelete) {
1449 bool HasKill = false;
1451 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1453 // If this is a two-address code, then this index starts a new VNInfo.
1454 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1456 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
// Record / update the spill (store) point for this MBB.
1458 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1459 SpillIdxes.find(MBBId);
1461 if (SII == SpillIdxes.end()) {
1462 std::vector<SRInfo> S;
1463 S.push_back(SRInfo(index, NewVReg, true));
1464 SpillIdxes.insert(std::make_pair(MBBId, S));
1465 } else if (SII->second.back().vreg != NewVReg) {
1466 SII->second.push_back(SRInfo(index, NewVReg, true));
1467 } else if ((int)index > SII->second.back().index) {
1468 // If there is an earlier def and this is a two-address
1469 // instruction, then it's not possible to fold the store (which
1470 // would also fold the load).
1471 SRInfo &Info = SII->second.back();
1473 Info.canFold = !HasUse;
1475 SpillMBBs.set(MBBId);
1476 } else if (SII != SpillIdxes.end() &&
1477 SII->second.back().vreg == NewVReg &&
1478 (int)index > SII->second.back().index) {
1479 // There is an earlier def that's not killed (must be two-address).
1480 // The spill is no longer needed.
1481 SII->second.pop_back();
1482 if (SII->second.empty()) {
1483 SpillIdxes.erase(MBBId);
1484 SpillMBBs.reset(MBBId);
// Record / update the restore (reload) point for this MBB.
1491 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1492 SpillIdxes.find(MBBId);
1493 if (SII != SpillIdxes.end() &&
1494 SII->second.back().vreg == NewVReg &&
1495 (int)index > SII->second.back().index)
1496 // Use(s) following the last def, it's not safe to fold the spill.
1497 SII->second.back().canFold = false;
1498 std::map<unsigned, std::vector<SRInfo> >::iterator RII =
1499 RestoreIdxes.find(MBBId);
1500 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1501 // If we are splitting live intervals, only fold if it's the first
1502 // use and there isn't another use later in the MBB.
1503 RII->second.back().canFold = false;
1505 // Only need a reload if there isn't an earlier def / use.
1506 if (RII == RestoreIdxes.end()) {
1507 std::vector<SRInfo> Infos;
1508 Infos.push_back(SRInfo(index, NewVReg, true));
1509 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1511 RII->second.push_back(SRInfo(index, NewVReg, true));
1513 RestoreMBBs.set(MBBId);
1517 // Update spill weight.
1518 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1519 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1522 if (NewVReg && TrySplit && AllCanFold) {
1523 // If all of its def / use can be folded, give it a low spill weight.
1524 LiveInterval &nI = getOrCreateInterval(NewVReg);
1529 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1530 BitVector &RestoreMBBs,
1531 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1532 if (!RestoreMBBs[Id])
1534 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1535 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1536 if (Restores[i].index == index &&
1537 Restores[i].vreg == vr &&
1538 Restores[i].canFold)
1543 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1544 BitVector &RestoreMBBs,
1545 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1546 if (!RestoreMBBs[Id])
1548 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1549 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1550 if (Restores[i].index == index && Restores[i].vreg)
1551 Restores[i].index = -1;
1554 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1555 /// spilled and create empty intervals for their uses.
// NOTE(review): elided extraction -- the embedded numbering jumps
// (1555->1557 drops the 'void' return-type line; 1563->1566 drops the
// iterator advancement and the 'if (O.isDef()) {' test guarding the assert;
// 1570->1572 drops the else; 1575->1577 and the tail after 1581 drop
// 'vrm.grow();'-style calls, the operand rewrite and closing braces).
// Restore from upstream before use.
1557 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1558 const TargetRegisterClass* rc,
1559 std::vector<LiveInterval*> &NewLIs) {
// Walk every reference to li.reg; defs must be the IMPLICIT_DEF itself and
// are deleted, uses get a fresh empty interval.
1560 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1561 re = mri_->reg_end(); ri != re; ) {
1562 MachineOperand &O = ri.getOperand();
1563 MachineInstr *MI = &*ri;
1566 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1567 "Register def was not rewritten?");
1568 RemoveMachineInstrFromMaps(MI);
1569 vrm.RemoveMachineInstrFromMaps(MI);
1570 MI->eraseFromParent();
1572 // This must be an use of an implicit_def so it's not part of the live
1573 // interval. Create a new empty live interval for it.
1574 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1575 unsigned NewVReg = mri_->createVirtualRegister(rc);
1577 vrm.setIsImplicitlyDefined(NewVReg);
1578 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Rewrite every operand of this user that still names li.reg.
1579 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1580 MachineOperand &MO = MI->getOperand(i);
1581 if (MO.isReg() && MO.getReg() == li.reg)
// addIntervalsForSpills - Top-level spilling driver: computes remat info per
// val#, assigns a stack slot, rewrites all defs/uses via
// rewriteInstructionsForSpills, then walks the recorded spill / restore
// points attempting folds, and finally finalizes kills and weights on the
// new intervals it returns.
// NOTE(review): elided extraction -- the embedded numbering jumps throughout
// (1591->1593 drops parameter/brace lines incl. the 'SmallVector ClonedMIs'
// and 'float &SSWeight' pieces referenced below; 1657->1659, 1663->1665,
// 1669->1673 drop the else branch and early return of the pre-split path;
// many continues, else branches, while-loop headers and closing braces are
// missing in the spill/restore walks). Restore from upstream before use.
1589 std::vector<LiveInterval*> LiveIntervals::
1590 addIntervalsForSpills(const LiveInterval &li,
1591 const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1593 assert(li.weight != HUGE_VALF &&
1594 "attempt to spill already spilled interval!");
1596 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1597 li.print(DOUT, tri_);
1600 // Spill slot weight.
1603 // Each bit specify whether it a spill is required in the MBB.
1604 BitVector SpillMBBs(mf_->getNumBlockIDs());
1605 std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1606 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1607 std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1608 std::map<unsigned,unsigned> MBBVRegsMap;
1609 std::vector<LiveInterval*> NewLIs;
1610 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-val# rematerialization bookkeeping.
1612 unsigned NumValNums = li.getNumValNums();
1613 SmallVector<MachineInstr*, 4> ReMatDefs;
1614 ReMatDefs.resize(NumValNums, NULL);
1615 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1616 ReMatOrigDefs.resize(NumValNums, NULL);
1617 SmallVector<int, 4> ReMatIds;
1618 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1619 BitVector ReMatDelete(NumValNums);
1620 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1622 // Spilling a split live interval. It cannot be split any further. Also,
1623 // it's also guaranteed to be a single val# / range interval.
1624 if (vrm.getPreSplitReg(li.reg)) {
1625 vrm.setIsSplitFromReg(li.reg, 0);
1626 // Unset the split kill marker on the last use.
1627 unsigned KillIdx = vrm.getKillPoint(li.reg);
1629 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1630 assert(KillMI && "Last use disappeared?");
1631 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1632 assert(KillOp != -1 && "Last use disappeared?");
1633 KillMI->getOperand(KillOp).setIsKill(false);
1635 vrm.removeKillPoint(li.reg);
1636 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1637 Slot = vrm.getStackSlot(li.reg);
1638 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1639 MachineInstr *ReMatDefMI = DefIsReMat ?
1640 vrm.getReMaterializedMI(li.reg) : NULL;
1642 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1643 bool isLoad = isLoadSS ||
1644 (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1645 bool IsFirstRange = true;
1646 for (LiveInterval::Ranges::const_iterator
1647 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1648 // If this is a split live interval with multiple ranges, it means there
1649 // are two-address instructions that re-defined the value. Only the
1650 // first def can be rematerialized!
1652 // Note ReMatOrigDefMI has already been deleted.
1653 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1654 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1655 false, vrm, rc, ReMatIds, loopInfo,
1656 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1657 MBBVRegsMap, NewLIs, SSWeight);
1659 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1660 Slot, 0, false, false, false,
1661 false, vrm, rc, ReMatIds, loopInfo,
1662 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1663 MBBVRegsMap, NewLIs, SSWeight);
1665 IsFirstRange = false;
1668 SSWeight = 0.0f; // Already accounted for when split.
1669 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Normal (non-pre-split) path starts here.
1673 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1674 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1678 bool NeedStackSlot = false;
// First pass over val#s: decide which defs can be rematerialized and
// whether the interval needs a stack slot at all.
1679 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1681 const VNInfo *VNI = *i;
1682 unsigned VN = VNI->id;
1683 unsigned DefIdx = VNI->def;
1685 continue; // Dead val#.
1686 // Is the def for the val# rematerializable?
1687 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1688 ? 0 : getInstructionFromIndex(DefIdx);
1690 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1691 // Remember how to remat the def of this val#.
1692 ReMatOrigDefs[VN] = ReMatDefMI;
1693 // Original def may be modified so we have to make a copy here.
1694 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1695 ClonedMIs.push_back(Clone);
1696 ReMatDefs[VN] = Clone;
1698 bool CanDelete = true;
1699 if (VNI->hasPHIKill) {
1700 // A kill is a phi node, not all of its uses can be rematerialized.
1701 // It must not be deleted.
1703 // Need a stack slot if there is any live range where uses cannot be
1705 NeedStackSlot = true;
1708 ReMatDelete.set(VN);
1710 // Need a stack slot if there is any live range where uses cannot be
1712 NeedStackSlot = true;
1716 // One stack slot per live interval.
1717 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1718 Slot = vrm.assignVirt2StackSlot(li.reg);
1720 // Create new intervals and rewrite defs and uses.
1721 for (LiveInterval::Ranges::const_iterator
1722 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1723 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1724 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1725 bool DefIsReMat = ReMatDefMI != NULL;
1726 bool CanDelete = ReMatDelete[I->valno->id];
1728 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1729 bool isLoad = isLoadSS ||
1730 (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1731 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1732 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1733 CanDelete, vrm, rc, ReMatIds, loopInfo,
1734 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1735 MBBVRegsMap, NewLIs, SSWeight);
1738 // Insert spills / restores if we are splitting.
1740 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1744 SmallPtrSet<LiveInterval*, 4> AddedKill;
1745 SmallVector<unsigned, 2> Ops;
// Walk every MBB that needs a spill (store) and try to fold the stores.
1746 if (NeedStackSlot) {
1747 int Id = SpillMBBs.find_first();
1749 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1750 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1751 std::vector<SRInfo> &spills = SpillIdxes[Id];
1752 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1753 int index = spills[i].index;
1754 unsigned VReg = spills[i].vreg;
1755 LiveInterval &nI = getOrCreateInterval(VReg);
1756 bool isReMat = vrm.isReMaterialized(VReg);
1757 MachineInstr *MI = getInstructionFromIndex(index);
1758 bool CanFold = false;
1759 bool FoundUse = false;
1761 if (spills[i].canFold) {
1763 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1764 MachineOperand &MO = MI->getOperand(j);
1765 if (!MO.isRegister() || MO.getReg() != VReg)
1772 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1773 RestoreMBBs, RestoreIdxes))) {
1774 // MI has two-address uses of the same register. If the use
1775 // isn't the first and only use in the BB, then we can't fold
1776 // it. FIXME: Move this to rewriteInstructionsForSpills.
1783 // Fold the store into the def if possible.
1784 bool Folded = false;
1785 if (CanFold && !Ops.empty()) {
1786 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1789 // Also folded uses, do not issue a load.
1790 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1791 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1793 nI.removeRange(getDefIndex(index), getStoreIndex(index));
1797 // Otherwise tell the spiller to issue a spill.
1799 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1800 bool isKill = LR->end == getStoreIndex(index);
1801 if (!MI->registerDefIsDead(nI.reg))
1802 // No need to spill a dead def.
1803 vrm.addSpillPoint(VReg, isKill, MI);
1805 AddedKill.insert(&nI);
1808 // Update spill slot weight.
1810 SSWeight += getSpillWeight(true, false, loopDepth);
1812 Id = SpillMBBs.find_next(Id);
// Walk every MBB that needs a restore (reload) and try to fold the loads.
1816 int Id = RestoreMBBs.find_first();
1818 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1819 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1821 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1822 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1823 int index = restores[i].index;
1826 unsigned VReg = restores[i].vreg;
1827 LiveInterval &nI = getOrCreateInterval(VReg);
1828 bool isReMat = vrm.isReMaterialized(VReg);
1829 MachineInstr *MI = getInstructionFromIndex(index);
1830 bool CanFold = false;
1832 if (restores[i].canFold) {
1834 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1835 MachineOperand &MO = MI->getOperand(j);
1836 if (!MO.isRegister() || MO.getReg() != VReg)
1840 // If this restore were to be folded, it would have been folded
1849 // Fold the load into the use if possible.
1850 bool Folded = false;
1851 if (CanFold && !Ops.empty()) {
1853 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1855 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1857 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1858 // If the rematerializable def is a load, also try to fold it.
1859 if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1860 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1861 Ops, isLoadSS, LdSlot, VReg);
1862 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1864 // Re-matting an instruction with virtual register use. Add the
1865 // register as an implicit use on the use MI and update the register
1866 // interval's spill weight to HUGE_VALF to prevent it from being
1868 LiveInterval &ImpLi = getInterval(ImpUse);
1869 ImpLi.weight = HUGE_VALF;
1870 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1874 // If folding is not possible / failed, then tell the spiller to issue a
1875 // load / rematerialization for us.
1877 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1879 vrm.addRestorePoint(VReg, MI);
1881 // Update spill slot weight.
1883 SSWeight += getSpillWeight(false, true, loopDepth);
1885 Id = RestoreMBBs.find_next(Id);
1888 // Finalize intervals: add kills, finalize spill weights, and filter out
1890 std::vector<LiveInterval*> RetNewLIs;
1891 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1892 LiveInterval *LI = NewLIs[i];
1894 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
1895 if (!AddedKill.count(LI)) {
1896 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1897 unsigned LastUseIdx = getBaseIndex(LR->end);
1898 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1899 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1900 assert(UseIdx != -1);
1901 if (LastUse->getOperand(UseIdx).isImplicit() ||
1902 LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
1903 LastUse->getOperand(UseIdx).setIsKill();
1904 vrm.addKillPoint(LI->reg, LastUseIdx);
1907 RetNewLIs.push_back(LI);
1911 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1915 /// hasAllocatableSuperReg - Return true if the specified physical register has
1916 /// any super register that's allocatable.
1917 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1918 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1919 if (allocatableRegs_[*AS] && hasInterval(*AS))
1924 /// getRepresentativeReg - Find the largest super register of the specified
1925 /// physical register.
1926 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1927 // Find the largest super-register that is allocatable.
1928 unsigned BestReg = Reg;
1929 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1930 unsigned SuperReg = *AS;
1931 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1939 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1940 /// specified interval that conflicts with the specified physical register.
1941 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1942 unsigned PhysReg) const {
1943 unsigned NumConflicts = 0;
1944 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1945 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1946 E = mri_->reg_end(); I != E; ++I) {
1947 MachineOperand &O = I.getOperand();
1948 MachineInstr *MI = O.getParent();
1949 unsigned Index = getInstructionIndex(MI);
1950 if (pli.liveAt(Index))
1953 return NumConflicts;
1956 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1957 /// around all defs and uses of the specified interval.
// NOTE(review): elided extraction -- the embedded numbering jumps
// (1975->1978 drops the 'continue;' and the SeenMIs insertion; 1983->1985
// drops the inner 'continue;'; the closing braces after 1987 are missing).
// Restore from upstream before use.
1958 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1959 unsigned PhysReg, VirtRegMap &vrm) {
1960 unsigned SpillReg = getRepresentativeReg(PhysReg);
1962 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1963 // If there are registers which alias PhysReg, but which are not a
1964 // sub-register of the chosen representative super register. Assert
1965 // since we can't handle it yet.
1966 assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
1967 tri_->isSuperRegister(*AS, SpillReg));
1969 LiveInterval &pli = getInterval(SpillReg);
// Visit each instruction referencing li.reg once (SeenMIs de-dupes multiple
// operands on the same instruction).
1970 SmallPtrSet<MachineInstr*, 8> SeenMIs;
1971 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1972 E = mri_->reg_end(); I != E; ++I) {
1973 MachineOperand &O = I.getOperand();
1974 MachineInstr *MI = O.getParent();
1975 if (SeenMIs.count(MI))
1978 unsigned Index = getInstructionIndex(MI);
1979 if (pli.liveAt(Index)) {
// Emergency-spill the physreg around this instruction and carve its live
// range (and those of its sub-registers) out at this index.
1980 vrm.addEmergencySpill(SpillReg, MI);
1981 pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1982 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
1983 if (!hasInterval(*AS))
1985 LiveInterval &spli = getInterval(*AS);
1986 if (spli.liveAt(Index))
1987 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1993 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
1994 MachineInstr* startInst) {
1995 LiveInterval& Interval = getOrCreateInterval(reg);
1996 VNInfo* VN = Interval.getNextValue(
1997 getInstructionIndex(startInst) + InstrSlots::DEF,
1998 startInst, getVNInfoAllocator());
1999 VN->hasPHIKill = true;
2000 VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2001 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2002 getMBBEndIdx(startInst->getParent()) + 1, VN);
2003 Interval.addRange(LR);