1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineLoopInfo.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/Passes.h"
29 #include "llvm/CodeGen/PseudoSourceValue.h"
30 #include "llvm/Target/TargetRegisterInfo.h"
31 #include "llvm/Target/TargetInstrInfo.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include "llvm/Support/CommandLine.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/ADT/Statistic.h"
36 #include "llvm/ADT/STLExtras.h"
41 // Hidden command-line options to help with debugging.
42 static cl::opt<bool> DisableReMat("disable-rematerialization",
43 cl::init(false), cl::Hidden);
// Control interval splitting: whether to split at basic-block boundaries,
// and an optional cap on the number of splits (-1 = unlimited).
45 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
46 cl::init(true), cl::Hidden);
47 static cl::opt<int> SplitLimit("split-limit",
48 cl::init(-1), cl::Hidden);
// Pass statistics, reported with -stats.
50 STATISTIC(numIntervals, "Number of original intervals");
51 STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
52 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
53 STATISTIC(numSplits , "Number of intervals split");
// Pass identification and registration with the pass manager.
55 char LiveIntervals::ID = 0;
56 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
/// getAnalysisUsage - Declare prerequisite passes (AliasAnalysis,
/// LiveVariables, PHI elimination, two-address lowering) and the analyses
/// this pass preserves.
58 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
59 AU.addRequired<AliasAnalysis>();
60 AU.addPreserved<AliasAnalysis>();
61 AU.addPreserved<LiveVariables>();
62 AU.addRequired<LiveVariables>();
63 AU.addPreservedID(MachineLoopInfoID);
64 AU.addPreservedID(MachineDominatorsID);
65 AU.addPreservedID(PHIEliminationID);
66 AU.addRequiredID(PHIEliminationID);
67 AU.addRequiredID(TwoAddressInstructionPassID);
68 MachineFunctionPass::getAnalysisUsage(AU);
/// releaseMemory - Free per-function state: the VNInfo allocator and any
/// machine instructions cloned for rematerialization.
71 void LiveIntervals::releaseMemory() {
77 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
78 VNInfoAllocator.Reset();
// Delete every cloned instruction we still own.
// NOTE(review): the loop body presumably also pops the entry off
// ClonedMIs; those lines are elided in this dump — confirm in the
// full source.
79 while (!ClonedMIs.empty()) {
80 MachineInstr *MI = ClonedMIs.back();
82 mf_->DeleteMachineInstr(MI);
/// computeNumbering - Assign a dense instruction index to every MachineInstr
/// and record per-block index ranges. If a previous numbering exists
/// (OldI2MI non-empty), remap every live range, value-number def, and kill
/// index from the old numbering to the new one.
86 void LiveIntervals::computeNumbering() {
// Snapshot the old index maps before rebuilding, so existing intervals
// can be translated below.
87 Index2MiMap OldI2MI = i2miMap_;
88 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
97 // Number MachineInstrs and MachineBasicBlocks.
98 // Initialize MBB indexes to a sentinel.
99 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
101 unsigned MIIndex = 0;
102 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
104 unsigned StartIdx = MIIndex;
106 // Insert an empty slot at the beginning of each block.
107 MIIndex += InstrSlots::NUM;
108 i2miMap_.push_back(0);
110 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
112 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
113 assert(inserted && "multiple MachineInstr -> index mappings");
114 i2miMap_.push_back(I);
// Each instruction occupies InstrSlots::NUM index slots (LOAD/USE/
// DEF/STORE sub-positions).
115 MIIndex += InstrSlots::NUM;
118 // Insert an empty slot after every instruction.
119 MIIndex += InstrSlots::NUM;
120 i2miMap_.push_back(0);
123 // Set the MBB2IdxMap entry for this MBB.
124 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
125 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
// Keep Idx2MBBMap sorted by start index so lower_bound lookups work.
127 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
129 if (!OldI2MI.empty())
130 for (iterator OI = begin(), OE = end(); OI != OE; ++OI)
131 for (LiveInterval::iterator LI = OI->second.begin(),
132 LE = OI->second.end(); LI != LE; ++LI) {
134 // Remap the start index of the live range to the corresponding new
135 // number, or our best guess at what it _should_ correspond to if the
136 // original instruction has been erased. This is either the following
137 // instruction or its predecessor.
138 unsigned index = LI->start / InstrSlots::NUM;
139 unsigned offset = LI->start % InstrSlots::NUM;
140 if (offset == InstrSlots::LOAD) {
// A LOAD-slot start means the range begins at a block boundary;
// find the (old) block containing this index and use its new
// start index.
141 std::vector<IdxMBBPair>::const_iterator I =
142 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), index);
143 // Take the pair containing the index
144 std::vector<IdxMBBPair>::const_iterator J =
145 ((I != OldI2MBB.end() && I->first > index) ||
146 (I == OldI2MBB.end() && OldI2MBB.size()>0)) ? (I-1): I;
148 LI->start = getMBBStartIdx(J->second);
150 LI->start = mi2iMap_[OldI2MI[index]] + offset;
153 // Remap the ending index in the same way that we remapped the start,
154 // except for the final step where we always map to the immediately
155 // following instruction.
156 index = LI->end / InstrSlots::NUM;
157 offset = LI->end % InstrSlots::NUM;
158 if (offset == InstrSlots::STORE) {
159 std::vector<IdxMBBPair>::const_iterator I =
160 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), index);
161 // Take the pair containing the index
162 std::vector<IdxMBBPair>::const_iterator J =
163 ((I != OldI2MBB.end() && I->first > index) ||
164 (I == OldI2MBB.end() && OldI2MBB.size()>0)) ? (I-1): I;
166 LI->end = getMBBEndIdx(J->second) + 1;
168 LI->end = mi2iMap_[OldI2MI[index]] + offset;
171 // Remap the VNInfo def index, which works the same as the
172 // start indices above.
173 VNInfo* vni = LI->valno;
174 index = vni->def / InstrSlots::NUM;
175 offset = vni->def % InstrSlots::NUM;
176 if (offset == InstrSlots::LOAD) {
177 std::vector<IdxMBBPair>::const_iterator I =
178 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), index);
179 // Take the pair containing the index
180 std::vector<IdxMBBPair>::const_iterator J =
181 ((I != OldI2MBB.end() && I->first > index) ||
182 (I == OldI2MBB.end() && OldI2MBB.size()>0)) ? (I-1): I;
184 vni->def = getMBBStartIdx(J->second);
187 vni->def = mi2iMap_[OldI2MI[index]] + offset;
190 // Remap the VNInfo kill indices, which works the same as
191 // the end indices above.
192 for (size_t i = 0; i < vni->kills.size(); ++i) {
193 index = vni->kills[i] / InstrSlots::NUM;
194 offset = vni->kills[i] % InstrSlots::NUM;
195 if (OldI2MI[vni->kills[i] / InstrSlots::NUM]) {
196 std::vector<IdxMBBPair>::const_iterator I =
197 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), index);
198 // Take the pair containing the index
199 std::vector<IdxMBBPair>::const_iterator J =
200 ((I != OldI2MBB.end() && I->first > index) ||
201 (I == OldI2MBB.end() && OldI2MBB.size()>0)) ? (I-1): I;
203 vni->kills[i] = getMBBEndIdx(J->second) + 1;
205 vni->kills[i] = mi2iMap_[OldI2MI[index]] + offset;
211 /// runOnMachineFunction - Register allocate the whole function
213 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
// Cache target/analysis handles used throughout the pass.
215 mri_ = &mf_->getRegInfo();
216 tm_ = &fn.getTarget();
217 tri_ = tm_->getRegisterInfo();
218 tii_ = tm_->getInstrInfo();
219 aa_ = &getAnalysis<AliasAnalysis>();
220 lv_ = &getAnalysis<LiveVariables>();
221 allocatableRegs_ = tri_->getAllocatableSet(fn);
226 numIntervals += getNumIntervals();
// Dump the computed intervals when debugging is enabled.
228 DOUT << "********** INTERVALS **********\n";
229 for (iterator I = begin(), E = end(); I != E; ++I) {
230 I->second.print(DOUT, tri_);
234 numIntervalsAfter += getNumIntervals();
239 /// print - Implement the dump method.
240 void LiveIntervals::print(std::ostream &O, const Module* ) const {
241 O << "********** INTERVALS **********\n";
242 for (const_iterator I = begin(), E = end(); I != E; ++I) {
243 I->second.print(O, tri_);
// Also dump every machine instruction with its assigned index,
// grouped by basic block.
247 O << "********** MACHINEINSTRS **********\n";
248 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
249 mbbi != mbbe; ++mbbi) {
250 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
251 for (MachineBasicBlock::iterator mii = mbbi->begin(),
252 mie = mbbi->end(); mii != mie; ++mii) {
253 O << getInstructionIndex(mii) << '\t' << *mii;
258 /// conflictsWithPhysRegDef - Returns true if the specified register
259 /// is defined during the duration of the specified interval.
260 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
261 VirtRegMap &vrm, unsigned reg) {
// Walk every instruction index covered by each live range of li.
262 for (LiveInterval::Ranges::const_iterator
263 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
264 for (unsigned index = getBaseIndex(I->start),
265 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
266 index += InstrSlots::NUM) {
267 // skip deleted instructions
268 while (index != end && !getInstructionFromIndex(index))
269 index += InstrSlots::NUM;
270 if (index == end) break;
272 MachineInstr *MI = getInstructionFromIndex(index);
// Copies to/from li.reg itself are not conflicts.
273 unsigned SrcReg, DstReg;
274 if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
275 if (SrcReg == li.reg || DstReg == li.reg)
// Check every register operand for overlap with 'reg'; virtual
// registers are first translated to their assigned physreg.
277 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
278 MachineOperand& mop = MI->getOperand(i);
279 if (!mop.isRegister())
281 unsigned PhysReg = mop.getReg();
282 if (PhysReg == 0 || PhysReg == li.reg)
284 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
285 if (!vrm.hasPhys(PhysReg))
287 PhysReg = vrm.getPhys(PhysReg);
289 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
/// printRegName - Print a register to stderr: physical registers by name,
/// virtual registers as "%reg<N>".
298 void LiveIntervals::printRegName(unsigned reg) const {
299 if (TargetRegisterInfo::isPhysicalRegister(reg))
300 cerr << tri_->getName(reg);
302 cerr << "%reg" << reg;
/// handleVirtualRegisterDef - Update the live interval for the virtual
/// register defined by operand MO of instruction mi (at index MIIdx).
/// Handles the first definition, two-address redefinitions, and PHI
/// elimination redefinitions.
305 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
306 MachineBasicBlock::iterator mi,
307 unsigned MIIdx, MachineOperand& MO,
309 LiveInterval &interval) {
310 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
311 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
313 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
314 DOUT << "is a implicit_def\n";
318 // Virtual registers may be defined multiple times (due to phi
319 // elimination and 2-addr elimination). Much of what we do only has to be
320 // done once for the vreg. We use an empty interval to detect the first
321 // time we see a vreg.
322 if (interval.empty()) {
323 // Get the Idx of the defining instructions.
324 unsigned defIndex = getDefIndex(MIIdx);
// Record the defining copy instruction (if any) so that coalescing
// and rematerialization can trace the value's source.
326 MachineInstr *CopyMI = NULL;
327 unsigned SrcReg, DstReg;
328 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
329 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
330 tii_->isMoveInstr(*mi, SrcReg, DstReg))
332 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
334 assert(ValNo->id == 0 && "First value in interval is not 0?");
336 // Loop over all of the blocks that the vreg is defined in. There are
337 // two cases we have to handle here. The most common case is a vreg
338 // whose lifetime is contained within a basic block. In this case there
339 // will be a single kill, in MBB, which comes after the definition.
340 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
341 // FIXME: what about dead vars?
343 if (vi.Kills[0] != mi)
344 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
346 killIdx = defIndex+1;
348 // If the kill happens after the definition, we have an intra-block
350 if (killIdx > defIndex) {
351 assert(vi.AliveBlocks.none() &&
352 "Shouldn't be alive across any blocks!");
353 LiveRange LR(defIndex, killIdx, ValNo);
354 interval.addRange(LR);
355 DOUT << " +" << LR << "\n";
356 interval.addKill(ValNo, killIdx);
361 // The other case we handle is when a virtual register lives to the end
362 // of the defining block, potentially live across some blocks, then is
363 // live into some number of blocks, but gets killed. Start by adding a
364 // range that goes from this definition to the end of the defining block.
365 LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
366 DOUT << " +" << NewLR;
367 interval.addRange(NewLR);
369 // Iterate over all of the blocks that the variable is completely
370 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
372 for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
373 if (vi.AliveBlocks[i]) {
374 LiveRange LR(getMBBStartIdx(i),
375 getMBBEndIdx(i)+1, // MBB ends at -1.
377 interval.addRange(LR);
382 // Finally, this virtual register is live from the start of any killing
383 // block to the 'use' slot of the killing instruction.
384 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
385 MachineInstr *Kill = vi.Kills[i];
386 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
387 LiveRange LR(getMBBStartIdx(Kill->getParent()),
389 interval.addRange(LR);
390 interval.addKill(ValNo, killIdx);
395 // If this is the second time we see a virtual register definition, it
396 // must be due to phi elimination or two addr elimination. If this is
397 // the result of two address elimination, then the vreg is one of the
398 // def-and-use register operand.
399 if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
400 // If this is a two-address definition, then we have already processed
401 // the live range. The only problem is that we didn't realize there
402 // are actually two values in the live interval. Because of this we
403 // need to take the LiveRegion that defines this register and split it
405 assert(interval.containsOneValue());
406 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
407 unsigned RedefIndex = getDefIndex(MIIdx);
409 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
410 VNInfo *OldValNo = OldLR->valno;
412 // Delete the initial value, which should be short and continuous,
413 // because the 2-addr copy must be in the same MBB as the redef.
414 interval.removeRange(DefIndex, RedefIndex);
416 // Two-address vregs should always only be redefined once. This means
417 // that at this point, there should be exactly one value number in it.
418 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
420 // The new value number (#1) is defined by the instruction we claimed
422 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
425 // Value#0 is now defined by the 2-addr instruction.
426 OldValNo->def = RedefIndex;
429 // Add the new live interval which replaces the range for the input copy.
430 LiveRange LR(DefIndex, RedefIndex, ValNo);
431 DOUT << " replace range with " << LR;
432 interval.addRange(LR);
433 interval.addKill(ValNo, RedefIndex);
435 // If this redefinition is dead, we need to add a dummy unit live
436 // range covering the def slot.
438 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
441 interval.print(DOUT, tri_);
444 // Otherwise, this must be because of phi elimination. If this is the
445 // first redefinition of the vreg that we have seen, go back and change
446 // the live range in the PHI block to be a different value number.
447 if (interval.containsOneValue()) {
448 assert(vi.Kills.size() == 1 &&
449 "PHI elimination vreg should have one kill, the PHI itself!");
451 // Remove the old range that we now know has an incorrect number.
452 VNInfo *VNI = interval.getValNumInfo(0);
453 MachineInstr *Killer = vi.Kills[0];
454 unsigned Start = getMBBStartIdx(Killer->getParent());
455 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
456 DOUT << " Removing [" << Start << "," << End << "] from: ";
457 interval.print(DOUT, tri_); DOUT << "\n";
458 interval.removeRange(Start, End);
459 VNI->hasPHIKill = true;
460 DOUT << " RESULT: "; interval.print(DOUT, tri_);
462 // Replace the interval with one of a NEW value number. Note that this
463 // value number isn't actually defined by an instruction, weird huh? :)
464 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
465 DOUT << " replace range with " << LR;
466 interval.addRange(LR);
467 interval.addKill(LR.valno, End);
468 DOUT << " RESULT: "; interval.print(DOUT, tri_);
471 // In the case of PHI elimination, each variable definition is only
472 // live until the end of the block. We've already taken care of the
473 // rest of the live range.
474 unsigned defIndex = getDefIndex(MIIdx);
// As in the first-definition case, record a defining copy if present.
477 MachineInstr *CopyMI = NULL;
478 unsigned SrcReg, DstReg;
479 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
480 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
481 tii_->isMoveInstr(*mi, SrcReg, DstReg))
483 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
485 unsigned killIndex = getMBBEndIdx(mbb) + 1;
486 LiveRange LR(defIndex, killIndex, ValNo);
487 interval.addRange(LR);
488 interval.addKill(ValNo, killIndex);
489 ValNo->hasPHIKill = true;
/// handlePhysicalRegisterDef - Update the live interval for the physical
/// register defined at MIIdx. A physreg interval never crosses a basic
/// block, so the range always ends within MBB.
497 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
498 MachineBasicBlock::iterator mi,
501 LiveInterval &interval,
502 MachineInstr *CopyMI) {
503 // A physical register cannot be live across basic block, so its
504 // lifetime must end somewhere in its defining basic block.
505 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
507 unsigned baseIndex = MIIdx;
508 unsigned start = getDefIndex(baseIndex);
509 unsigned end = start;
511 // If it is not used after definition, it is considered dead at
512 // the instruction defining it. Hence its interval is:
513 // [defSlot(def), defSlot(def)+1)
516 end = getDefIndex(start) + 1;
520 // If it is not dead on definition, it must be killed by a
521 // subsequent instruction. Hence its interval is:
522 // [defSlot(def), useSlot(kill)+1)
523 baseIndex += InstrSlots::NUM;
524 while (++mi != MBB->end()) {
// Skip index slots belonging to deleted instructions.
525 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
526 getInstructionFromIndex(baseIndex) == 0)
527 baseIndex += InstrSlots::NUM;
528 if (mi->killsRegister(interval.reg, tri_)) {
530 end = getUseIndex(baseIndex) + 1;
532 } else if (mi->modifiesRegister(interval.reg, tri_)) {
533 // Another instruction redefines the register before it is ever read.
534 // Then the register is essentially dead at the instruction that defines
535 // it. Hence its interval is:
536 // [defSlot(def), defSlot(def)+1)
538 end = getDefIndex(start) + 1;
542 baseIndex += InstrSlots::NUM;
545 // The only case we should have a dead physreg here without a killing or
546 // instruction where we know it's dead is if it is live-in to the function
548 assert(!CopyMI && "physreg was not killed in defining block!");
549 end = getDefIndex(start) + 1; // It's dead.
552 assert(start < end && "did not find end of interval?");
554 // Already exists? Extend old live interval.
555 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
556 VNInfo *ValNo = (OldLR != interval.end())
557 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
558 LiveRange LR(start, end, ValNo);
559 interval.addRange(LR);
560 interval.addKill(LR.valno, end);
561 DOUT << " +" << LR << '\n';
/// handleRegisterDef - Dispatch a register definition to the virtual- or
/// physical-register handler; physreg defs also define their sub-registers.
564 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
565 MachineBasicBlock::iterator MI,
569 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
570 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
571 getOrCreateInterval(MO.getReg()));
572 else if (allocatableRegs_[MO.getReg()]) {
// For allocatable physregs, record a defining copy (if any) before
// building the interval.
573 MachineInstr *CopyMI = NULL;
574 unsigned SrcReg, DstReg;
575 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
576 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
577 tii_->isMoveInstr(*MI, SrcReg, DstReg))
579 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
580 getOrCreateInterval(MO.getReg()), CopyMI);
581 // Def of a register also defines its sub-registers.
582 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
583 // If MI also modifies the sub-register explicitly, avoid processing it
584 // more than once. Do not pass in TRI here so it checks for exact match.
585 if (!MI->modifiesRegister(*AS))
586 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
587 getOrCreateInterval(*AS), 0);
/// handleLiveInRegister - Build the live range for a register that is
/// live-in to MBB: from the block start index to its first kill or redef
/// (or through the block if neither is found).
591 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
593 LiveInterval &interval, bool isAlias) {
594 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
596 // Look for kills, if it reaches a def before it's killed, then it shouldn't
597 // be considered a livein.
598 MachineBasicBlock::iterator mi = MBB->begin();
599 unsigned baseIndex = MIIdx;
600 unsigned start = baseIndex;
601 unsigned end = start;
602 while (mi != MBB->end()) {
603 if (mi->killsRegister(interval.reg, tri_)) {
605 end = getUseIndex(baseIndex) + 1;
607 } else if (mi->modifiesRegister(interval.reg, tri_)) {
608 // Another instruction redefines the register before it is ever read.
609 // Then the register is essentially dead at the instruction that defines
610 // it. Hence its interval is:
611 // [defSlot(def), defSlot(def)+1)
613 end = getDefIndex(start) + 1;
// Advance past any index slots that have no instruction.
617 baseIndex += InstrSlots::NUM;
618 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
619 getInstructionFromIndex(baseIndex) == 0)
620 baseIndex += InstrSlots::NUM;
625 // Live-in register might not be used at all.
629 end = getDefIndex(MIIdx) + 1;
631 DOUT << " live through";
// The live-in value has no defining instruction, so create its value
// number with a null copy.
636 LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
637 interval.addRange(LR);
638 interval.addKill(LR.valno, end);
639 DOUT << " +" << LR << '\n';
642 /// computeIntervals - computes the live intervals for virtual
643 /// registers. for some ordering of the machine instructions [1,N] a
644 /// live interval is an interval [i, j) where 1 <= i <= j < N for
645 /// which a variable is live
646 void LiveIntervals::computeIntervals() {
647 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
648 << "********** Function: "
649 << ((Value*)mf_->getFunction())->getName() << '\n';
650 // Track the index of the current machine instr.
651 unsigned MIIndex = 0;
653 // Skip over empty initial indices.
654 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
655 getInstructionFromIndex(MIIndex) == 0)
656 MIIndex += InstrSlots::NUM;
658 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
660 MachineBasicBlock *MBB = MBBI;
661 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
663 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
665 // Create intervals for live-ins to this BB first.
666 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
667 LE = MBB->livein_end(); LI != LE; ++LI) {
668 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
669 // Multiple live-ins can alias the same register.
670 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
671 if (!hasInterval(*AS))
672 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
676 for (; MI != miEnd; ++MI) {
677 DOUT << MIIndex << "\t" << *MI;
// Visit operands in reverse so defs are handled after any uses on
// the same instruction's operand list.
680 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
681 MachineOperand &MO = MI->getOperand(i);
682 // handle register defs - build intervals
683 if (MO.isRegister() && MO.getReg() && MO.isDef())
684 handleRegisterDef(MBB, MI, MIIndex, MO, i);
687 MIIndex += InstrSlots::NUM;
689 // Skip over empty indices.
690 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
691 getInstructionFromIndex(MIIndex) == 0)
692 MIIndex += InstrSlots::NUM;
/// findLiveInMBBs - Collect into MBBs every basic block whose start index
/// falls inside the live range LR, using the sorted Idx2MBBMap.
697 bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
698 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
699 std::vector<IdxMBBPair>::const_iterator I =
700 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
703 while (I != Idx2MBBMap.end()) {
// Stop once the block starts at or after the end of the range.
704 if (LR.end <= I->first)
706 MBBs.push_back(I->second);
/// createInterval - Construct a LiveInterval for the given register with an
/// initial spill weight that depends on whether it is physical or virtual.
714 LiveInterval LiveIntervals::createInterval(unsigned reg) {
715 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
717 return LiveInterval(reg, Weight);
720 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
721 /// copy field and returns the source register that defines it.
722 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
// EXTRACT_SUBREG takes the source in operand 1; INSERT_SUBREG takes the
// inserted value in operand 2.
726 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
727 return VNI->copy->getOperand(1).getReg();
728 if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
729 return VNI->copy->getOperand(2).getReg();
730 unsigned SrcReg, DstReg;
731 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
733 assert(0 && "Unrecognized copy instruction!");
737 //===----------------------------------------------------------------------===//
738 // Register allocator hooks.
741 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
742 /// allow one) virtual register operand, then its uses are implicitly using
743 /// the register. Returns the virtual register.
744 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
745 MachineInstr *MI) const {
747 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
748 MachineOperand &MO = MI->getOperand(i);
749 if (!MO.isRegister() || !MO.isUse())
// Skip non-register operands, zero registers, and li.reg itself.
751 unsigned Reg = MO.getReg();
752 if (Reg == 0 || Reg == li.reg)
754 // FIXME: For now, only remat MI with at most one register operand.
756 "Can't rematerialize instruction with multiple register operand!");
765 /// isValNoAvailableAt - Return true if the val# of the specified interval
766 /// which reaches the given instruction also reaches the specified use index.
767 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
768 unsigned UseIdx) const {
769 unsigned Index = getInstructionIndex(MI);
// Find the value number live at MI, then check the range containing
// UseIdx carries the same value number.
770 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
771 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
772 return UI != li.end() && UI->valno == ValNo;
775 /// isReMaterializable - Returns true if the definition MI of the specified
776 /// val# of the specified interval is re-materializable.
777 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
778 const VNInfo *ValNo, MachineInstr *MI,
// IMPLICIT_DEF is trivially rematerializable.
783 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
787 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
788 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
789 // FIXME: Let target specific isReallyTriviallyReMaterializable determines
790 // this but remember this is not safe to fold into a two-address
792 // This is a load from fixed stack slot. It can be rematerialized.
795 // If the target-specific rules don't identify an instruction as
796 // being trivially rematerializable, use some target-independent
798 if (!MI->getDesc().isRematerializable() ||
799 !tii_->isTriviallyReMaterializable(MI)) {
801 // If the instruction access memory but the memoperands have been lost,
802 // we can't analyze it.
803 const TargetInstrDesc &TID = MI->getDesc();
804 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
807 // Avoid instructions obviously unsafe for remat.
808 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
811 // If the instruction accesses memory and the memory could be non-constant,
812 // assume the instruction is not rematerializable.
813 for (alist<MachineMemOperand>::const_iterator I = MI->memoperands_begin(),
814 E = MI->memoperands_end(); I != E; ++I) {
815 const MachineMemOperand &MMO = *I;
816 if (MMO.isVolatile() || MMO.isStore())
818 const Value *V = MMO.getValue();
// Pseudo source values (e.g. constant pool) must be provably constant;
// real IR values are checked through alias analysis.
821 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
822 if (!PSV->isConstant(mf_->getFrameInfo()))
824 } else if (!aa_->pointsToConstantMemory(V))
828 // If any of the registers accessed are non-constant, conservatively assume
829 // the instruction is not rematerializable.
831 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
832 const MachineOperand &MO = MI->getOperand(i);
834 unsigned Reg = MO.getReg();
837 if (TargetRegisterInfo::isPhysicalRegister(Reg))
840 // Only allow one def, and that in the first operand.
841 if (MO.isDef() != (i == 0))
844 // Only allow constant-valued registers.
845 bool IsLiveIn = mri_->isLiveIn(Reg);
846 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
849 // For the def, it should be the only def.
850 if (MO.isDef() && (next(I) != E || IsLiveIn))
854 // Only allow one use other register use, as that's all the
855 // remat mechanisms support currently.
859 else if (Reg != ImpUse)
862 // For uses, there should be only one associate def.
863 if (I != E && (next(I) != E || IsLiveIn))
// If remat depends on an implicitly-used vreg, that vreg's value must
// reach every use of li that this val# covers.
870 unsigned ImpUse = getReMatImplicitUse(li, MI);
872 const LiveInterval &ImpLi = getInterval(ImpUse);
873 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
874 re = mri_->use_end(); ri != re; ++ri) {
875 MachineInstr *UseMI = &*ri;
876 unsigned UseIdx = getInstructionIndex(UseMI);
877 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
879 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
886 /// isReMaterializable - Returns true if every definition of MI of every
887 /// val# of the specified interval is re-materializable.
888 bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
890 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
892 const VNInfo *VNI = *i;
893 unsigned DefIdx = VNI->def;
895 continue; // Dead val#.
896 // Is the def for the val# rematerializable?
899 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
900 bool DefIsLoad = false;
// Every val#'s defining instruction must pass the per-val# check.
902 !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
909 /// FilterFoldedOps - Filter out two-address use operands. Return
910 /// true if it finds any issue with the operands that ought to prevent
/// folding; FoldOps receives the surviving operand indexes and MRInfo
/// accumulates the mod/ref kind of the folded access.
912 static bool FilterFoldedOps(MachineInstr *MI,
913 SmallVector<unsigned, 2> &Ops,
915 SmallVector<unsigned, 2> &FoldOps) {
916 const TargetInstrDesc &TID = MI->getDesc();
919 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
920 unsigned OpIdx = Ops[i];
921 MachineOperand &MO = MI->getOperand(OpIdx);
922 // FIXME: fold subreg use.
926 MRInfo |= (unsigned)VirtRegMap::isMod;
928 // Filter out two-address use operand(s).
929 if (!MO.isImplicit() &&
930 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
// A tied use implies the instruction both reads and writes the value.
931 MRInfo = VirtRegMap::isModRef;
934 MRInfo |= (unsigned)VirtRegMap::isRef;
936 FoldOps.push_back(OpIdx);
942 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
943 /// slot / to reg or any rematerialized load into ith operand of specified
944 /// MI. If it is successful, MI is updated with the newly created MI and
946 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
947 VirtRegMap &vrm, MachineInstr *DefMI,
949 SmallVector<unsigned, 2> &Ops,
950 bool isSS, int Slot, unsigned Reg) {
951 // If it is an implicit def instruction, just delete it.
952 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
953 RemoveMachineInstrFromMaps(MI);
954 vrm.RemoveMachineInstrFromMaps(MI);
955 MI->eraseFromParent();
960 // Filter the list of operand indexes that are to be folded. Abort if
961 // any operand will prevent folding.
963 SmallVector<unsigned, 2> FoldOps;
964 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
967 // The only time it's safe to fold into a two address instruction is when
968 // it's folding reload and spill from / into a spill stack slot.
969 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Fold either the stack slot (isSS) or the rematerialized def (DefMI)
// into the instruction.
972 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
973 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
975 // Remember this instruction uses the spill slot.
976 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
978 // Attempt to fold the memory reference into the instruction. If
979 // we can do this, we don't need to insert spill code.
980 MachineBasicBlock &MBB = *MI->getParent();
981 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
982 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Transfer bookkeeping from the old instruction to the folded one.
983 vrm.transferSpillPts(MI, fmi);
984 vrm.transferRestorePts(MI, fmi);
985 vrm.transferEmergencySpills(MI, fmi);
987 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
988 mi2iMap_[fmi] = InstrIdx;
989 MI = MBB.insert(MBB.erase(MI), fmi);
996 /// canFoldMemoryOperand - Returns true if the specified load / store
997 /// folding is possible.
998 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
999 SmallVector<unsigned, 2> &Ops,
1001 // Filter the list of operand indexes that are to be folded. Abort if
1002 // any operand will prevent folding.
1003 unsigned MRInfo = 0;
1004 SmallVector<unsigned, 2> FoldOps;
1005 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1008 // It's only legal to remat for a use, not a def.
1009 if (ReMat && (MRInfo & VirtRegMap::isMod))
// Delegate the final feasibility decision to the target.
1012 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Walk every live range of li, mapping each range's
// start index to a basic block via binary search on Idx2MBBMap; the
// interval is "in one MBB" only if all ranges land in the same block and
// no single range crosses a block boundary.
// NOTE(review): the return statements and closing brace are not visible
// here (numbering jumps 1021->1023, 1026->1032).
1015 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1016   SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1017   for (LiveInterval::Ranges::const_iterator
1018          I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// Find the first block whose start index is >= this range's start.
1019     std::vector<IdxMBBPair>::const_iterator II =
1020       std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1021     if (II == Idx2MBBMap.end())
1023     if (I->end > II->first) // crossing a MBB.
1025     MBBs.insert(II->second);
1026     if (MBBs.size() > 1)
1032 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1033 /// interval on to-be re-materialized operands of MI) with new register.
1034 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1035                                        MachineInstr *MI, unsigned NewVReg,
// NOTE(review): the VirtRegMap &vrm parameter line is not visible here
// (numbering jumps 1035->1037); the body reads `vrm`.
1037   // There is an implicit use. That means one of the other operand is
1038   // being remat'ed and the remat'ed instruction has li.reg as an
1039   // use operand. Make sure we rewrite that as well.
1040   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1041     MachineOperand &MO = MI->getOperand(i);
1042     if (!MO.isRegister())
// Only virtual registers can have been rematerialized; skip physregs.
1044     unsigned Reg = MO.getReg();
1045     if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1047     if (!vrm.isReMaterialized(Reg))
// The remat'ed instruction itself may use li.reg — retarget that use to
// the replacement vreg as well.
1049     MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1050     MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1052       UseMO->setReg(NewVReg);
1056 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1057 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// Rewrites one instruction's operands that reference li.reg to use a new
// virtual register, optionally folding a load/store or deleting a fully
// rematerialized def, and records the new spill interval's live range.
// Returns whether all the rewritten operands could have been folded.
// NOTE(review): this copy is missing interior lines throughout (the
// embedded numbering has many gaps, e.g. 1081->1086, 1097->1101,
// 1184->1186) — continues, returns, and the RestartInstruction label are
// among the dropped lines. Do not treat the visible text as complete.
1058 bool LiveIntervals::
1059 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1060                  bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
1061                  MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1062                  unsigned Slot, int LdSlot,
1063                  bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1065                  const TargetRegisterClass* rc,
1066                  SmallVector<int, 4> &ReMatIds,
1067                  const MachineLoopInfo *loopInfo,
1068                  unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1069                  std::map<unsigned,unsigned> &MBBVRegsMap,
1070                  std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1071   MachineBasicBlock *MBB = MI->getParent();
1072   unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1073   bool CanFold = false;
// Scan every operand; each operand referencing li.reg is rewritten below.
1075   for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1076     MachineOperand& mop = MI->getOperand(i);
1077     if (!mop.isRegister())
1079     unsigned Reg = mop.getReg();
1080     unsigned RegI = Reg;
1081     if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1086     bool TryFold = !DefIsReMat;
1087     bool FoldSS = true; // Default behavior unless it's a remat.
1088     int FoldSlot = Slot;
1090       // If this is the rematerializable definition MI itself and
1091       // all of its uses are rematerialized, simply delete it.
1092       if (MI == ReMatOrigDefMI && CanDelete) {
1093         DOUT << "\t\t\t\tErasing re-materlizable def: ";
1095         RemoveMachineInstrFromMaps(MI);
1096         vrm.RemoveMachineInstrFromMaps(MI);
1097         MI->eraseFromParent();
1101       // If def for this use can't be rematerialized, then try folding.
1102       // If def is rematerializable and it's a load, also try folding.
1103       TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1105         // Try fold loads (from stack slot, constant pool, etc.) into uses.
1111     // Scan all of the operands of this instruction rewriting operands
1112     // to use NewVReg instead of li.reg as appropriate.  We do this for
1115     // 1. If the instr reads the same spilled vreg multiple times, we
1116     //    want to reuse the NewVReg.
1117     // 2. If the instr is a two-addr instruction, we are required to
1118     //    keep the src/dst regs pinned.
1120     // Keep track of whether we replace a use and/or def so that we can
1121     // create the spill interval with the appropriate range.
1123     HasUse = mop.isUse();
1124     HasDef = mop.isDef();
1125     SmallVector<unsigned, 2> Ops;
// Collect all later operands of the same register so they can be
// rewritten together and considered for folding as a unit.
1127     for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1128       const MachineOperand &MOj = MI->getOperand(j);
1129       if (!MOj.isRegister())
1131       unsigned RegJ = MOj.getReg();
1132       if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1136         HasUse |= MOj.isUse();
1137         HasDef |= MOj.isDef();
1141     if (HasUse && !li.liveAt(getUseIndex(index)))
1142       // Must be defined by an implicit def. It should not be spilled. Note,
1143       // this is for correctness reason. e.g.
1144       // 8   %reg1024<def> = IMPLICIT_DEF
1145       // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1146       // The live range [12, 14) are not part of the r1024 live interval since
1147       // it's defined by an implicit def. It will not conflicts with live
1148       // interval of r1025. Now suppose both registers are spilled, you can
1149       // easily see a situation where both registers are reloaded before
1150       // the INSERT_SUBREG and both target registers that would overlap.
1153     // Update stack slot spill weight if we are splitting.
1154     float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
1161       // Do not fold load / store here if we are splitting. We'll find an
1162       // optimal point to insert a load / store later.
1164       if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1165                                Ops, FoldSS, FoldSlot, Reg)) {
1166         // Folding the load/store can completely change the instruction in
1167         // unpredictable ways, rescan it from the beginning.
// If folding deleted MI entirely there is nothing left to rewrite;
// otherwise restart the operand scan on the new instruction.
1171         if (isRemoved(MI)) {
1175         goto RestartInstruction;
1178         // We'll try to fold it later if it's profitable.
1179         CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1183     // Create a new virtual register for the spill interval.
1184     bool CreatedNewVReg = false;
1186       NewVReg = mri_->createVirtualRegister(rc);
1188       CreatedNewVReg = true;
// Rewrite this operand and all the sibling operands collected in Ops.
1190     mop.setReg(NewVReg);
1191     if (mop.isImplicit())
1192       rewriteImplicitOps(li, MI, NewVReg, vrm);
1194     // Reuse NewVReg for other reads.
1195     for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1196       MachineOperand &mopj = MI->getOperand(Ops[j]);
1197       mopj.setReg(NewVReg);
1198       if (mopj.isImplicit())
1199         rewriteImplicitOps(li, MI, NewVReg, vrm);
1202     if (CreatedNewVReg) {
// Record remat info / stack slot assignment for the fresh vreg. Each
// val# gets its own remat id, shared by all vregs split from it.
1204         vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1205         if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1206           // Each valnum may have its own remat id.
1207           ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1209           vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1211         if (!CanDelete || (HasUse && HasDef)) {
1212           // If this is a two-addr instruction then its use operands are
1213           // rematerializable but its def is not. It should be assigned a
1215           vrm.assignVirt2StackSlot(NewVReg, Slot);
1218         vrm.assignVirt2StackSlot(NewVReg, Slot);
1220     } else if (HasUse && HasDef &&
1221                vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1222       // If this interval hasn't been assigned a stack slot (because earlier
1223       // def is a deleted remat def), do it now.
1224       assert(Slot != VirtRegMap::NO_STACK_SLOT);
1225       vrm.assignVirt2StackSlot(NewVReg, Slot);
1228     // Re-matting an instruction with virtual register use. Add the
1229     // register as an implicit use on the use MI.
1230     if (DefIsReMat && ImpUse)
1231       MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1233     // create a new register interval for this spill / remat.
1234     LiveInterval &nI = getOrCreateInterval(NewVReg);
1235     if (CreatedNewVReg) {
1236       NewLIs.push_back(&nI);
1237       MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1239         vrm.setIsSplitFromReg(NewVReg, li.reg);
// Give the new interval a tiny range covering just this def and/or use;
// for a split interval that already exists, extend its last range.
1243       if (CreatedNewVReg) {
1244         LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1245                      nI.getNextValue(~0U, 0, VNInfoAllocator));
1249         // Extend the split live interval to this def / use.
1250         unsigned End = getUseIndex(index)+1;
1251         LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1252                      nI.getValNumInfo(nI.getNumValNums()-1));
1258       LiveRange LR(getDefIndex(index), getStoreIndex(index),
1259                    nI.getNextValue(~0U, 0, VNInfoAllocator));
1264     DOUT << "\t\t\t\tAdded new interval: ";
1265     nI.print(DOUT, tri_);
// anyKillInMBBAfterIdx - Return true if any kill of VNI falls inside MBB
// strictly after index Idx (and before the block's end index).
// NOTE(review): the `const VNInfo *VNI` parameter line and the return
// statements are not visible in this copy (numbering jumps 1270->1272,
// 1276->1282).
1270 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1272                                          MachineBasicBlock *MBB, unsigned Idx) const {
1273   unsigned End = getMBBEndIdx(MBB);
1274   for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1275     unsigned KillIdx = VNI->kills[j];
1276     if (KillIdx > Idx && KillIdx < End)
1282 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1283 /// during spilling.
// NOTE(review): the member declarations (Index, MI, HasUse, HasDef) are
// not visible in this copy (numbering jumps 1285->1290); only the
// constructor shows them.
1285   struct RewriteInfo {
1290     RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1291       : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// Strict-weak ordering by instruction index, used to sort RewriteMIs so
// defs/uses are processed in program order.
1294   struct RewriteInfoCompare {
1295     bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1296       return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Rewrite every def/use of li.reg inside
// live range I: collect them sorted by instruction index, call
// rewriteInstructionForSpills on each instruction, and record per-MBB
// spill (last def) and restore (first use) points for the splitter.
// NOTE(review): many interior lines are missing from this copy (the
// embedded numbering is gappy, e.g. 1384->1391, 1405->1412, 1470->1477);
// several closing braces, continues and conditions are not visible.
1301 void LiveIntervals::
1302 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1303                     LiveInterval::Ranges::const_iterator &I,
1304                     MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1305                     unsigned Slot, int LdSlot,
1306                     bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1308                     const TargetRegisterClass* rc,
1309                     SmallVector<int, 4> &ReMatIds,
1310                     const MachineLoopInfo *loopInfo,
1311                     BitVector &SpillMBBs,
1312                     std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
1313                     BitVector &RestoreMBBs,
1314                     std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1315                     std::map<unsigned,unsigned> &MBBVRegsMap,
1316                     std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1317   bool AllCanFold = true;
1318   unsigned NewVReg = 0;
1319   unsigned start = getBaseIndex(I->start);
1320   unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1322   // First collect all the def / use in this live range that will be rewritten.
1323   // Make sure they are sorted according to instruction index.
1324   std::vector<RewriteInfo> RewriteMIs;
1325   for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1326          re = mri_->reg_end(); ri != re; ) {
1327     MachineInstr *MI = &*ri;
1328     MachineOperand &O = ri.getOperand();
1330     assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1331     unsigned index = getInstructionIndex(MI);
// Only instructions inside this live range participate.
1332     if (index < start || index >= end)
1334     if (O.isUse() && !li.liveAt(getUseIndex(index)))
1335       // Must be defined by an implicit def. It should not be spilled. Note,
1336       // this is for correctness reason. e.g.
1337       // 8   %reg1024<def> = IMPLICIT_DEF
1338       // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1339       // The live range [12, 14) are not part of the r1024 live interval since
1340       // it's defined by an implicit def. It will not conflicts with live
1341       // interval of r1025. Now suppose both registers are spilled, you can
1342       // easily see a situation where both registers are reloaded before
1343       // the INSERT_SUBREG and both target registers that would overlap.
1345     RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1347   std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1349   unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1350   // Now rewrite the defs and uses.
1351   for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1352     RewriteInfo &rwi = RewriteMIs[i];
1354     unsigned index = rwi.Index;
1355     bool MIHasUse = rwi.HasUse;
1356     bool MIHasDef = rwi.HasDef;
1357     MachineInstr *MI = rwi.MI;
1358     // If MI def and/or use the same register multiple times, then there
1359     // are multiple entries.
1360     unsigned NumUses = MIHasUse;
// Coalesce all RewriteInfo entries belonging to this one instruction.
1361     while (i != e && RewriteMIs[i].MI == MI) {
1362       assert(RewriteMIs[i].Index == index);
1363       bool isUse = RewriteMIs[i].HasUse;
1364       if (isUse) ++NumUses;
1366       MIHasDef |= RewriteMIs[i].HasDef;
1369     MachineBasicBlock *MBB = MI->getParent();
1371     if (ImpUse && MI != ReMatDefMI) {
1372       // Re-matting an instruction with virtual register use. Update the
1373       // register interval's spill weight to HUGE_VALF to prevent it from
// ... being spilled in turn.
1375       LiveInterval &ImpLi = getInterval(ImpUse);
1376       ImpLi.weight = HUGE_VALF;
1379     unsigned MBBId = MBB->getNumber();
1380     unsigned ThisVReg = 0;
// When splitting per-BB, reuse the vreg already chosen for this block.
1382       std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
1383       if (NVI != MBBVRegsMap.end()) {
1384         ThisVReg = NVI->second;
1391           // It's better to start a new interval to avoid artifically
1392           // extend the new interval.
1393           if (MIHasDef && !MIHasUse) {
1394             MBBVRegsMap.erase(MBB->getNumber());
1400     bool IsNew = ThisVReg == 0;
1402       // This ends the previous live interval. If all of its def / use
1403       // can be folded, give it a low spill weight.
1404       if (NewVReg && TrySplit && AllCanFold) {
1405         LiveInterval &nI = getOrCreateInterval(NewVReg);
1412     bool HasDef = false;
1413     bool HasUse = false;
1414     bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1415                          index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1416                          Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1417                          CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1418                          ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
1419     if (!HasDef && !HasUse)
1422     AllCanFold &= CanFold;
1424     // Update weight of spill interval.
1425     LiveInterval &nI = getOrCreateInterval(NewVReg);
1427       // The spill weight is now infinity as it cannot be spilled again.
1428       nI.weight = HUGE_VALF;
1432     // Keep track of the last def and first use in each MBB.
1434       if (MI != ReMatOrigDefMI || !CanDelete) {
1435         bool HasKill = false;
1437           HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1439           // If this is a two-address code, then this index starts a new VNInfo.
1440           const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1442             HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1444         std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1445           SpillIdxes.find(MBBId);
// Record / update this block's pending spill point for NewVReg.
1447           if (SII == SpillIdxes.end()) {
1448             std::vector<SRInfo> S;
1449             S.push_back(SRInfo(index, NewVReg, true));
1450             SpillIdxes.insert(std::make_pair(MBBId, S));
1451           } else if (SII->second.back().vreg != NewVReg) {
1452             SII->second.push_back(SRInfo(index, NewVReg, true));
1453           } else if ((int)index > SII->second.back().index) {
1454             // If there is an earlier def and this is a two-address
1455             // instruction, then it's not possible to fold the store (which
1456             // would also fold the load).
1457             SRInfo &Info = SII->second.back();
1459             Info.canFold = !HasUse;
1461           SpillMBBs.set(MBBId);
1462         } else if (SII != SpillIdxes.end() &&
1463                    SII->second.back().vreg == NewVReg &&
1464                    (int)index > SII->second.back().index) {
1465           // There is an earlier def that's not killed (must be two-address).
1466           // The spill is no longer needed.
1467           SII->second.pop_back();
1468           if (SII->second.empty()) {
1469             SpillIdxes.erase(MBBId);
1470             SpillMBBs.reset(MBBId);
1477         std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1478           SpillIdxes.find(MBBId);
1479         if (SII != SpillIdxes.end() &&
1480             SII->second.back().vreg == NewVReg &&
1481             (int)index > SII->second.back().index)
1482           // Use(s) following the last def, it's not safe to fold the spill.
1483           SII->second.back().canFold = false;
1484         std::map<unsigned, std::vector<SRInfo> >::iterator RII =
1485           RestoreIdxes.find(MBBId);
1486         if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1487           // If we are splitting live intervals, only fold if it's the first
1488           // use and there isn't another use later in the MBB.
1489           RII->second.back().canFold = false;
1491           // Only need a reload if there isn't an earlier def / use.
1492           if (RII == RestoreIdxes.end()) {
1493             std::vector<SRInfo> Infos;
1494             Infos.push_back(SRInfo(index, NewVReg, true));
1495             RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1497             RII->second.push_back(SRInfo(index, NewVReg, true));
1499         RestoreMBBs.set(MBBId);
1503     // Update spill weight.
1504     unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1505     nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1508   if (NewVReg && TrySplit && AllCanFold) {
1509     // If all of its def / use can be folded, give it a low spill weight.
1510     LiveInterval &nI = getOrCreateInterval(NewVReg);
// alsoFoldARestore - Return true if there is a foldable restore recorded
// for vreg vr at instruction index 'index' in basic block Id.
// NOTE(review): the return statements and closing brace are not visible
// in this copy (numbering jumps 1524->1529).
1515 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1516                                      BitVector &RestoreMBBs,
1517                         std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1518   if (!RestoreMBBs[Id])
1520   std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1521   for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1522     if (Restores[i].index == index &&
1523         Restores[i].vreg == vr &&
1524         Restores[i].canFold)
1529 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1530 BitVector &RestoreMBBs,
1531 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1532 if (!RestoreMBBs[Id])
1534 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1535 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1536 if (Restores[i].index == index && Restores[i].vreg)
1537 Restores[i].index = -1;
1540 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1541 /// spilled and create empty intervals for their uses.
// NOTE(review): interior lines are missing from this copy (numbering
// jumps 1549->1552, 1567->1575) — among them the def/use branch test and
// the iterator advance; the loop body below is not complete as shown.
1543 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1544                                     const TargetRegisterClass* rc,
1545                                     std::vector<LiveInterval*> &NewLIs) {
1546   for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1547          re = mri_->reg_end(); ri != re; ) {
1548     MachineOperand &O = ri.getOperand();
1549     MachineInstr *MI = &*ri;
// Defs of li.reg remaining here must be the IMPLICIT_DEFs themselves;
// delete them and drop them from the index maps.
1552       assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1553              "Register def was not rewritten?");
1554       RemoveMachineInstrFromMaps(MI);
1555       vrm.RemoveMachineInstrFromMaps(MI);
1556       MI->eraseFromParent();
1558       // This must be an use of an implicit_def so it's not part of the live
1559       // interval. Create a new empty live interval for it.
1560       // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1561       unsigned NewVReg = mri_->createVirtualRegister(rc);
1563       vrm.setIsImplicitlyDefined(NewVReg);
1564       NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Retarget every operand of this user that still names li.reg.
1565       for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1566         MachineOperand &MO = MI->getOperand(i);
1567         if (MO.isReg() && MO.getReg() == li.reg)
// addIntervalsForSpills - Spill (or split) live interval li: decide
// per-val# rematerializability, assign a stack slot if needed, rewrite
// all defs/uses into new short intervals, then place/fold spills and
// restores at the per-MBB points collected by
// rewriteInstructionsForSpills. Returns the newly created intervals.
// NOTE(review): this copy has many interior lines missing (the embedded
// numbering is gappy throughout, e.g. 1649->1651, 1751->1758,
// 1822->1826); conditions, returns and closing braces are not all
// visible. Comments below describe only what the visible code shows.
1575 std::vector<LiveInterval*> LiveIntervals::
1576 addIntervalsForSpills(const LiveInterval &li,
1577                       const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1579   assert(li.weight != HUGE_VALF &&
1580          "attempt to spill already spilled interval!");
1582   DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1583   li.print(DOUT, tri_);
1586   // Spill slot weight.
1589   // Each bit specify whether it a spill is required in the MBB.
1590   BitVector SpillMBBs(mf_->getNumBlockIDs());
1591   std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1592   BitVector RestoreMBBs(mf_->getNumBlockIDs());
1593   std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1594   std::map<unsigned,unsigned> MBBVRegsMap;
1595   std::vector<LiveInterval*> NewLIs;
1596   const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-val# rematerialization bookkeeping, indexed by VNInfo id.
1598   unsigned NumValNums = li.getNumValNums();
1599   SmallVector<MachineInstr*, 4> ReMatDefs;
1600   ReMatDefs.resize(NumValNums, NULL);
1601   SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1602   ReMatOrigDefs.resize(NumValNums, NULL);
1603   SmallVector<int, 4> ReMatIds;
1604   ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1605   BitVector ReMatDelete(NumValNums);
1606   unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1608   // Spilling a split live interval. It cannot be split any further. Also,
1609   // it's also guaranteed to be a single val# / range interval.
1610   if (vrm.getPreSplitReg(li.reg)) {
1611     vrm.setIsSplitFromReg(li.reg, 0);
1612     // Unset the split kill marker on the last use.
1613     unsigned KillIdx = vrm.getKillPoint(li.reg);
1615       MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1616       assert(KillMI && "Last use disappeared?");
1617       int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1618       assert(KillOp != -1 && "Last use disappeared?");
1619       KillMI->getOperand(KillOp).setIsKill(false);
1621     vrm.removeKillPoint(li.reg);
1622     bool DefIsReMat = vrm.isReMaterialized(li.reg);
1623     Slot = vrm.getStackSlot(li.reg);
1624     assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1625     MachineInstr *ReMatDefMI = DefIsReMat ?
1626       vrm.getReMaterializedMI(li.reg) : NULL;
1628     bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1629     bool isLoad = isLoadSS ||
1630       (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1631     bool IsFirstRange = true;
1632     for (LiveInterval::Ranges::const_iterator
1633            I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1634       // If this is a split live interval with multiple ranges, it means there
1635       // are two-address instructions that re-defined the value. Only the
1636       // first def can be rematerialized!
1638         // Note ReMatOrigDefMI has already been deleted.
1639         rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1640                              Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1641                              false, vrm, rc, ReMatIds, loopInfo,
1642                              SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1643                              MBBVRegsMap, NewLIs, SSWeight);
1645         rewriteInstructionsForSpills(li, false, I, NULL, 0,
1646                              Slot, 0, false, false, false,
1647                              false, vrm, rc, ReMatIds, loopInfo,
1648                              SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1649                              MBBVRegsMap, NewLIs, SSWeight);
1651       IsFirstRange = false;
1654     SSWeight = 0.0f;  // Already accounted for when split.
1655     handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Splitting is only attempted for intervals spanning multiple blocks,
// and only while under the -split-limit budget.
1659   bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1660   if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1664   bool NeedStackSlot = false;
1665   for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1667     const VNInfo *VNI = *i;
1668     unsigned VN = VNI->id;
1669     unsigned DefIdx = VNI->def;
1671       continue; // Dead val#.
1672     // Is the def for the val# rematerializable?
1673     MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1674       ? 0 : getInstructionFromIndex(DefIdx);
1676     if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1677       // Remember how to remat the def of this val#.
1678       ReMatOrigDefs[VN] = ReMatDefMI;
1679       // Original def may be modified so we have to make a copy here.
1680       MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1681       ClonedMIs.push_back(Clone);
1682       ReMatDefs[VN] = Clone;
1684       bool CanDelete = true;
1685       if (VNI->hasPHIKill) {
1686         // A kill is a phi node, not all of its uses can be rematerialized.
1687         // It must not be deleted.
1689         // Need a stack slot if there is any live range where uses cannot be
1691         NeedStackSlot = true;
1694         ReMatDelete.set(VN);
1696       // Need a stack slot if there is any live range where uses cannot be
1698       NeedStackSlot = true;
1702   // One stack slot per live interval.
1703   if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1704     Slot = vrm.assignVirt2StackSlot(li.reg);
1706   // Create new intervals and rewrite defs and uses.
1707   for (LiveInterval::Ranges::const_iterator
1708          I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1709     MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1710     MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1711     bool DefIsReMat = ReMatDefMI != NULL;
1712     bool CanDelete = ReMatDelete[I->valno->id];
1714     bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1715     bool isLoad = isLoadSS ||
1716       (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1717     rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1718                                Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1719                                CanDelete, vrm, rc, ReMatIds, loopInfo,
1720                                SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1721                                MBBVRegsMap, NewLIs, SSWeight);
1724   // Insert spills / restores if we are splitting.
1726     handleSpilledImpDefs(li, vrm, rc, NewLIs);
1730   SmallPtrSet<LiveInterval*, 4> AddedKill;
1731   SmallVector<unsigned, 2> Ops;
1732   if (NeedStackSlot) {
// Walk every block with a pending spill and either fold the store into
// the defining instruction or ask the spiller to insert one.
1733     int Id = SpillMBBs.find_first();
1735       MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1736       unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1737       std::vector<SRInfo> &spills = SpillIdxes[Id];
1738       for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1739         int index = spills[i].index;
1740         unsigned VReg = spills[i].vreg;
1741         LiveInterval &nI = getOrCreateInterval(VReg);
1742         bool isReMat = vrm.isReMaterialized(VReg);
1743         MachineInstr *MI = getInstructionFromIndex(index);
1744         bool CanFold = false;
1745         bool FoundUse = false;
1747         if (spills[i].canFold) {
1749           for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1750             MachineOperand &MO = MI->getOperand(j);
1751             if (!MO.isRegister() || MO.getReg() != VReg)
1758               (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1759                                               RestoreMBBs, RestoreIdxes))) {
1760               // MI has two-address uses of the same register. If the use
1761               // isn't the first and only use in the BB, then we can't fold
1762               // it. FIXME: Move this to rewriteInstructionsForSpills.
1769         // Fold the store into the def if possible.
1770         bool Folded = false;
1771         if (CanFold && !Ops.empty()) {
1772           if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1775               // Also folded uses, do not issue a load.
1776               eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1777               nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1779               nI.removeRange(getDefIndex(index), getStoreIndex(index));
1783         // Otherwise tell the spiller to issue a spill.
1785           LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1786           bool isKill = LR->end == getStoreIndex(index);
1787           if (!MI->registerDefIsDead(nI.reg))
1788             // No need to spill a dead def.
1789             vrm.addSpillPoint(VReg, isKill, MI);
1791             AddedKill.insert(&nI);
1794         // Update spill slot weight.
1796           SSWeight += getSpillWeight(true, false, loopDepth);
1798       Id = SpillMBBs.find_next(Id);
// Walk every block with a pending restore and either fold the load (or
// the rematerialized def, if it is a load) or record a restore point.
1802   int Id = RestoreMBBs.find_first();
1804     MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1805     unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1807     std::vector<SRInfo> &restores = RestoreIdxes[Id];
1808     for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1809       int index = restores[i].index;
// index == -1 entries were invalidated by eraseRestoreInfo.
1812       unsigned VReg = restores[i].vreg;
1813       LiveInterval &nI = getOrCreateInterval(VReg);
1814       bool isReMat = vrm.isReMaterialized(VReg);
1815       MachineInstr *MI = getInstructionFromIndex(index);
1816       bool CanFold = false;
1818       if (restores[i].canFold) {
1820         for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1821           MachineOperand &MO = MI->getOperand(j);
1822           if (!MO.isRegister() || MO.getReg() != VReg)
1826             // If this restore were to be folded, it would have been folded
1835       // Fold the load into the use if possible.
1836       bool Folded = false;
1837       if (CanFold && !Ops.empty()) {
1839           Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1841           MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1843           bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1844           // If the rematerializable def is a load, also try to fold it.
1845           if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1846             Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1847                                           Ops, isLoadSS, LdSlot, VReg);
1848           unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1850             // Re-matting an instruction with virtual register use. Add the
1851             // register as an implicit use on the use MI and update the register
1852             // interval's spill weight to HUGE_VALF to prevent it from being
1854             LiveInterval &ImpLi = getInterval(ImpUse);
1855             ImpLi.weight = HUGE_VALF;
1856             MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1860       // If folding is not possible / failed, then tell the spiller to issue a
1861       // load / rematerialization for us.
1863         nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1865         vrm.addRestorePoint(VReg, MI);
1867       // Update spill slot weight.
1869         SSWeight += getSpillWeight(false, true, loopDepth);
1871     Id = RestoreMBBs.find_next(Id);
1874   // Finalize intervals: add kills, finalize spill weights, and filter out
1876   std::vector<LiveInterval*> RetNewLIs;
1877   for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1878     LiveInterval *LI = NewLIs[i];
// Normalize the accumulated weight by the interval's approximate size.
1880       LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
1881       if (!AddedKill.count(LI)) {
// Mark the interval's final use as a kill unless it is a tied (two-addr)
// operand, and remember the kill point in the VirtRegMap.
1882         LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1883         unsigned LastUseIdx = getBaseIndex(LR->end);
1884         MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1885         int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1886         assert(UseIdx != -1);
1887         if (LastUse->getOperand(UseIdx).isImplicit() ||
1888             LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
1889           LastUse->getOperand(UseIdx).setIsKill();
1890           vrm.addKillPoint(LI->reg, LastUseIdx);
1893       RetNewLIs.push_back(LI);
1897   handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1901 /// hasAllocatableSuperReg - Return true if the specified physical register has
1902 /// any super register that's allocatable.
// NOTE(review): the return statements and closing brace are not visible
// in this copy (numbering jumps 1905->1910).
1903 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1904   for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1905     if (allocatableRegs_[*AS] && hasInterval(*AS))
1910 /// getRepresentativeReg - Find the largest super register of the specified
1911 /// physical register.
// NOTE(review): the loop body's update of BestReg and the final return
// are not visible in this copy (numbering jumps 1917->1925).
1912 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1913   // Find the largest super-register that is allocatable.
1914   unsigned BestReg = Reg;
1915   for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1916     unsigned SuperReg = *AS;
// A super-register with no allocatable super of its own is the largest
// allocatable candidate seen so far.
1917     if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1925 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1926 /// specified interval that conflicts with the specified physical register.
// NOTE(review): the increment of NumConflicts inside the liveAt branch is
// not visible in this copy (numbering jumps 1936->1939).
1927 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1928                                                    unsigned PhysReg) const {
1929   unsigned NumConflicts = 0;
// Conflicts are counted against the representative (largest allocatable
// super) register's interval.
1930   const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1931   for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1932          E = mri_->reg_end(); I != E; ++I) {
1933     MachineOperand &O = I.getOperand();
1934     MachineInstr *MI = O.getParent();
1935     unsigned Index = getInstructionIndex(MI);
1936     if (pli.liveAt(Index))
1939   return NumConflicts;
1942 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1943 /// around all defs and uses of the specified interval.
// NOTE(review): several interior lines are missing from this copy
// (numbering jumps 1961->1964, 1969->1971, 1973->1979) — among them the
// SeenMIs.insert call and closing braces.
1944 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1945                                                   unsigned PhysReg, VirtRegMap &vrm) {
// Spill at the level of the largest allocatable super-register so that
// all aliased sub-registers are covered by one emergency spill.
1946   unsigned SpillReg = getRepresentativeReg(PhysReg);
1948   for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1949     // If there are registers which alias PhysReg, but which are not a
1950     // sub-register of the chosen representative super register. Assert
1951     // since we can't handle it yet.
1952     assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
1953            tri_->isSuperRegister(*AS, SpillReg));
1955   LiveInterval &pli = getInterval(SpillReg);
1956   SmallPtrSet<MachineInstr*, 8> SeenMIs;
// Visit each instruction touching li.reg once; where the physreg is live
// there, record an emergency spill and carve the slot out of its (and
// its live sub-registers') intervals.
1957   for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1958          E = mri_->reg_end(); I != E; ++I) {
1959     MachineOperand &O = I.getOperand();
1960     MachineInstr *MI = O.getParent();
1961     if (SeenMIs.count(MI))
1964     unsigned Index = getInstructionIndex(MI);
1965     if (pli.liveAt(Index)) {
1966       vrm.addEmergencySpill(SpillReg, MI);
1967       pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1968       for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
1969         if (!hasInterval(*AS))
1971         LiveInterval &spli = getInterval(*AS);
1972         if (spli.liveAt(Index))
1973           spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1979 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
1980 MachineInstr* startInst) {
1981 LiveInterval& Interval = getOrCreateInterval(reg);
1982 VNInfo* VN = Interval.getNextValue(
1983 getInstructionIndex(startInst) + InstrSlots::DEF,
1984 startInst, getVNInfoAllocator());
1985 VN->hasPHIKill = true;
1986 VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
1987 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
1988 getMBBEndIdx(startInst->getParent()) + 1, VN);
1989 Interval.addRange(LR);