//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);
static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveIntervals::releaseMemory() {
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
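
  // A brief sketch of the numbering scheme used below (assuming the usual
  // InstrSlots layout from the header: LOAD = 0, USE = 1, DEF = 2, STORE = 3,
  // NUM = 4): an instruction whose base index is 8 owns slots 8-11, followed
  // by an empty gap at 12-15 before the next instruction. The empty slots
  // inserted at the start of each block and after every instruction
  // presumably leave room for instructions added later (e.g. spills and
  // reloads) without renumbering everything.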
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;

      // Insert an empty slot after every instruction.
      MIIndex += InstrSlots::NUM;
      i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
131 if (!OldI2MI.empty())
132 for (iterator OI = begin(), OE = end(); OI != OE; ++OI)
133 for (LiveInterval::iterator LI = OI->second.begin(),
134 LE = OI->second.end(); LI != LE; ++LI) {
136 // Remap the start index of the live range to the corresponding new
137 // number, or our best guess at what it _should_ correspond to if the
138 // original instruction has been erased. This is either the following
139 // instruction or its predecessor.
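// For example (assuming InstrSlots::NUM == 4), an old start index of 42
// decomposes into index 42/4 = 10 and offset 42%4 = 2; unless the offset is
// the LOAD slot, the new start is the new index of old instruction #10 plus
// that same offset.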
140 unsigned index = LI->start / InstrSlots::NUM;
141 unsigned offset = LI->start % InstrSlots::NUM;
142 if (offset == InstrSlots::LOAD) {
143 std::vector<IdxMBBPair>::const_iterator I =
144 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
145 // Take the pair containing the index
146 std::vector<IdxMBBPair>::const_iterator J =
147 ((I != OldI2MBB.end() && I->first > index) ||
148 (I == OldI2MBB.end() && OldI2MBB.size()>0)) ? (I-1): I;
150 LI->start = getMBBStartIdx(J->second);
152 LI->start = mi2iMap_[OldI2MI[index]] + offset;
155 // Remap the ending index in the same way that we remapped the start,
156 // except for the final step where we always map to the immediately
157 // following instruction.
158 index = (LI->end - 1) / InstrSlots::NUM;
159 offset = LI->end % InstrSlots::NUM;
160 if (offset == InstrSlots::USE) {
161 std::vector<IdxMBBPair>::const_iterator I =
162 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
163 // Take the pair containing the index
164 std::vector<IdxMBBPair>::const_iterator J =
165 ((I != OldI2MBB.end() && I->first > index) ||
166 (I == OldI2MBB.end() && OldI2MBB.size()>0)) ? (I-1): I;
168 LI->end = getMBBEndIdx(J->second) + 1;
170 unsigned idx = index;
171 while (!OldI2MI[index]) ++index;
172 LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
175 // Remap the VNInfo def index, which works the same as the
176 // start indices above.
177 VNInfo* vni = LI->valno;
178 index = vni->def / InstrSlots::NUM;
179 offset = vni->def % InstrSlots::NUM;
180 if (offset == InstrSlots::LOAD) {
181 std::vector<IdxMBBPair>::const_iterator I =
182 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
183 // Take the pair containing the index
184 std::vector<IdxMBBPair>::const_iterator J =
185 ((I != OldI2MBB.end() && I->first > index) ||
186 (I == OldI2MBB.end() && OldI2MBB.size()>0)) ? (I-1): I;
188 vni->def = getMBBStartIdx(J->second);
191 vni->def = mi2iMap_[OldI2MI[index]] + offset;
194 // Remap the VNInfo kill indices, which works the same as
195 // the end indices above.
196 for (size_t i = 0; i < vni->kills.size(); ++i) {
197 index = (vni->kills[i]-1) / InstrSlots::NUM;
198 offset = vni->kills[i] % InstrSlots::NUM;
199 if (offset == InstrSlots::USE) {
200 std::vector<IdxMBBPair>::const_iterator I =
201 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
202 // Take the pair containing the index
203 std::vector<IdxMBBPair>::const_iterator J =
204 ((I != OldI2MBB.end() && I->first > index) ||
205 (I == OldI2MBB.end() && OldI2MBB.size()>0)) ? (I-1): I;
207 vni->kills[i] = getMBBEndIdx(J->second) + 1;
209 unsigned idx = index;
210 while (!OldI2MI[index]) ++index;
211 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
212 (idx == index ? offset : 0);
/// runOnMachineFunction - Compute live intervals for the whole function.
220 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
222 mri_ = &mf_->getRegInfo();
223 tm_ = &fn.getTarget();
224 tri_ = tm_->getRegisterInfo();
225 tii_ = tm_->getInstrInfo();
226 aa_ = &getAnalysis<AliasAnalysis>();
227 lv_ = &getAnalysis<LiveVariables>();
228 allocatableRegs_ = tri_->getAllocatableSet(fn);
233 numIntervals += getNumIntervals();
235 DOUT << "********** INTERVALS **********\n";
236 for (iterator I = begin(), E = end(); I != E; ++I) {
237 I->second.print(DOUT, tri_);
241 numIntervalsAfter += getNumIntervals();
246 /// print - Implement the dump method.
247 void LiveIntervals::print(std::ostream &O, const Module* ) const {
248 O << "********** INTERVALS **********\n";
249 for (const_iterator I = begin(), E = end(); I != E; ++I) {
250 I->second.print(O, tri_);
254 O << "********** MACHINEINSTRS **********\n";
255 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
256 mbbi != mbbe; ++mbbi) {
257 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
258 for (MachineBasicBlock::iterator mii = mbbi->begin(),
259 mie = mbbi->end(); mii != mie; ++mii) {
260 O << getInstructionIndex(mii) << '\t' << *mii;
265 /// conflictsWithPhysRegDef - Returns true if the specified register
266 /// is defined during the duration of the specified interval.
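/// (Roughly speaking, this lets the allocator ask, before assigning the
/// physical register 'reg' to this interval, whether any instruction inside
/// the interval already touches a register that overlaps 'reg'.)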
267 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
268 VirtRegMap &vrm, unsigned reg) {
269 for (LiveInterval::Ranges::const_iterator
270 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
271 for (unsigned index = getBaseIndex(I->start),
272 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
273 index += InstrSlots::NUM) {
274 // skip deleted instructions
275 while (index != end && !getInstructionFromIndex(index))
276 index += InstrSlots::NUM;
277 if (index == end) break;
279 MachineInstr *MI = getInstructionFromIndex(index);
280 unsigned SrcReg, DstReg;
281 if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
282 if (SrcReg == li.reg || DstReg == li.reg)
284 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
285 MachineOperand& mop = MI->getOperand(i);
286 if (!mop.isRegister())
288 unsigned PhysReg = mop.getReg();
289 if (PhysReg == 0 || PhysReg == li.reg)
291 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
292 if (!vrm.hasPhys(PhysReg))
294 PhysReg = vrm.getPhys(PhysReg);
296 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
305 void LiveIntervals::printRegName(unsigned reg) const {
306 if (TargetRegisterInfo::isPhysicalRegister(reg))
307 cerr << tri_->getName(reg);
309 cerr << "%reg" << reg;
312 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
313 MachineBasicBlock::iterator mi,
314 unsigned MIIdx, MachineOperand& MO,
316 LiveInterval &interval) {
317 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
318 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
320 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
DOUT << "is an implicit_def\n";
325 // Virtual registers may be defined multiple times (due to phi
326 // elimination and 2-addr elimination). Much of what we do only has to be
327 // done once for the vreg. We use an empty interval to detect the first
328 // time we see a vreg.
329 if (interval.empty()) {
330 // Get the Idx of the defining instructions.
331 unsigned defIndex = getDefIndex(MIIdx);
333 MachineInstr *CopyMI = NULL;
334 unsigned SrcReg, DstReg;
335 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
336 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
337 tii_->isMoveInstr(*mi, SrcReg, DstReg))
339 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
341 assert(ValNo->id == 0 && "First value in interval is not 0?");
343 // Loop over all of the blocks that the vreg is defined in. There are
344 // two cases we have to handle here. The most common case is a vreg
345 // whose lifetime is contained within a basic block. In this case there
346 // will be a single kill, in MBB, which comes after the definition.
347 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
348 // FIXME: what about dead vars?
350 if (vi.Kills[0] != mi)
351 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
353 killIdx = defIndex+1;
// If the kill happens after the definition, we have an intra-block
// live range.
if (killIdx > defIndex) {
358 assert(vi.AliveBlocks.none() &&
359 "Shouldn't be alive across any blocks!");
360 LiveRange LR(defIndex, killIdx, ValNo);
361 interval.addRange(LR);
362 DOUT << " +" << LR << "\n";
363 interval.addKill(ValNo, killIdx);
368 // The other case we handle is when a virtual register lives to the end
369 // of the defining block, potentially live across some blocks, then is
370 // live into some number of blocks, but gets killed. Start by adding a
371 // range that goes from this definition to the end of the defining block.
372 LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
373 DOUT << " +" << NewLR;
374 interval.addRange(NewLR);
// Iterate over all of the blocks that the variable is completely
// live in, adding [instrIndex(begin), instrIndex(end)+4) to the
// live interval.
for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
380 if (vi.AliveBlocks[i]) {
381 LiveRange LR(getMBBStartIdx(i),
382 getMBBEndIdx(i)+1, // MBB ends at -1.
384 interval.addRange(LR);
389 // Finally, this virtual register is live from the start of any killing
390 // block to the 'use' slot of the killing instruction.
391 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
392 MachineInstr *Kill = vi.Kills[i];
393 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
394 LiveRange LR(getMBBStartIdx(Kill->getParent()),
396 interval.addRange(LR);
397 interval.addKill(ValNo, killIdx);
// If this is the second time we see a virtual register definition, it
// must be due to phi elimination or two-addr elimination. If this is
// the result of two-address elimination, then the vreg is one of the
// def-and-use register operands.
406 if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
407 // If this is a two-address definition, then we have already processed
408 // the live range. The only problem is that we didn't realize there
409 // are actually two values in the live interval. Because of this we
// need to take the LiveRange that defines this register and split it
// into two values.
412 assert(interval.containsOneValue());
413 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
414 unsigned RedefIndex = getDefIndex(MIIdx);
416 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
417 VNInfo *OldValNo = OldLR->valno;
419 // Delete the initial value, which should be short and continuous,
420 // because the 2-addr copy must be in the same MBB as the redef.
421 interval.removeRange(DefIndex, RedefIndex);
423 // Two-address vregs should always only be redefined once. This means
424 // that at this point, there should be exactly one value number in it.
425 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
// The new value number (#1) is defined by the instruction we claimed
// defined value #0.
429 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
432 // Value#0 is now defined by the 2-addr instruction.
433 OldValNo->def = RedefIndex;
436 // Add the new live interval which replaces the range for the input copy.
437 LiveRange LR(DefIndex, RedefIndex, ValNo);
438 DOUT << " replace range with " << LR;
439 interval.addRange(LR);
440 interval.addKill(ValNo, RedefIndex);
442 // If this redefinition is dead, we need to add a dummy unit live
443 // range covering the def slot.
445 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
448 interval.print(DOUT, tri_);
451 // Otherwise, this must be because of phi elimination. If this is the
452 // first redefinition of the vreg that we have seen, go back and change
453 // the live range in the PHI block to be a different value number.
454 if (interval.containsOneValue()) {
455 assert(vi.Kills.size() == 1 &&
456 "PHI elimination vreg should have one kill, the PHI itself!");
458 // Remove the old range that we now know has an incorrect number.
459 VNInfo *VNI = interval.getValNumInfo(0);
460 MachineInstr *Killer = vi.Kills[0];
461 unsigned Start = getMBBStartIdx(Killer->getParent());
462 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
463 DOUT << " Removing [" << Start << "," << End << "] from: ";
464 interval.print(DOUT, tri_); DOUT << "\n";
465 interval.removeRange(Start, End);
466 VNI->hasPHIKill = true;
467 DOUT << " RESULT: "; interval.print(DOUT, tri_);
469 // Replace the interval with one of a NEW value number. Note that this
470 // value number isn't actually defined by an instruction, weird huh? :)
471 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
472 DOUT << " replace range with " << LR;
473 interval.addRange(LR);
474 interval.addKill(LR.valno, End);
475 DOUT << " RESULT: "; interval.print(DOUT, tri_);
478 // In the case of PHI elimination, each variable definition is only
479 // live until the end of the block. We've already taken care of the
480 // rest of the live range.
481 unsigned defIndex = getDefIndex(MIIdx);
484 MachineInstr *CopyMI = NULL;
485 unsigned SrcReg, DstReg;
486 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
487 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
488 tii_->isMoveInstr(*mi, SrcReg, DstReg))
490 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
492 unsigned killIndex = getMBBEndIdx(mbb) + 1;
493 LiveRange LR(defIndex, killIndex, ValNo);
494 interval.addRange(LR);
495 interval.addKill(ValNo, killIndex);
496 ValNo->hasPHIKill = true;
504 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
505 MachineBasicBlock::iterator mi,
508 LiveInterval &interval,
509 MachineInstr *CopyMI) {
// A physical register cannot be live across basic blocks, so its
// lifetime must end somewhere in its defining basic block.
512 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
514 unsigned baseIndex = MIIdx;
515 unsigned start = getDefIndex(baseIndex);
516 unsigned end = start;
518 // If it is not used after definition, it is considered dead at
519 // the instruction defining it. Hence its interval is:
520 // [defSlot(def), defSlot(def)+1)
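// (For instance, assuming the usual four-slot layout with DEF == 2, an
// instruction at base index 8 has defSlot(def) == 10, so a dead def gets the
// unit interval [10, 11).)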
523 end = getDefIndex(start) + 1;
527 // If it is not dead on definition, it must be killed by a
528 // subsequent instruction. Hence its interval is:
529 // [defSlot(def), useSlot(kill)+1)
530 baseIndex += InstrSlots::NUM;
531 while (++mi != MBB->end()) {
532 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
533 getInstructionFromIndex(baseIndex) == 0)
534 baseIndex += InstrSlots::NUM;
535 if (mi->killsRegister(interval.reg, tri_)) {
537 end = getUseIndex(baseIndex) + 1;
539 } else if (mi->modifiesRegister(interval.reg, tri_)) {
540 // Another instruction redefines the register before it is ever read.
541 // Then the register is essentially dead at the instruction that defines
542 // it. Hence its interval is:
543 // [defSlot(def), defSlot(def)+1)
545 end = getDefIndex(start) + 1;
549 baseIndex += InstrSlots::NUM;
// The only case where we should have a dead physreg here without a kill or
// a redefining instruction is when it is live-in to the function and never
// used.
assert(!CopyMI && "physreg was not killed in defining block!");
556 end = getDefIndex(start) + 1; // It's dead.
559 assert(start < end && "did not find end of interval?");
561 // Already exists? Extend old live interval.
562 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
563 VNInfo *ValNo = (OldLR != interval.end())
564 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
565 LiveRange LR(start, end, ValNo);
566 interval.addRange(LR);
567 interval.addKill(LR.valno, end);
568 DOUT << " +" << LR << '\n';
571 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
572 MachineBasicBlock::iterator MI,
576 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
577 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
578 getOrCreateInterval(MO.getReg()));
579 else if (allocatableRegs_[MO.getReg()]) {
580 MachineInstr *CopyMI = NULL;
581 unsigned SrcReg, DstReg;
582 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
583 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
584 tii_->isMoveInstr(*MI, SrcReg, DstReg))
586 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
587 getOrCreateInterval(MO.getReg()), CopyMI);
588 // Def of a register also defines its sub-registers.
589 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
590 // If MI also modifies the sub-register explicitly, avoid processing it
591 // more than once. Do not pass in TRI here so it checks for exact match.
592 if (!MI->modifiesRegister(*AS))
593 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
594 getOrCreateInterval(*AS), 0);
598 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
600 LiveInterval &interval, bool isAlias) {
601 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
// Look for kills; if it reaches a def before it's killed, then it shouldn't
// be considered a live-in.
605 MachineBasicBlock::iterator mi = MBB->begin();
606 unsigned baseIndex = MIIdx;
607 unsigned start = baseIndex;
608 unsigned end = start;
609 while (mi != MBB->end()) {
610 if (mi->killsRegister(interval.reg, tri_)) {
612 end = getUseIndex(baseIndex) + 1;
614 } else if (mi->modifiesRegister(interval.reg, tri_)) {
615 // Another instruction redefines the register before it is ever read.
616 // Then the register is essentially dead at the instruction that defines
617 // it. Hence its interval is:
618 // [defSlot(def), defSlot(def)+1)
620 end = getDefIndex(start) + 1;
624 baseIndex += InstrSlots::NUM;
625 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
626 getInstructionFromIndex(baseIndex) == 0)
627 baseIndex += InstrSlots::NUM;
632 // Live-in register might not be used at all.
636 end = getDefIndex(MIIdx) + 1;
638 DOUT << " live through";
643 LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
644 interval.addRange(LR);
645 interval.addKill(LR.valno, end);
646 DOUT << " +" << LR << '\n';
/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
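/// For example, a virtual register defined at index 6 whose last use occupies
/// slot 17 would (roughly) get the single half-open interval [6, 18).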
653 void LiveIntervals::computeIntervals() {
654 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
655 << "********** Function: "
656 << ((Value*)mf_->getFunction())->getName() << '\n';
657 // Track the index of the current machine instr.
658 unsigned MIIndex = 0;
660 // Skip over empty initial indices.
661 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
662 getInstructionFromIndex(MIIndex) == 0)
663 MIIndex += InstrSlots::NUM;
665 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
667 MachineBasicBlock *MBB = MBBI;
668 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
670 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
672 // Create intervals for live-ins to this BB first.
673 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
674 LE = MBB->livein_end(); LI != LE; ++LI) {
675 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
676 // Multiple live-ins can alias the same register.
677 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
678 if (!hasInterval(*AS))
679 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
683 for (; MI != miEnd; ++MI) {
684 DOUT << MIIndex << "\t" << *MI;
687 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
688 MachineOperand &MO = MI->getOperand(i);
689 // handle register defs - build intervals
690 if (MO.isRegister() && MO.getReg() && MO.isDef())
691 handleRegisterDef(MBB, MI, MIIndex, MO, i);
694 MIIndex += InstrSlots::NUM;
696 // Skip over empty indices.
697 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
698 getInstructionFromIndex(MIIndex) == 0)
699 MIIndex += InstrSlots::NUM;
704 bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
705 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
706 std::vector<IdxMBBPair>::const_iterator I =
707 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
710 while (I != Idx2MBBMap.end()) {
711 if (LR.end <= I->first)
713 MBBs.push_back(I->second);
LiveInterval LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
                       HUGE_VALF : 0.0F;
  return LiveInterval(reg, Weight);
}
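
// A physical register interval starts out with an effectively infinite
// weight (HUGE_VALF) so the allocator never considers spilling it; virtual
// register intervals start at zero and accumulate weight from their uses.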
727 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
728 /// copy field and returns the source register that defines it.
729 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
733 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
734 return VNI->copy->getOperand(1).getReg();
735 if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
736 return VNI->copy->getOperand(2).getReg();
737 unsigned SrcReg, DstReg;
738 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
740 assert(0 && "Unrecognized copy instruction!");
744 //===----------------------------------------------------------------------===//
745 // Register allocator hooks.
748 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
749 /// allow one) virtual register operand, then its uses are implicitly using
750 /// the register. Returns the virtual register.
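/// For example, if the rematerialization candidate were a hypothetical
/// instruction "%reg1025 = load <frameindex> + %reg1026", the returned
/// implicit use would be %reg1026.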
751 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
752 MachineInstr *MI) const {
754 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
755 MachineOperand &MO = MI->getOperand(i);
756 if (!MO.isRegister() || !MO.isUse())
758 unsigned Reg = MO.getReg();
759 if (Reg == 0 || Reg == li.reg)
761 // FIXME: For now, only remat MI with at most one register operand.
763 "Can't rematerialize instruction with multiple register operand!");
772 /// isValNoAvailableAt - Return true if the val# of the specified interval
773 /// which reaches the given instruction also reaches the specified use index.
774 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
775 unsigned UseIdx) const {
776 unsigned Index = getInstructionIndex(MI);
777 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
778 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
779 return UI != li.end() && UI->valno == ValNo;
782 /// isReMaterializable - Returns true if the definition MI of the specified
783 /// val# of the specified interval is re-materializable.
784 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
785 const VNInfo *ValNo, MachineInstr *MI,
790 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
794 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
795 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let the target-specific isReallyTriviallyReMaterializable
    // determine this, but remember this is not safe to fold into a
    // two-address instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // heuristics.
  if (!MI->getDesc().isRematerializable() ||
806 !tii_->isTriviallyReMaterializable(MI)) {
807 if (!EnableAggressiveRemat)
// If the instruction accesses memory but the memoperands have been lost,
// we can't analyze it.
812 const TargetInstrDesc &TID = MI->getDesc();
813 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
816 // Avoid instructions obviously unsafe for remat.
817 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
820 // If the instruction accesses memory and the memory could be non-constant,
821 // assume the instruction is not rematerializable.
822 for (alist<MachineMemOperand>::const_iterator I = MI->memoperands_begin(),
823 E = MI->memoperands_end(); I != E; ++I) {
824 const MachineMemOperand &MMO = *I;
825 if (MMO.isVolatile() || MMO.isStore())
827 const Value *V = MMO.getValue();
830 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
831 if (!PSV->isConstant(mf_->getFrameInfo()))
833 } else if (!aa_->pointsToConstantMemory(V))
837 // If any of the registers accessed are non-constant, conservatively assume
838 // the instruction is not rematerializable.
840 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
841 const MachineOperand &MO = MI->getOperand(i);
843 unsigned Reg = MO.getReg();
846 if (TargetRegisterInfo::isPhysicalRegister(Reg))
849 // Only allow one def, and that in the first operand.
850 if (MO.isDef() != (i == 0))
853 // Only allow constant-valued registers.
854 bool IsLiveIn = mri_->isLiveIn(Reg);
855 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
858 // For the def, it should be the only def.
859 if (MO.isDef() && (next(I) != E || IsLiveIn))
// Only allow one other register use, as that's all the
// remat mechanisms support currently.
868 else if (Reg != ImpUse)
// For uses, there should be only one associated def.
872 if (I != E && (next(I) != E || IsLiveIn))
879 unsigned ImpUse = getReMatImplicitUse(li, MI);
881 const LiveInterval &ImpLi = getInterval(ImpUse);
882 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
883 re = mri_->use_end(); ri != re; ++ri) {
884 MachineInstr *UseMI = &*ri;
885 unsigned UseIdx = getInstructionIndex(UseMI);
886 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
888 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
/// isReMaterializable - Returns true if the definition MI of every
/// val# of the specified interval is re-materializable.
897 bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
899 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
901 const VNInfo *VNI = *i;
902 unsigned DefIdx = VNI->def;
904 continue; // Dead val#.
905 // Is the def for the val# rematerializable?
908 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
909 bool DefIsLoad = false;
911 !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
922 SmallVector<unsigned, 2> &Ops,
924 SmallVector<unsigned, 2> &FoldOps) {
925 const TargetInstrDesc &TID = MI->getDesc();
928 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
929 unsigned OpIdx = Ops[i];
930 MachineOperand &MO = MI->getOperand(OpIdx);
931 // FIXME: fold subreg use.
935 MRInfo |= (unsigned)VirtRegMap::isMod;
937 // Filter out two-address use operand(s).
938 if (!MO.isImplicit() &&
939 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
940 MRInfo = VirtRegMap::isModRef;
943 MRInfo |= (unsigned)VirtRegMap::isRef;
945 FoldOps.push_back(OpIdx);
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If successful, MI is updated with the newly created MI and
/// the function returns true.
955 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
956 VirtRegMap &vrm, MachineInstr *DefMI,
958 SmallVector<unsigned, 2> &Ops,
959 bool isSS, int Slot, unsigned Reg) {
960 // If it is an implicit def instruction, just delete it.
961 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
962 RemoveMachineInstrFromMaps(MI);
963 vrm.RemoveMachineInstrFromMaps(MI);
964 MI->eraseFromParent();
969 // Filter the list of operand indexes that are to be folded. Abort if
970 // any operand will prevent folding.
972 SmallVector<unsigned, 2> FoldOps;
973 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
// The only time it's safe to fold into a two-address instruction is when
// it's folding a reload or a spill from / into a spill stack slot.
978 if (DefMI && (MRInfo & VirtRegMap::isMod))
981 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
982 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
984 // Remember this instruction uses the spill slot.
985 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
987 // Attempt to fold the memory reference into the instruction. If
988 // we can do this, we don't need to insert spill code.
989 MachineBasicBlock &MBB = *MI->getParent();
990 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
991 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
992 vrm.transferSpillPts(MI, fmi);
993 vrm.transferRestorePts(MI, fmi);
994 vrm.transferEmergencySpills(MI, fmi);
996 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
997 mi2iMap_[fmi] = InstrIdx;
998 MI = MBB.insert(MBB.erase(MI), fmi);
1005 /// canFoldMemoryOperand - Returns true if the specified load / store
1006 /// folding is possible.
1007 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1008 SmallVector<unsigned, 2> &Ops,
1010 // Filter the list of operand indexes that are to be folded. Abort if
1011 // any operand will prevent folding.
1012 unsigned MRInfo = 0;
1013 SmallVector<unsigned, 2> FoldOps;
1014 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1017 // It's only legal to remat for a use, not a def.
1018 if (ReMat && (MRInfo & VirtRegMap::isMod))
1021 return tii_->canFoldMemoryOperand(MI, FoldOps);
1024 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1025 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1026 for (LiveInterval::Ranges::const_iterator
1027 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1028 std::vector<IdxMBBPair>::const_iterator II =
1029 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1030 if (II == Idx2MBBMap.end())
if (I->end > II->first)  // crossing an MBB.
1034 MBBs.insert(II->second);
1035 if (MBBs.size() > 1)
1041 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1042 /// interval on to-be re-materialized operands of MI) with new register.
1043 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1044 MachineInstr *MI, unsigned NewVReg,
// There is an implicit use. That means one of the other operands is
// being remat'ed and the remat'ed instruction has li.reg as a
// use operand. Make sure we rewrite that as well.
1049 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1050 MachineOperand &MO = MI->getOperand(i);
1051 if (!MO.isRegister())
1053 unsigned Reg = MO.getReg();
1054 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1056 if (!vrm.isReMaterialized(Reg))
1058 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1059 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1061 UseMO->setReg(NewVReg);
1065 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1066 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1067 bool LiveIntervals::
1068 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1069 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
1070 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1071 unsigned Slot, int LdSlot,
1072 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1074 const TargetRegisterClass* rc,
1075 SmallVector<int, 4> &ReMatIds,
1076 const MachineLoopInfo *loopInfo,
1077 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1078 std::map<unsigned,unsigned> &MBBVRegsMap,
1079 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1080 MachineBasicBlock *MBB = MI->getParent();
1081 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1082 bool CanFold = false;
1084 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1085 MachineOperand& mop = MI->getOperand(i);
1086 if (!mop.isRegister())
1088 unsigned Reg = mop.getReg();
1089 unsigned RegI = Reg;
1090 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1095 bool TryFold = !DefIsReMat;
1096 bool FoldSS = true; // Default behavior unless it's a remat.
1097 int FoldSlot = Slot;
1099 // If this is the rematerializable definition MI itself and
1100 // all of its uses are rematerialized, simply delete it.
1101 if (MI == ReMatOrigDefMI && CanDelete) {
DOUT << "\t\t\t\tErasing re-materializable def: ";
1104 RemoveMachineInstrFromMaps(MI);
1105 vrm.RemoveMachineInstrFromMaps(MI);
1106 MI->eraseFromParent();
1110 // If def for this use can't be rematerialized, then try folding.
1111 // If def is rematerializable and it's a load, also try folding.
1112 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1114 // Try fold loads (from stack slot, constant pool, etc.) into uses.
// Scan all of the operands of this instruction rewriting operands
// to use NewVReg instead of li.reg as appropriate. We do this for
// two reasons:
//
// 1. If the instr reads the same spilled vreg multiple times, we
//    want to reuse the NewVReg.
// 2. If the instr is a two-addr instruction, we are required to
//    keep the src/dst regs pinned.
1129 // Keep track of whether we replace a use and/or def so that we can
1130 // create the spill interval with the appropriate range.
1132 HasUse = mop.isUse();
1133 HasDef = mop.isDef();
1134 SmallVector<unsigned, 2> Ops;
1136 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1137 const MachineOperand &MOj = MI->getOperand(j);
1138 if (!MOj.isRegister())
1140 unsigned RegJ = MOj.getReg();
1141 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1145 HasUse |= MOj.isUse();
1146 HasDef |= MOj.isDef();
1150 if (HasUse && !li.liveAt(getUseIndex(index)))
// Must be defined by an implicit def. It should not be spilled. Note,
// this is for correctness reasons, e.g.
// 8   %reg1024<def> = IMPLICIT_DEF
// 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
// The live range [12, 14) is not part of the r1024 live interval since
// it's defined by an implicit def. It will not conflict with the live
// interval of r1025. Now suppose both registers are spilled; you can
// easily see a situation where both registers are reloaded before
// the INSERT_SUBREG and both target registers would overlap.
1162 // Update stack slot spill weight if we are splitting.
1163 float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
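// (If getSpillWeight follows the usual heuristic of roughly
// (isDef + isUse) * 10^loopDepth, a def-and-use at loop depth 2 would add a
// weight of about 200.)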
1170 // Do not fold load / store here if we are splitting. We'll find an
1171 // optimal point to insert a load / store later.
1173 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1174 Ops, FoldSS, FoldSlot, Reg)) {
1175 // Folding the load/store can completely change the instruction in
1176 // unpredictable ways, rescan it from the beginning.
1180 if (isRemoved(MI)) {
1184 goto RestartInstruction;
1187 // We'll try to fold it later if it's profitable.
1188 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1192 // Create a new virtual register for the spill interval.
1193 bool CreatedNewVReg = false;
1195 NewVReg = mri_->createVirtualRegister(rc);
1197 CreatedNewVReg = true;
1199 mop.setReg(NewVReg);
1200 if (mop.isImplicit())
1201 rewriteImplicitOps(li, MI, NewVReg, vrm);
1203 // Reuse NewVReg for other reads.
1204 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1205 MachineOperand &mopj = MI->getOperand(Ops[j]);
1206 mopj.setReg(NewVReg);
1207 if (mopj.isImplicit())
1208 rewriteImplicitOps(li, MI, NewVReg, vrm);
1211 if (CreatedNewVReg) {
1213 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1214 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1215 // Each valnum may have its own remat id.
1216 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1218 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1220 if (!CanDelete || (HasUse && HasDef)) {
// If this is a two-addr instruction then its use operands are
// rematerializable but its def is not. It should be assigned a
// stack slot.
vrm.assignVirt2StackSlot(NewVReg, Slot);
1227 vrm.assignVirt2StackSlot(NewVReg, Slot);
1229 } else if (HasUse && HasDef &&
1230 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1231 // If this interval hasn't been assigned a stack slot (because earlier
1232 // def is a deleted remat def), do it now.
1233 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1234 vrm.assignVirt2StackSlot(NewVReg, Slot);
1237 // Re-matting an instruction with virtual register use. Add the
1238 // register as an implicit use on the use MI.
1239 if (DefIsReMat && ImpUse)
1240 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1242 // create a new register interval for this spill / remat.
1243 LiveInterval &nI = getOrCreateInterval(NewVReg);
1244 if (CreatedNewVReg) {
1245 NewLIs.push_back(&nI);
1246 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1248 vrm.setIsSplitFromReg(NewVReg, li.reg);
1252 if (CreatedNewVReg) {
1253 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1254 nI.getNextValue(~0U, 0, VNInfoAllocator));
1258 // Extend the split live interval to this def / use.
1259 unsigned End = getUseIndex(index)+1;
1260 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1261 nI.getValNumInfo(nI.getNumValNums()-1));
1267 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1268 nI.getNextValue(~0U, 0, VNInfoAllocator));
1273 DOUT << "\t\t\t\tAdded new interval: ";
1274 nI.print(DOUT, tri_);
1279 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1281 MachineBasicBlock *MBB, unsigned Idx) const {
1282 unsigned End = getMBBEndIdx(MBB);
1283 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1284 unsigned KillIdx = VNI->kills[j];
1285 if (KillIdx > Idx && KillIdx < End)
1291 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1292 /// during spilling.
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse, HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1303 struct RewriteInfoCompare {
1304 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1305 return LHS.Index < RHS.Index;
1310 void LiveIntervals::
1311 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1312 LiveInterval::Ranges::const_iterator &I,
1313 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1314 unsigned Slot, int LdSlot,
1315 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1317 const TargetRegisterClass* rc,
1318 SmallVector<int, 4> &ReMatIds,
1319 const MachineLoopInfo *loopInfo,
1320 BitVector &SpillMBBs,
1321 std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
1322 BitVector &RestoreMBBs,
1323 std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1324 std::map<unsigned,unsigned> &MBBVRegsMap,
1325 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1326 bool AllCanFold = true;
1327 unsigned NewVReg = 0;
1328 unsigned start = getBaseIndex(I->start);
1329 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
// First collect all the defs / uses in this live range that will be rewritten.
// Make sure they are sorted according to instruction index.
1333 std::vector<RewriteInfo> RewriteMIs;
1334 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1335 re = mri_->reg_end(); ri != re; ) {
1336 MachineInstr *MI = &*ri;
1337 MachineOperand &O = ri.getOperand();
1339 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1340 unsigned index = getInstructionIndex(MI);
1341 if (index < start || index >= end)
1343 if (O.isUse() && !li.liveAt(getUseIndex(index)))
// Must be defined by an implicit def. It should not be spilled. Note,
// this is for correctness reasons, e.g.
// 8   %reg1024<def> = IMPLICIT_DEF
// 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
// The live range [12, 14) is not part of the r1024 live interval since
// it's defined by an implicit def. It will not conflict with the live
// interval of r1025. Now suppose both registers are spilled; you can
// easily see a situation where both registers are reloaded before
// the INSERT_SUBREG and both target registers would overlap.
1354 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1356 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1358 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1359 // Now rewrite the defs and uses.
1360 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1361 RewriteInfo &rwi = RewriteMIs[i];
1363 unsigned index = rwi.Index;
1364 bool MIHasUse = rwi.HasUse;
1365 bool MIHasDef = rwi.HasDef;
1366 MachineInstr *MI = rwi.MI;
// If MI defs and/or uses the same register multiple times, then there
// are multiple entries.
1369 unsigned NumUses = MIHasUse;
1370 while (i != e && RewriteMIs[i].MI == MI) {
1371 assert(RewriteMIs[i].Index == index);
1372 bool isUse = RewriteMIs[i].HasUse;
1373 if (isUse) ++NumUses;
1375 MIHasDef |= RewriteMIs[i].HasDef;
1378 MachineBasicBlock *MBB = MI->getParent();
1380 if (ImpUse && MI != ReMatDefMI) {
// Re-matting an instruction with virtual register use. Update the
// register interval's spill weight to HUGE_VALF to prevent it from
// being spilled.
LiveInterval &ImpLi = getInterval(ImpUse);
1385 ImpLi.weight = HUGE_VALF;
1388 unsigned MBBId = MBB->getNumber();
1389 unsigned ThisVReg = 0;
1391 std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
1392 if (NVI != MBBVRegsMap.end()) {
1393 ThisVReg = NVI->second;
// It's better to start a new interval to avoid artificially
// extending the new interval.
1402 if (MIHasDef && !MIHasUse) {
1403 MBBVRegsMap.erase(MBB->getNumber());
1409 bool IsNew = ThisVReg == 0;
1411 // This ends the previous live interval. If all of its def / use
1412 // can be folded, give it a low spill weight.
1413 if (NewVReg && TrySplit && AllCanFold) {
1414 LiveInterval &nI = getOrCreateInterval(NewVReg);
1421 bool HasDef = false;
1422 bool HasUse = false;
1423 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1424 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1425 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1426 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1427 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
1428 if (!HasDef && !HasUse)
1431 AllCanFold &= CanFold;
1433 // Update weight of spill interval.
1434 LiveInterval &nI = getOrCreateInterval(NewVReg);
1436 // The spill weight is now infinity as it cannot be spilled again.
1437 nI.weight = HUGE_VALF;
1441 // Keep track of the last def and first use in each MBB.
1443 if (MI != ReMatOrigDefMI || !CanDelete) {
1444 bool HasKill = false;
1446 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
// If this is two-address code, then this index starts a new VNInfo.
1449 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1451 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1453 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1454 SpillIdxes.find(MBBId);
1456 if (SII == SpillIdxes.end()) {
1457 std::vector<SRInfo> S;
1458 S.push_back(SRInfo(index, NewVReg, true));
1459 SpillIdxes.insert(std::make_pair(MBBId, S));
1460 } else if (SII->second.back().vreg != NewVReg) {
1461 SII->second.push_back(SRInfo(index, NewVReg, true));
1462 } else if ((int)index > SII->second.back().index) {
1463 // If there is an earlier def and this is a two-address
1464 // instruction, then it's not possible to fold the store (which
1465 // would also fold the load).
1466 SRInfo &Info = SII->second.back();
1468 Info.canFold = !HasUse;
1470 SpillMBBs.set(MBBId);
1471 } else if (SII != SpillIdxes.end() &&
1472 SII->second.back().vreg == NewVReg &&
1473 (int)index > SII->second.back().index) {
1474 // There is an earlier def that's not killed (must be two-address).
1475 // The spill is no longer needed.
1476 SII->second.pop_back();
1477 if (SII->second.empty()) {
1478 SpillIdxes.erase(MBBId);
1479 SpillMBBs.reset(MBBId);
1486 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1487 SpillIdxes.find(MBBId);
1488 if (SII != SpillIdxes.end() &&
1489 SII->second.back().vreg == NewVReg &&
1490 (int)index > SII->second.back().index)
// Use(s) follow the last def; it's not safe to fold the spill.
1492 SII->second.back().canFold = false;
1493 std::map<unsigned, std::vector<SRInfo> >::iterator RII =
1494 RestoreIdxes.find(MBBId);
1495 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1496 // If we are splitting live intervals, only fold if it's the first
1497 // use and there isn't another use later in the MBB.
1498 RII->second.back().canFold = false;
1500 // Only need a reload if there isn't an earlier def / use.
1501 if (RII == RestoreIdxes.end()) {
1502 std::vector<SRInfo> Infos;
1503 Infos.push_back(SRInfo(index, NewVReg, true));
1504 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1506 RII->second.push_back(SRInfo(index, NewVReg, true));
1508 RestoreMBBs.set(MBBId);
1512 // Update spill weight.
1513 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1514 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1517 if (NewVReg && TrySplit && AllCanFold) {
1518 // If all of its def / use can be folded, give it a low spill weight.
1519 LiveInterval &nI = getOrCreateInterval(NewVReg);
1524 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1525 BitVector &RestoreMBBs,
1526 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1527 if (!RestoreMBBs[Id])
1529 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1530 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1531 if (Restores[i].index == index &&
1532 Restores[i].vreg == vr &&
1533 Restores[i].canFold)
1538 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1539 BitVector &RestoreMBBs,
1540 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1541 if (!RestoreMBBs[Id])
1543 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1544 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1545 if (Restores[i].index == index && Restores[i].vreg)
1546 Restores[i].index = -1;
1549 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1550 /// spilled and create empty intervals for their uses.
1552 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1553 const TargetRegisterClass* rc,
1554 std::vector<LiveInterval*> &NewLIs) {
1555 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1556 re = mri_->reg_end(); ri != re; ) {
1557 MachineOperand &O = ri.getOperand();
1558 MachineInstr *MI = &*ri;
1561 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1562 "Register def was not rewritten?");
1563 RemoveMachineInstrFromMaps(MI);
1564 vrm.RemoveMachineInstrFromMaps(MI);
1565 MI->eraseFromParent();
// This must be a use of an implicit_def so it's not part of the live
// interval. Create a new empty live interval for it.
1569 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1570 unsigned NewVReg = mri_->createVirtualRegister(rc);
1572 vrm.setIsImplicitlyDefined(NewVReg);
1573 NewLIs.push_back(&getOrCreateInterval(NewVReg));
1574 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1575 MachineOperand &MO = MI->getOperand(i);
1576 if (MO.isReg() && MO.getReg() == li.reg)
1584 std::vector<LiveInterval*> LiveIntervals::
1585 addIntervalsForSpills(const LiveInterval &li,
1586 const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1588 assert(li.weight != HUGE_VALF &&
1589 "attempt to spill already spilled interval!");
1591 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1592 li.print(DOUT, tri_);
1595 // Spill slot weight.
// Each bit specifies whether a spill is required in the MBB.
1599 BitVector SpillMBBs(mf_->getNumBlockIDs());
1600 std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1601 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1602 std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1603 std::map<unsigned,unsigned> MBBVRegsMap;
1604 std::vector<LiveInterval*> NewLIs;
1605 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1607 unsigned NumValNums = li.getNumValNums();
1608 SmallVector<MachineInstr*, 4> ReMatDefs;
1609 ReMatDefs.resize(NumValNums, NULL);
1610 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1611 ReMatOrigDefs.resize(NumValNums, NULL);
1612 SmallVector<int, 4> ReMatIds;
1613 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1614 BitVector ReMatDelete(NumValNums);
1615 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
// Spilling a split live interval. It cannot be split any further. Also,
// it's guaranteed to be a single val# / range interval.
1619 if (vrm.getPreSplitReg(li.reg)) {
1620 vrm.setIsSplitFromReg(li.reg, 0);
1621 // Unset the split kill marker on the last use.
1622 unsigned KillIdx = vrm.getKillPoint(li.reg);
1624 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1625 assert(KillMI && "Last use disappeared?");
1626 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1627 assert(KillOp != -1 && "Last use disappeared?");
1628 KillMI->getOperand(KillOp).setIsKill(false);
1630 vrm.removeKillPoint(li.reg);
1631 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1632 Slot = vrm.getStackSlot(li.reg);
1633 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1634 MachineInstr *ReMatDefMI = DefIsReMat ?
1635 vrm.getReMaterializedMI(li.reg) : NULL;
1637 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1638 bool isLoad = isLoadSS ||
1639 (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1640 bool IsFirstRange = true;
1641 for (LiveInterval::Ranges::const_iterator
1642 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1643 // If this is a split live interval with multiple ranges, it means there
1644 // are two-address instructions that re-defined the value. Only the
1645 // first def can be rematerialized!
1647 // Note ReMatOrigDefMI has already been deleted.
1648 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1649 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1650 false, vrm, rc, ReMatIds, loopInfo,
1651 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1652 MBBVRegsMap, NewLIs, SSWeight);
1654 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1655 Slot, 0, false, false, false,
1656 false, vrm, rc, ReMatIds, loopInfo,
1657 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1658 MBBVRegsMap, NewLIs, SSWeight);
1660 IsFirstRange = false;
1663 SSWeight = 0.0f; // Already accounted for when split.
1664 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1668 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1669 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1673 bool NeedStackSlot = false;
1674 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1676 const VNInfo *VNI = *i;
1677 unsigned VN = VNI->id;
1678 unsigned DefIdx = VNI->def;
1680 continue; // Dead val#.
1681 // Is the def for the val# rematerializable?
1682 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1683 ? 0 : getInstructionFromIndex(DefIdx);
1685 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1686 // Remember how to remat the def of this val#.
1687 ReMatOrigDefs[VN] = ReMatDefMI;
1688 // Original def may be modified so we have to make a copy here.
1689 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1690 ClonedMIs.push_back(Clone);
1691 ReMatDefs[VN] = Clone;
1693 bool CanDelete = true;
1694 if (VNI->hasPHIKill) {
// A kill is a phi node; not all of its uses can be rematerialized.
// It must not be deleted.
// Need a stack slot if there is any live range where uses cannot be
// rematerialized.
NeedStackSlot = true;
1703 ReMatDelete.set(VN);
// Need a stack slot if there is any live range where uses cannot be
// rematerialized.
NeedStackSlot = true;
1711 // One stack slot per live interval.
1712 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1713 Slot = vrm.assignVirt2StackSlot(li.reg);
1715 // Create new intervals and rewrite defs and uses.
1716 for (LiveInterval::Ranges::const_iterator
1717 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1718 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1719 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1720 bool DefIsReMat = ReMatDefMI != NULL;
1721 bool CanDelete = ReMatDelete[I->valno->id];
1723 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1724 bool isLoad = isLoadSS ||
1725 (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1726 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1727 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1728 CanDelete, vrm, rc, ReMatIds, loopInfo,
1729 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1730 MBBVRegsMap, NewLIs, SSWeight);
1733 // Insert spills / restores if we are splitting.
1735 handleSpilledImpDefs(li, vrm, rc, NewLIs);
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
      unsigned loopDepth = loopInfo->getLoopDepth(MBB);
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isRegister() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;

            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }

        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }

        // Update spill slot weight.
        if (!isReMat)
          SSWeight += getSpillWeight(true, false, loopDepth);
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
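  // Insert a load or rematerialization at each recorded restore point, again
  // folding the reload into the using instruction when possible.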
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);

    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      if (index == -1)
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isRegister() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the
            // register interval's spill weight to HUGE_VALF to prevent it
            // from being spilled.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }

      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);

      // Update spill slot weight.
      if (!isReMat)
        SSWeight += getSpillWeight(false, true, loopDepth);
    }
    Id = RestoreMBBs.find_next(Id);
  }
  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (LastUse->getOperand(UseIdx).isImplicit() ||
            LastUse->getDesc().getOperandConstraint(UseIdx, TOI::TIED_TO) == -1) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}
/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}
/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval.
void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg,
                                                  VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // Registers that alias PhysReg but are not sub-registers of the chosen
    // representative super-register are not handled yet, so assert that none
    // exist.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
           tri_->isSuperRegister(*AS, SpillReg));

  LiveInterval &pli = getInterval(SpillReg);
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      vrm.addEmergencySpill(SpillReg, MI);
      pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      }
    }
  }
}
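/// addLiveRangeToEndOfBlock - Create a new value number for the given
/// register, defined by startInst, and add a live range extending from that
/// def to the end of startInst's basic block.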
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
            getInstructionIndex(startInst) + InstrSlots::DEF,
            startInst, getVNInfoAllocator());
  VN->hasPHIKill = true;
  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
               getMBBEndIdx(startInst->getParent()) + 1, VN);
  Interval.addRange(LR);