//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
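
// Note: SlotIndexes is addRequiredTransitive rather than addRequired because
// any pass that consumes the computed live intervals also needs the
// instruction numbering to stay alive for as long as the intervals are used.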

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;
  r2iMap_.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Register allocate the whole function
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(errs());
}
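
// A note on indexing, used throughout this file: the SlotIndexes pass gives
// every instruction a base index with four slots -- load, use, def and store.
// getLoadIndex()/getUseIndex()/getDefIndex()/getStoreIndex() select a slot
// within the current instruction, and all live ranges are half-open
// intervals [start, end) over these slots, e.g. [defSlot(def), useSlot(kill)+1)
// for a value killed by a later use.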

bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries.
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions.
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find the last instruction in the range.
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // The range cannot cross basic block boundaries or terminators.
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg.
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)
        continue;

    // Check for operands using reg.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}

/// conflictsWithPhysRegRef - Similar to conflictsWithPhysReg except it
/// can check uses as well.
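/// A conflict here is any surviving instruction in li's ranges (joined
/// copies excluded) whose def -- or use, when CheckUse is set -- is a
/// sub-register of Reg.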
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    errs() << tri_->getName(reg);
  else
    errs() << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of their inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(errs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb).getNextIndex().getLoadIndex(),
                    ValNo);
    DEBUG(errs() << " +" << NewLR);
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
         E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(
          getMBBStartIdx(mf_->getBlockNumbered(*I)),
          getMBBEndIdx(mf_->getBlockNumbered(*I)).getNextIndex().getLoadIndex(),
          ValNo);
      interval.addRange(LR);
      DEBUG(errs() << " +" << LR);
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex killIdx =
        getInstructionIndex(Kill).getDefIndex();
      LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(errs() << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
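    //
    // A hypothetical illustration: after two-address elimination,
    //   10  %reg1024 = MOV %reg1025
    //   14  %reg1024 = ADD %reg1024, 4
    // the interval built for the MOV spans both instructions, but the ADD
    // actually starts a second value. The code below splits the single
    // value number in two and reassigns the copy's range accordingly.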
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(errs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          errs() << " RESULT: ";
          interval.print(errs(), tri_);
        });
    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        SlotIndex Start = getMBBStartIdx(Killer->getParent());
        SlotIndex End = getInstructionIndex(Killer).getDefIndex();
        DEBUG({
            errs() << " Removing [" << Start << "," << End << "] from: ";
            interval.print(errs(), tri_);
            errs() << "\n";
          });
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "Newly discovered PHI interval has >1 ranges.");
        MachineBasicBlock *killMBB = getMBBFromIndex(VNI->def);
        VNI->addKill(indexes_->getTerminatorGap(killMBB));
        VNI->setHasPHIKill(true);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });

        // Replace the interval with one of a NEW value number. Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End,
          interval.getNextValue(SlotIndex(getMBBStartIdx(Killer->getParent()),
                                          true),
                                0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DEBUG(errs() << " replace range with " << LR);
        interval.addRange(LR);
        LR.valno->addKill(End);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb).getNextIndex().getLoadIndex();
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(errs() << " +" << LR);
    }
  }

  DEBUG(errs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (mi->registerDefIsDead(interval.reg, tri_)) {
    DEBUG(errs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever
          // read. Then the register is essentially dead at the instruction
          // that defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(errs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing or
  // defining instruction where we know it's dead is if it is live-in to the
  // function and never used. Another possible case is the implicit use of
  // the physical register has been deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend the old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      errs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills; if it reaches a def before it's killed, then it
  // shouldn't be considered a live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that
      // defines it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(errs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    ++mi;
    if (mi != MBB->end()) {
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
    }
  }

  // A live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(errs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(errs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}

/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
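/// (for instance, a vreg defined at instruction 4 and last read at
/// instruction 8 is live in [defSlot(4), useSlot(8)+1))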
void LiveIntervals::computeIntervals() {
  DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(errs() << MBB->getName() << ":\n");

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (; MI != miEnd; ++MI) {
      DEBUG(errs() << MIIndex << "\t" << *MI);

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks.
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
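
// Note that physical register intervals are created with an infinite spill
// weight (HUGE_VALF below) so the spill-weight heuristics never pick them,
// while virtual registers start at weight 0 and accumulate weight from
// their uses and defs.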
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
      unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
      if (SrcSubReg == DstSubReg)
        // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
        // reg1034 can still be coalesced to EDX.
        return Reg;
      assert(DstSubReg == 0);
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    }
    return Reg;
  } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
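
// A brief note on rematerialization: rather than reloading a spilled value
// from its stack slot, the allocator may re-execute the defining instruction
// at each use point. This is only sound for defs the target reports as
// trivially rematerializable (no side effects, operands still available),
// which is what the checks below establish.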

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}

/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
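
// A purely illustrative x86-flavored example of such a fold: a reload
//   %reg1024 = MOV32rm <fi#0>
//   ADD32rr %reg1025<def>, %reg1024
// can become a single memory-operand instruction
//   ADD32rm %reg1025<def>, <fi#0>
// eliminating both the reload instruction and the reloaded register.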

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(errs() << "\t\t\t\tErasing re-materializable def: "
                     << MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old virtual register.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(errs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        errs() << "\t\t\t\tAdded new interval: ";
        nI.print(errs(), tri_);
        errs() << '\n';
      });
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  SlotIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHI())
      continue;

    SlotIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its defs / uses
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                                index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                                Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                                ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is a two-address code, then this index starts a new
          // VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def; it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }
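
  // (getSpillWeight scales with loop depth -- roughly a factor of 10 per
  // nesting level -- so defs and uses inside deep loops make an interval
  // much more expensive to spill.)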

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its defs / uses can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                           DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                           DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = SlotIndex();
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DEBUG({
      errs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.dump();
      errs() << '\n';
    });

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // create a new register for this spill
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // the spill weight is now infinity as it
      // cannot be spilled again
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
           E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      SlotIndex index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DEBUG({
          errs() << "\t\t\t\tadded new interval: ";
          nI.dump();
          errs() << '\n';
        });
    }

    RI = mri_->reg_begin(li.reg);
  }

  return added;
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DEBUG({
      errs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(errs(), tri_);
      errs() << '\n';
    });

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
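
  // Splitting is only attempted when the interval spans more than one basic
  // block; for an interval confined to a single block a plain spill/reload
  // is inserted instead.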
  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);
    else {
      // This case only occurs when the prealloc splitter has already assigned
      // a stack slot to this vreg.
      Slot = vrm.getStackSlot(li.reg);
    }
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo,
                         SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                         MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores only if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
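  // SpillMBBs / SpillIdxes and RestoreMBBs / RestoreIdxes were populated by
  // rewriteInstructionsForSpills: per basic block, they record where a store
  // to or a reload from the stack slot may be needed. Walk them and either
  // fold the memory access into an existing instruction or ask the spiller
  // to insert one. Note that each instruction's SlotIndex is subdivided into
  // load / use / def / store sub-indices, hence the getLoadIndex() /
  // getDefIndex() / getStoreIndex() trimming below.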
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,
                                   VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
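  // Now walk the restore points. A restore is either a reload from the
  // stack slot or a rematerialization of the original def, and either one
  // may be foldable into the using instruction.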
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and bump the
              // register interval's spill weight to HUGE_VALF to prevent it
              // from being spilled.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.weight = HUGE_VALF;
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue
      // a load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }
  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}
/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super-register that is allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}
/// getRepresentativeReg - Find the largest super-register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
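// For example (target-specific, so treat this as a sketch): on 32-bit x86
// the super-registers of AL are AX and EAX. EAX has no super-register of its
// own, so if EAX has a live interval, getRepresentativeReg(AL) would
// typically return EAX; with no larger interval present, AL is returned
// unchanged.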
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}
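// Hypothetical usage sketch: an allocator that has run out of registers
// might compare getNumConflictsWithPhysReg(li, PhysReg) across candidate
// physical registers and spill the least-conflicting one around li's defs
// and uses via spillPhysRegAroundRegDefsUses() below.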
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there is a register which aliases PhysReg but is not a sub-register
    // of the chosen representative super-register, assert: we cannot handle
    // that case yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }
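  // Walk every instruction that defines or uses li.reg; wherever one of the
  // physical registers collected above is live across such an instruction,
  // record an emergency spill point and carve that slot out of the physical
  // register's live interval.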
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        llvm_report_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}
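/// addLiveRangeToEndOfBlock - Given a register and an instruction defining
/// it in its block, create a new value number for the register and extend
/// its live range from the def to the end of the defining basic block.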
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
  LiveRange LR(
     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
     getMBBEndIdx(startInst->getParent()).getNextIndex().getBaseIndex(), VN);
  Interval.addRange(LR);

  return LR;
}