1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
49 // Hidden options for help debugging.
// NOTE(review): this listing embeds original file line numbers and skips some
// lines (52, 55, 59, ...); blank/elided lines of the original are missing here.
// Debug-only switch: globally disables rematerialization when set.
50 static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
// Debug-only switch: enables the "fast" spilling path instead of the default.
53 static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
// Pass statistics, reported with -stats.
56 STATISTIC(numIntervals , "Number of original intervals");
57 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
58 STATISTIC(numSplits , "Number of intervals split");
// Pass identification and registration with the PassManager.
60 char LiveIntervals::ID = 0;
61 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// getAnalysisUsage - Declare the analyses this pass requires and preserves.
// It requires AliasAnalysis, LiveVariables, PHIElimination,
// TwoAddressInstructionPass, ProcessImplicitDefs and SlotIndexes (the latter
// transitively, since clients of LiveIntervals also use the indexes), and
// preserves them so downstream passes need not recompute them.
// NOTE(review): the closing brace (original line 83) is elided in this listing.
63 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
65 AU.addRequired<AliasAnalysis>();
66 AU.addPreserved<AliasAnalysis>();
67 AU.addPreserved<LiveVariables>();
68 AU.addRequired<LiveVariables>();
69 AU.addPreservedID(MachineLoopInfoID);
70 AU.addPreservedID(MachineDominatorsID);
73 AU.addPreservedID(PHIEliminationID);
74 AU.addRequiredID(PHIEliminationID);
77 AU.addRequiredID(TwoAddressInstructionPassID);
78 AU.addPreserved<ProcessImplicitDefs>();
79 AU.addRequired<ProcessImplicitDefs>();
80 AU.addPreserved<SlotIndexes>();
81 AU.addRequiredTransitive<SlotIndexes>();
// Chain to the base class so common MachineFunctionPass requirements are added.
82 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Free all pass-owned data: the LiveInterval objects held in
// r2iMap_, the VNInfo allocator's memory, and any cloned MachineInstrs.
// NOTE(review): lines 89-92 and the loop/function closing braces are elided in
// this listing; presumably the r2iMap_ loop deletes each interval — confirm
// against the full source.
85 void LiveIntervals::releaseMemory() {
86 // Free the live intervals themselves.
87 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
88 E = r2iMap_.end(); I != E; ++I)
93 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
94 VNInfoAllocator.Reset();
// Delete every cloned instruction kept in CloneMIs.
95 while (!CloneMIs.empty()) {
96 MachineInstr *MI = CloneMIs.back();
98 mf_->DeleteMachineInstr(MI);
102 /// runOnMachineFunction - Register allocate the whole function
// Caches pointers to the target description, register info, and required
// analyses, then computes the live intervals for the function.
// NOTE(review): lines 103, 105 and 114-116 are elided in this listing; mf_ is
// read on line 106 but its assignment (presumably "mf_ = &fn;" on line 105) is
// not visible here, and the computeIntervals() call and return statement are
// also missing — confirm against the full source.
104 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
106 mri_ = &mf_->getRegInfo();
107 tm_ = &fn.getTarget();
108 tri_ = tm_->getRegisterInfo();
109 tii_ = tm_->getInstrInfo();
110 aa_ = &getAnalysis<AliasAnalysis>();
111 lv_ = &getAnalysis<LiveVariables>();
112 indexes_ = &getAnalysis<SlotIndexes>();
113 allocatableRegs_ = tri_->getAllocatableSet(fn);
// Update the -stats counter with the number of intervals just built.
117 numIntervals += getNumIntervals();
123 /// print - Implement the dump method.
// Prints every live interval in the pass (register -> ranges/value numbers)
// to the given stream; the Module parameter is unused.
124 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
125 OS << "********** INTERVALS **********\n";
126 for (const_iterator I = begin(), E = end(); I != E; ++I) {
127 I->second->print(OS, tri_);
// printInstrs - Print every machine instruction in the function, prefixed by
// its SlotIndex, grouped per basic block.
134 void LiveIntervals::printInstrs(raw_ostream &OS) const {
135 OS << "********** MACHINEINSTRS **********\n";
137 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
138 mbbi != mbbe; ++mbbi) {
139 OS << "BB#" << mbbi->getNumber()
140 << ":\t\t# derived from " << mbbi->getName() << "\n";
141 for (MachineBasicBlock::iterator mii = mbbi->begin(),
142 mie = mbbi->end(); mii != mie; ++mii) {
// Each instruction is printed with the index assigned by SlotIndexes.
143 OS << getInstructionIndex(mii) << '\t' << *mii;
// dumpInstrs - Debug helper; body elided in this listing (presumably forwards
// to printInstrs on the debug stream — confirm against the full source).
148 void LiveIntervals::dumpInstrs() const {
// conflictsWithPhysReg - Return true if assigning physical register 'reg' to
// the (single-range, single-block) interval 'li' would conflict with another
// operand in the covered instruction range. Only handles intervals whose live
// range stays inside one basic block; anything fancier is rejected up front.
// NOTE(review): several lines (early returns, loop closers) are elided in this
// listing, so the exact control flow of the rejection paths is not visible.
152 bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
153 VirtRegMap &vrm, unsigned reg) {
154 // We don't handle fancy stuff crossing basic block boundaries
155 if (li.ranges.size() != 1)
157 const LiveRange &range = li.ranges.front();
// Normalize [start,end) to whole-instruction base indexes.
158 SlotIndex idx = range.start.getBaseIndex();
159 SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
161 // Skip deleted instructions
162 MachineInstr *firstMI = getInstructionFromIndex(idx);
163 while (!firstMI && idx != end) {
164 idx = idx.getNextIndex();
165 firstMI = getInstructionFromIndex(idx);
170 // Find last instruction in range
171 SlotIndex lastIdx = end.getPrevIndex();
172 MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
173 while (!lastMI && lastIdx != idx) {
174 lastIdx = lastIdx.getPrevIndex();
175 lastMI = getInstructionFromIndex(lastIdx);
180 // Range cannot cross basic block boundaries or terminators
181 MachineBasicBlock *MBB = firstMI->getParent();
182 if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
185 MachineBasicBlock::const_iterator E = lastMI;
// Walk every instruction in [firstMI, lastMI) looking for a conflicting use.
187 for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
188 const MachineInstr &MI = *I;
190 // Allow copies to and from li.reg
191 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
192 if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
193 if (SrcReg == li.reg || DstReg == li.reg)
196 // Check for operands using reg
197 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
198 const MachineOperand& mop = MI.getOperand(i);
201 unsigned PhysReg = mop.getReg();
202 if (PhysReg == 0 || PhysReg == li.reg)
// Map virtual registers through the VirtRegMap to their assigned physreg.
204 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
205 if (!vrm.hasPhys(PhysReg))
207 PhysReg = vrm.getPhys(PhysReg);
// Any overlap (including sub/super-register aliasing) is a conflict.
209 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
214 // No conflicts found.
218 /// conflictsWithPhysRegRef - Similar to conflictsWithPhysReg except
219 /// it can check use as well.
// Scans every instruction covered by li's ranges (skipping deleted and
// already-joined copies) for an operand referencing a sub-register of Reg;
// uses are only considered when CheckUse is set.
// NOTE(review): the loop-body conditions have elided continuation lines in
// this listing (e.g. the operand isReg() filter and the return statements).
220 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
221 unsigned Reg, bool CheckUse,
222 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
223 for (LiveInterval::Ranges::const_iterator
224 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// Iterate instruction base indexes across the half-open range [start,end).
225 for (SlotIndex index = I->start.getBaseIndex(),
226 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
228 index = index.getNextIndex()) {
229 MachineInstr *MI = getInstructionFromIndex(index);
231 continue; // skip deleted instructions
// Copies that the coalescer has already joined cannot conflict.
233 if (JoinedCopies.count(MI))
235 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
236 MachineOperand& MO = MI->getOperand(i);
239 if (MO.isUse() && !CheckUse)
241 unsigned PhysReg = MO.getReg();
242 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
244 if (tri_->isSubRegister(Reg, PhysReg))
// printRegName - Debug helper: print a physical register by its target name,
// or a virtual register as "%reg<N>", to the debug stream.
254 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
255 if (TargetRegisterInfo::isPhysicalRegister(reg))
256 dbgs() << tri_->getName(reg);
258 dbgs() << "%reg" << reg;
// handleVirtualRegisterDef - Extend 'interval' for a definition of a virtual
// register by instruction 'mi' (operand MO) at index MIIdx. Three cases:
//  1) first def of the vreg: build ranges from LiveVariables' kill/alive info;
//  2) redef tied to a use (two-address): split the existing value in two;
//  3) redef from PHI elimination: renumber the PHI ranges and end this def's
//     range at the end of the block.
// NOTE(review): this listing elides many lines (parameter declarations for
// MIIdx/MO/MOIdx, several returns and closing braces), so comments below only
// describe what the visible code shows.
262 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
263 MachineBasicBlock::iterator mi,
267 LiveInterval &interval) {
269 dbgs() << "\t\tregister: ";
270 printRegName(interval.reg, tri_);
273 // Virtual registers may be defined multiple times (due to phi
274 // elimination and 2-addr elimination). Much of what we do only has to be
275 // done once for the vreg. We use an empty interval to detect the first
276 // time we see a vreg.
277 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
278 if (interval.empty()) {
279 // Get the Idx of the defining instructions.
280 SlotIndex defIndex = MIIdx.getDefIndex();
281 // Earlyclobbers move back one, so that they overlap the live range
283 if (MO.isEarlyClobber())
284 defIndex = MIIdx.getUseIndex();
// Record the defining copy instruction (if any) so the coalescer can use it.
286 MachineInstr *CopyMI = NULL;
287 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
288 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
289 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
290 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
291 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
293 // Earlyclobbers move back one.
294 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
296 assert(ValNo->id == 0 && "First value in interval is not 0?");
298 // Loop over all of the blocks that the vreg is defined in. There are
299 // two cases we have to handle here. The most common case is a vreg
300 // whose lifetime is contained within a basic block. In this case there
301 // will be a single kill, in MBB, which comes after the definition.
302 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
303 // FIXME: what about dead vars?
305 if (vi.Kills[0] != mi)
306 killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
308 killIdx = defIndex.getStoreIndex();
310 // If the kill happens after the definition, we have an intra-block
312 if (killIdx > defIndex) {
313 assert(vi.AliveBlocks.empty() &&
314 "Shouldn't be alive across any blocks!");
315 LiveRange LR(defIndex, killIdx, ValNo);
316 interval.addRange(LR);
317 DEBUG(dbgs() << " +" << LR << "\n");
318 ValNo->addKill(killIdx);
323 // The other case we handle is when a virtual register lives to the end
324 // of the defining block, potentially live across some blocks, then is
325 // live into some number of blocks, but gets killed. Start by adding a
326 // range that goes from this definition to the end of the defining block.
327 LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
328 DEBUG(dbgs() << " +" << NewLR);
329 interval.addRange(NewLR);
331 // Iterate over all of the blocks that the variable is completely
332 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
334 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
335 E = vi.AliveBlocks.end(); I != E; ++I) {
336 MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
337 LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
338 interval.addRange(LR);
339 DEBUG(dbgs() << " +" << LR);
342 // Finally, this virtual register is live from the start of any killing
343 // block to the 'use' slot of the killing instruction.
344 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
345 MachineInstr *Kill = vi.Kills[i];
347 getInstructionIndex(Kill).getDefIndex();
348 LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
349 interval.addRange(LR);
350 ValNo->addKill(killIdx);
351 DEBUG(dbgs() << " +" << LR);
355 // If this is the second time we see a virtual register definition, it
356 // must be due to phi elimination or two addr elimination. If this is
357 // the result of two address elimination, then the vreg is one of the
358 // def-and-use register operand.
359 if (mi->isRegTiedToUseOperand(MOIdx)) {
360 // If this is a two-address definition, then we have already processed
361 // the live range. The only problem is that we didn't realize there
362 // are actually two values in the live interval. Because of this we
363 // need to take the LiveRegion that defines this register and split it
365 assert(interval.containsOneValue());
366 SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
367 SlotIndex RedefIndex = MIIdx.getDefIndex();
368 if (MO.isEarlyClobber())
369 RedefIndex = MIIdx.getUseIndex();
371 const LiveRange *OldLR =
372 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
373 VNInfo *OldValNo = OldLR->valno;
375 // Delete the initial value, which should be short and continuous,
376 // because the 2-addr copy must be in the same MBB as the redef.
377 interval.removeRange(DefIndex, RedefIndex);
379 // Two-address vregs should always only be redefined once. This means
380 // that at this point, there should be exactly one value number in it.
381 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
383 // The new value number (#1) is defined by the instruction we claimed
385 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
386 false, // update at *
388 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
390 // Value#0 is now defined by the 2-addr instruction.
391 OldValNo->def = RedefIndex;
392 OldValNo->setCopy(0);
394 // Add the new live interval which replaces the range for the input copy.
395 LiveRange LR(DefIndex, RedefIndex, ValNo);
396 DEBUG(dbgs() << " replace range with " << LR);
397 interval.addRange(LR);
398 ValNo->addKill(RedefIndex);
400 // If this redefinition is dead, we need to add a dummy unit live
401 // range covering the def slot.
403 interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
407 dbgs() << " RESULT: ";
408 interval.print(dbgs(), tri_);
411 // Otherwise, this must be because of phi elimination. If this is the
412 // first redefinition of the vreg that we have seen, go back and change
413 // the live range in the PHI block to be a different value number.
414 if (interval.containsOneValue()) {
416 VNInfo *VNI = interval.getValNumInfo(0);
417 // Phi elimination may have reused the register for multiple identical
418 // phi nodes. There will be a kill per phi. Remove the old ranges that
419 // we now know have an incorrect number.
420 for (unsigned ki=0, ke=vi.Kills.size(); ki != ke; ++ki) {
421 MachineInstr *Killer = vi.Kills[ki];
422 SlotIndex Start = getMBBStartIdx(Killer->getParent());
423 SlotIndex End = getInstructionIndex(Killer).getDefIndex();
425 dbgs() << "\n\t\trenaming [" << Start << "," << End << "] in: ";
426 interval.print(dbgs(), tri_);
428 interval.removeRange(Start, End);
430 // Replace the interval with one of a NEW value number. Note that
431 // this value number isn't actually defined by an instruction, weird
433 LiveRange LR(Start, End,
434 interval.getNextValue(SlotIndex(Start, true),
435 0, false, VNInfoAllocator));
// Mark the renumbered value as a PHI definition (no real defining instr).
436 LR.valno->setIsPHIDef(true);
437 interval.addRange(LR);
438 LR.valno->addKill(End);
441 MachineBasicBlock *killMBB = getMBBFromIndex(VNI->def);
442 VNI->addKill(indexes_->getTerminatorGap(killMBB));
443 VNI->setHasPHIKill(true);
445 dbgs() << " RESULT: ";
446 interval.print(dbgs(), tri_);
450 // In the case of PHI elimination, each variable definition is only
451 // live until the end of the block. We've already taken care of the
452 // rest of the live range.
453 SlotIndex defIndex = MIIdx.getDefIndex();
454 if (MO.isEarlyClobber())
455 defIndex = MIIdx.getUseIndex();
// As in the first-def case, remember a defining copy for the coalescer.
458 MachineInstr *CopyMI = NULL;
459 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
460 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
461 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
462 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
463 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
465 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
467 SlotIndex killIndex = getMBBEndIdx(mbb);
468 LiveRange LR(defIndex, killIndex, ValNo);
469 interval.addRange(LR);
470 ValNo->addKill(indexes_->getTerminatorGap(mbb));
471 ValNo->setHasPHIKill(true);
472 DEBUG(dbgs() << " +" << LR);
476 DEBUG(dbgs() << '\n');
// handlePhysicalRegisterDef - Extend 'interval' for a definition of a physical
// register at MIIdx. Physical-register intervals never cross block boundaries,
// so the range ends either at the def itself (dead), at the killing use, at a
// redefining instruction, or — if no use is found — is treated as dead.
// NOTE(review): the MIIdx/MO parameter lines and several branch/return lines
// are elided in this listing.
479 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
480 MachineBasicBlock::iterator mi,
483 LiveInterval &interval,
484 MachineInstr *CopyMI) {
485 // A physical register cannot be live across basic block, so its
486 // lifetime must end somewhere in its defining basic block.
488 dbgs() << "\t\tregister: ";
489 printRegName(interval.reg, tri_);
492 SlotIndex baseIndex = MIIdx;
493 SlotIndex start = baseIndex.getDefIndex();
494 // Earlyclobbers move back one.
495 if (MO.isEarlyClobber())
496 start = MIIdx.getUseIndex();
497 SlotIndex end = start;
499 // If it is not used after definition, it is considered dead at
500 // the instruction defining it. Hence its interval is:
501 // [defSlot(def), defSlot(def)+1)
502 // For earlyclobbers, the defSlot was pushed back one; the extra
503 // advance below compensates.
505 DEBUG(dbgs() << " dead");
506 end = start.getStoreIndex();
510 // If it is not dead on definition, it must be killed by a
511 // subsequent instruction. Hence its interval is:
512 // [defSlot(def), useSlot(kill)+1)
513 baseIndex = baseIndex.getNextIndex();
// Scan forward through the block for the kill or redefinition.
514 while (++mi != MBB->end()) {
516 if (getInstructionFromIndex(baseIndex) == 0)
517 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
519 if (mi->killsRegister(interval.reg, tri_)) {
520 DEBUG(dbgs() << " killed");
521 end = baseIndex.getDefIndex();
524 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
526 if (mi->isRegTiedToUseOperand(DefIdx)) {
527 // Two-address instruction.
528 end = baseIndex.getDefIndex();
530 // Another instruction redefines the register before it is ever read.
531 // Then the register is essentially dead at the instruction that defines
532 // it. Hence its interval is:
533 // [defSlot(def), defSlot(def)+1)
534 DEBUG(dbgs() << " dead");
535 end = start.getStoreIndex();
541 baseIndex = baseIndex.getNextIndex();
544 // The only case we should have a dead physreg here without a killing or
545 // instruction where we know it's dead is if it is live-in to the function
546 // and never used. Another possible case is the implicit use of the
547 // physical register has been deleted by two-address pass.
548 end = start.getStoreIndex();
551 assert(start < end && "did not find end of interval?");
553 // Already exists? Extend old live interval.
554 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
555 bool Extend = OldLR != interval.end();
// Reuse the existing value number when extending; otherwise create a new one.
556 VNInfo *ValNo = Extend
557 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
558 if (MO.isEarlyClobber() && Extend)
559 ValNo->setHasRedefByEC(true);
560 LiveRange LR(start, end, ValNo);
561 interval.addRange(LR);
562 LR.valno->addKill(end);
563 DEBUG(dbgs() << " +" << LR << '\n');
// handleRegisterDef - Dispatch a register definition to the virtual- or
// physical-register handler. For allocatable physregs, the def also defines
// every sub-register, which get their own (non-copy) intervals.
// NOTE(review): the MIIdx/MO/MOIdx parameter lines and the CopyMI assignment
// body are elided in this listing.
566 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
567 MachineBasicBlock::iterator MI,
571 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
572 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
573 getOrCreateInterval(MO.getReg()));
574 else if (allocatableRegs_[MO.getReg()]) {
// Identify copy-like instructions so the physreg interval records its source.
575 MachineInstr *CopyMI = NULL;
576 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
577 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
578 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
579 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
580 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
582 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
583 getOrCreateInterval(MO.getReg()), CopyMI);
584 // Def of a register also defines its sub-registers.
585 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
586 // If MI also modifies the sub-register explicitly, avoid processing it
587 // more than once. Do not pass in TRI here so it checks for exact match.
588 if (!MI->modifiesRegister(*AS))
589 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
590 getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Build a range for a register that is live-in to MBB:
// scan forward for the first kill or redefinition; if neither is found the
// register is either dead on entry or (for non-aliases at function entry)
// live through the block. The resulting value number is marked as a PHI def
// since it has no defining instruction in this block.
// NOTE(review): the MIIdx parameter line and several branch/closing lines are
// elided in this listing.
594 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
596 LiveInterval &interval, bool isAlias) {
598 dbgs() << "\t\tlivein register: ";
599 printRegName(interval.reg, tri_);
602 // Look for kills, if it reaches a def before it's killed, then it shouldn't
603 // be considered a livein.
604 MachineBasicBlock::iterator mi = MBB->begin();
605 SlotIndex baseIndex = MIIdx;
606 SlotIndex start = baseIndex;
607 if (getInstructionFromIndex(baseIndex) == 0)
608 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
610 SlotIndex end = baseIndex;
611 bool SeenDefUse = false;
613 while (mi != MBB->end()) {
614 if (mi->killsRegister(interval.reg, tri_)) {
615 DEBUG(dbgs() << " killed");
616 end = baseIndex.getDefIndex();
619 } else if (mi->modifiesRegister(interval.reg, tri_)) {
620 // Another instruction redefines the register before it is ever read.
621 // Then the register is essentially dead at the instruction that defines
622 // it. Hence its interval is:
623 // [defSlot(def), defSlot(def)+1)
624 DEBUG(dbgs() << " dead");
625 end = start.getStoreIndex();
631 if (mi != MBB->end()) {
632 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
636 // Live-in register might not be used at all.
639 DEBUG(dbgs() << " dead");
640 end = MIIdx.getStoreIndex();
642 DEBUG(dbgs() << " live through");
// Create the value number anchored at the block start; it is a PHI def
// because no instruction in this block defines it.
648 interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
649 0, false, VNInfoAllocator);
650 vni->setIsPHIDef(true);
651 LiveRange LR(start, end, vni);
653 interval.addRange(LR);
654 LR.valno->addKill(end);
655 DEBUG(dbgs() << " +" << LR << '\n');
658 /// computeIntervals - computes the live intervals for virtual
659 /// registers. for some ordering of the machine instructions [1,N] a
660 /// live interval is an interval [i, j) where 1 <= i <= j < N for
661 /// which a variable is live
// Walks every basic block: first creates intervals for the block's live-in
// registers (and any un-seen sub-registers aliasing them), then visits each
// instruction's register operands, building intervals for defs and collecting
// undef uses to give them empty intervals afterwards.
// NOTE(review): some loop-header continuation lines and closing braces are
// elided in this listing.
662 void LiveIntervals::computeIntervals() {
663 DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
664 << "********** Function: "
665 << ((Value*)mf_->getFunction())->getName() << '\n');
667 SmallVector<unsigned, 8> UndefUses;
668 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
670 MachineBasicBlock *MBB = MBBI;
671 // Track the index of the current machine instr.
672 SlotIndex MIIndex = getMBBStartIdx(MBB);
673 DEBUG(dbgs() << MBB->getName() << ":\n");
675 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
677 // Create intervals for live-ins to this BB first.
678 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
679 LE = MBB->livein_end(); LI != LE; ++LI) {
680 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
681 // Multiple live-ins can alias the same register.
682 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
683 if (!hasInterval(*AS))
684 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
688 // Skip over empty initial indices.
689 if (getInstructionFromIndex(MIIndex) == 0)
690 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
692 for (; MI != miEnd; ++MI) {
693 DEBUG(dbgs() << MIIndex << "\t" << *MI);
// Walk operands in reverse so operand removal/renumbering is safe.
696 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
697 MachineOperand &MO = MI->getOperand(i);
698 if (!MO.isReg() || !MO.getReg())
701 // handle register defs - build intervals
703 handleRegisterDef(MBB, MI, MIIndex, MO, i);
704 else if (MO.isUndef())
705 UndefUses.push_back(MO.getReg());
708 // Move to the next instr slot.
709 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
713 // Create empty intervals for registers defined by implicit_def's (except
714 // for those implicit_def that define values which are liveout of their
716 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
717 unsigned UndefReg = UndefUses[i];
718 (void)getOrCreateInterval(UndefReg);
// createInterval - Allocate a new LiveInterval for 'reg'. Physical registers
// get HUGE_VALF spill weight so the allocator never tries to spill them.
722 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
723 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
724 return new LiveInterval(reg, Weight);
727 /// dupInterval - Duplicate a live interval. The caller is responsible for
728 /// managing the allocated memory.
// Creates a fresh interval for the same register and copies ranges and value
// numbers from 'li' using the pass's VNInfo allocator.
729 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
730 LiveInterval *NewLI = createInterval(li->reg);
731 NewLI->Copy(*li, mri_, getVNInfoAllocator());
735 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
736 /// copy field and returns the source register that defines it.
// Handles the three subreg pseudo-instructions plus target move instructions.
// NOTE(review): early-return lines (e.g. for a null copy, and the returns of
// Reg/SrcReg) are elided in this listing.
737 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
741 if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
742 // If it's extracting out of a physical register, return the sub-register.
743 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
744 if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
745 unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
746 unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
747 if (SrcSubReg == DstSubReg)
748 // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
749 // reg1034 can still be coalesced to EDX.
751 assert(DstSubReg == 0);
752 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
755 } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
756 VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
// For INSERT_SUBREG/SUBREG_TO_REG, operand 2 is the inserted source value.
757 return VNI->getCopy()->getOperand(2).getReg();
759 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
760 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
762 llvm_unreachable("Unrecognized copy instruction!");
766 //===----------------------------------------------------------------------===//
767 // Register allocator hooks.
770 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
771 /// allow one) virtual register operand, then its uses are implicitly using
772 /// the register. Returns the virtual register.
// Scans MI's use operands, skipping li.reg itself and allocatable physregs;
// asserts there is at most one other register operand.
// NOTE(review): the accumulator declaration, continue statements, and the
// return are elided in this listing.
773 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
774 MachineInstr *MI) const {
776 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
777 MachineOperand &MO = MI->getOperand(i);
778 if (!MO.isReg() || !MO.isUse())
780 unsigned Reg = MO.getReg();
781 if (Reg == 0 || Reg == li.reg)
784 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
785 !allocatableRegs_[Reg])
787 // FIXME: For now, only remat MI with at most one register operand.
789 "Can't rematerialize instruction with multiple register operand!");
798 /// isValNoAvailableAt - Return true if the val# of the specified interval
799 /// which reaches the given instruction also reaches the specified use index.
800 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
801 SlotIndex UseIdx) const {
802 SlotIndex Index = getInstructionIndex(MI);
// The value number live at MI's own index...
803 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
// ...must also be the one covering UseIdx.
804 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
805 return UI != li.end() && UI->valno == ValNo;
808 /// isReMaterializable - Returns true if the definition MI of the specified
809 /// val# of the specified interval is re-materializable.
// Requires the target to deem MI trivially rematerializable; additionally, if
// MI implicitly uses another virtual register, that register's value must be
// available at every use of li, and must not be scheduled for spilling.
// NOTE(review): several parameter/return/continue lines are elided in this
// listing.
810 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
811 const VNInfo *ValNo, MachineInstr *MI,
812 SmallVectorImpl<LiveInterval*> &SpillIs,
817 if (!tii_->isTriviallyReMaterializable(MI, aa_))
820 // Target-specific code can mark an instruction as being rematerializable
821 // if it has one virtual reg use, though it had better be something like
822 // a PIC base register which is likely to be live everywhere.
823 unsigned ImpUse = getReMatImplicitUse(li, MI);
825 const LiveInterval &ImpLi = getInterval(ImpUse);
826 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
827 re = mri_->use_end(); ri != re; ++ri) {
828 MachineInstr *UseMI = &*ri;
829 SlotIndex UseIdx = getInstructionIndex(UseMI);
// Only uses reached by this particular value number matter.
830 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
832 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
836 // If a register operand of the re-materialized instruction is going to
837 // be spilled next, then it's not legal to re-materialize this instruction.
838 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
839 if (ImpUse == SpillIs[i]->reg)
845 /// isReMaterializable - Returns true if the definition MI of the specified
846 /// val# of the specified interval is re-materializable.
// Convenience overload: forwards to the full version with empty/dummy
// SpillIs and isLoad out-arguments.
847 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
848 const VNInfo *ValNo, MachineInstr *MI) {
849 SmallVector<LiveInterval*, 4> Dummy1;
851 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
854 /// isReMaterializable - Returns true if every definition of MI of every
855 /// val# of the specified interval is re-materializable.
// Checks each value number of li in turn; a val# whose def is not accurately
// recorded cannot be rematerialized.
// NOTE(review): continue/return lines and the loop increment are elided in
// this listing.
856 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
857 SmallVectorImpl<LiveInterval*> &SpillIs,
860 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
862 const VNInfo *VNI = *i;
864 continue; // Dead val#.
865 // Is the def for the val# rematerializable?
866 if (!VNI->isDefAccurate())
868 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
869 bool DefIsLoad = false;
871 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
878 /// FilterFoldedOps - Filter out two-address use operands. Return
879 /// true if it finds any issue with the operands that ought to prevent
// Accumulates a VirtRegMap::ModRef mask in MRInfo (isMod for defs, isRef for
// uses, isModRef for tied operands) and collects foldable operand indexes
// into FoldOps.
// NOTE(review): the MRInfo parameter line, continue statements, and the final
// return are elided in this listing.
881 static bool FilterFoldedOps(MachineInstr *MI,
882 SmallVector<unsigned, 2> &Ops,
884 SmallVector<unsigned, 2> &FoldOps) {
886 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
887 unsigned OpIdx = Ops[i];
888 MachineOperand &MO = MI->getOperand(OpIdx);
889 // FIXME: fold subreg use.
893 MRInfo |= (unsigned)VirtRegMap::isMod;
895 // Filter out two-address use operand(s).
896 if (MI->isRegTiedToDefOperand(OpIdx)) {
897 MRInfo = VirtRegMap::isModRef;
900 MRInfo |= (unsigned)VirtRegMap::isRef;
902 FoldOps.push_back(OpIdx);
908 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
909 /// slot / to reg or any rematerialized load into ith operand of specified
910 /// MI. If it is successful, MI is updated with the newly created MI and
// On success MI is replaced in-place (maps in both this pass and the
// VirtRegMap are transferred to the folded instruction).
// NOTE(review): the MRInfo declaration, several return statements, and the
// closing braces are elided in this listing.
912 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
913 VirtRegMap &vrm, MachineInstr *DefMI,
915 SmallVector<unsigned, 2> &Ops,
916 bool isSS, int Slot, unsigned Reg) {
917 // If it is an implicit def instruction, just delete it.
918 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
919 RemoveMachineInstrFromMaps(MI);
920 vrm.RemoveMachineInstrFromMaps(MI);
921 MI->eraseFromParent();
926 // Filter the list of operand indexes that are to be folded. Abort if
927 // any operand will prevent folding.
929 SmallVector<unsigned, 2> FoldOps;
930 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
933 // The only time it's safe to fold into a two address instruction is when
934 // it's folding reload and spill from / into a spill stack slot.
935 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Ask the target to fold either the stack slot or the remat def into MI.
938 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
939 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
941 // Remember this instruction uses the spill slot.
942 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
944 // Attempt to fold the memory reference into the instruction. If
945 // we can do this, we don't need to insert spill code.
946 MachineBasicBlock &MBB = *MI->getParent();
947 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
948 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Transfer all spill/restore/emergency-spill bookkeeping to the new instr.
949 vrm.transferSpillPts(MI, fmi);
950 vrm.transferRestorePts(MI, fmi);
951 vrm.transferEmergencySpills(MI, fmi);
952 ReplaceMachineInstrInMaps(MI, fmi);
953 MI = MBB.insert(MBB.erase(MI), fmi);
960 /// canFoldMemoryOperand - Returns true if the specified load / store
961 /// folding is possible.
// Mirrors the checks in tryFoldMemoryOperand without mutating anything:
// filters operands, rejects remat of modified operands, then queries the
// target.
// NOTE(review): the MRInfo declaration, the ReMat parameter line, and a
// return are elided in this listing.
962 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
963 SmallVector<unsigned, 2> &Ops,
965 // Filter the list of operand indexes that are to be folded. Abort if
966 // any operand will prevent folding.
968 SmallVector<unsigned, 2> FoldOps;
969 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
972 // It's only legal to remat for a use, not a def.
973 if (ReMat && (MRInfo & VirtRegMap::isMod))
976 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return true if every range of 'li' is covered by the
// same single basic block, as determined by the SlotIndexes analysis.
// NOTE(review): the null-check/return lines (981, 984-986, 990+) are elided
// in this listing.
979 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
980 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
982 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
// Every subsequent range must map to the same covering block.
987 for (++itr; itr != li.ranges.end(); ++itr) {
988 MachineBasicBlock *mbb2 =
989 indexes_->getMBBCoveringRange(itr->start, itr->end);
998 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
999 /// interval on to-be re-materialized operands of MI) with new register.
// For each virtual-register operand of MI whose value is being rematerialized,
// find the use of li.reg inside the remat'ed instruction and retarget it to
// NewVReg.
// NOTE(review): the vrm parameter line, operand filters, and continue lines
// are elided in this listing.
1000 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1001 MachineInstr *MI, unsigned NewVReg,
1003 // There is an implicit use. That means one of the other operand is
1004 // being remat'ed and the remat'ed instruction has li.reg as an
1005 // use operand. Make sure we rewrite that as well.
1006 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1007 MachineOperand &MO = MI->getOperand(i);
1010 unsigned Reg = MO.getReg();
1011 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1013 if (!vrm.isReMaterialized(Reg))
1015 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1016 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1018 UseMO->setReg(NewVReg);
1022 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1023 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// Rewrites one instruction's references to li.reg within [index, end):
// deletes fully-rematerialized defs, attempts load/store folding, otherwise
// renames the operands to a (possibly fresh) NewVReg and grows that vreg's
// new live interval. Returns whether the memory operand could be folded
// (CanFold); HasDef/HasUse report what kinds of operands were rewritten.
// NOTE(review): this listing has elided lines throughout (gaps in the
// original numbering), including the RestartInstruction label and several
// closing braces — read comments below as hedged where they cover gaps.
1024 bool LiveIntervals::
1025 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1026 bool TrySplit, SlotIndex index, SlotIndex end,
1028 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1029 unsigned Slot, int LdSlot,
1030 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1032 const TargetRegisterClass* rc,
1033 SmallVector<int, 4> &ReMatIds,
1034 const MachineLoopInfo *loopInfo,
1035 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1036 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1037 std::vector<LiveInterval*> &NewLIs) {
1038 bool CanFold = false;
// Scan every operand looking for references to the spilled register li.reg.
1040 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1041 MachineOperand& mop = MI->getOperand(i);
1044 unsigned Reg = mop.getReg();
1045 unsigned RegI = Reg;
1046 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1051 bool TryFold = !DefIsReMat;
1052 bool FoldSS = true; // Default behavior unless it's a remat.
1053 int FoldSlot = Slot;
1055 // If this is the rematerializable definition MI itself and
1056 // all of its uses are rematerialized, simply delete it.
1057 if (MI == ReMatOrigDefMI && CanDelete) {
1058 DEBUG(dbgs() << "\t\t\t\tErasing re-materlizable def: "
// Remove from both the index maps and the VirtRegMap before erasing.
1060 RemoveMachineInstrFromMaps(MI);
1061 vrm.RemoveMachineInstrFromMaps(MI);
1062 MI->eraseFromParent();
1066 // If def for this use can't be rematerialized, then try folding.
1067 // If def is rematerializable and it's a load, also try folding.
1068 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1070 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1076 // Scan all of the operands of this instruction rewriting operands
1077 // to use NewVReg instead of li.reg as appropriate. We do this for
1080 // 1. If the instr reads the same spilled vreg multiple times, we
1081 // want to reuse the NewVReg.
1082 // 2. If the instr is a two-addr instruction, we are required to
1083 // keep the src/dst regs pinned.
1085 // Keep track of whether we replace a use and/or def so that we can
1086 // create the spill interval with the appropriate range.
1088 HasUse = mop.isUse();
1089 HasDef = mop.isDef();
// Ops collects the indices of later operands that also reference li.reg,
// so they can all be renamed to the same NewVReg below.
1090 SmallVector<unsigned, 2> Ops;
1092 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1093 const MachineOperand &MOj = MI->getOperand(j);
1096 unsigned RegJ = MOj.getReg();
1097 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
// <undef> operands don't contribute a real use/def.
1101 if (!MOj.isUndef()) {
1102 HasUse |= MOj.isUse();
1103 HasDef |= MOj.isDef();
1108 // Create a new virtual register for the spill interval.
1109 // Create the new register now so we can map the fold instruction
1110 // to the new register so when it is unfolded we get the correct
1112 bool CreatedNewVReg = false;
1114 NewVReg = mri_->createVirtualRegister(rc);
1116 CreatedNewVReg = true;
1118 // The new virtual register should get the same allocation hints as the
// Copy the register-allocation hint from the original vreg.
1120 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1121 if (Hint.first || Hint.second)
1122 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1128 // Do not fold load / store here if we are splitting. We'll find an
1129 // optimal point to insert a load / store later.
1131 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1132 Ops, FoldSS, FoldSlot, NewVReg)) {
1133 // Folding the load/store can completely change the instruction in
1134 // unpredictable ways, rescan it from the beginning.
1137 // We need to give the new vreg the same stack slot as the
1138 // spilled interval.
1139 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
// If folding removed MI from the index maps entirely, the elided code
// presumably returns early; otherwise restart the operand scan.
1145 if (isNotInMIMap(MI))
// NOTE(review): the RestartInstruction label itself is elided from this
// listing; it restarts the operand loop on the (rewritten) instruction.
1147 goto RestartInstruction;
1150 // We'll try to fold it later if it's profitable.
1151 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rename this operand, then every other collected operand, to NewVReg.
1155 mop.setReg(NewVReg);
1156 if (mop.isImplicit())
1157 rewriteImplicitOps(li, MI, NewVReg, vrm);
1159 // Reuse NewVReg for other reads.
1160 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1161 MachineOperand &mopj = MI->getOperand(Ops[j]);
1162 mopj.setReg(NewVReg);
1163 if (mopj.isImplicit())
1164 rewriteImplicitOps(li, MI, NewVReg, vrm);
// Record remat / stack-slot bookkeeping for a freshly created vreg.
1167 if (CreatedNewVReg) {
1169 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1170 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1171 // Each valnum may have its own remat id.
1172 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1174 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1176 if (!CanDelete || (HasUse && HasDef)) {
1177 // If this is a two-addr instruction then its use operands are
1178 // rematerializable but its def is not. It should be assigned a
1180 vrm.assignVirt2StackSlot(NewVReg, Slot);
1183 vrm.assignVirt2StackSlot(NewVReg, Slot);
1185 } else if (HasUse && HasDef &&
1186 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1187 // If this interval hasn't been assigned a stack slot (because earlier
1188 // def is a deleted remat def), do it now.
1189 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1190 vrm.assignVirt2StackSlot(NewVReg, Slot);
1193 // Re-matting an instruction with virtual register use. Add the
1194 // register as an implicit use on the use MI.
1195 if (DefIsReMat && ImpUse)
1196 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1198 // Create a new register interval for this spill / remat.
1199 LiveInterval &nI = getOrCreateInterval(NewVReg);
1200 if (CreatedNewVReg) {
1201 NewLIs.push_back(&nI);
// Remember which NewVReg serves this MBB so later uses can reuse it.
1202 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1204 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Grow nI: a [load, def) range for a fresh vreg's use, or extend the last
// range of an existing split interval up to this def/use.
1208 if (CreatedNewVReg) {
1209 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1210 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1211 DEBUG(dbgs() << " +" << LR);
1214 // Extend the split live interval to this def / use.
1215 SlotIndex End = index.getDefIndex();
1216 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1217 nI.getValNumInfo(nI.getNumValNums()-1));
1218 DEBUG(dbgs() << " +" << LR);
// For a def, add a [def, store) range so the value reaches its spill point.
1223 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1224 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1225 DEBUG(dbgs() << " +" << LR);
1230 dbgs() << "\t\t\t\tAdded new interval: ";
1231 nI.print(dbgs(), tri_);
// anyKillInMBBAfterIdx - Return whether the given value number has a kill
// inside MBB strictly after Idx (and no later than the block's end index).
// PHI kills are skipped. The VNI parameter line and the return statements
// are elided from this listing (numbering gaps 1238, 1244-1245, 1248-1251).
1237 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1239 MachineBasicBlock *MBB,
1240 SlotIndex Idx) const {
1241 SlotIndex End = getMBBEndIdx(MBB);
1242 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
// PHI kills are not real instruction kills; skip them.
1243 if (VNI->kills[j].isPHI())
1246 SlotIndex KillIdx = VNI->kills[j];
// Kill lies in (Idx, End] of this block — the elided body presumably
// returns true here.
1247 if (KillIdx > Idx && KillIdx <= End)
1253 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1254 /// during spilling.
// Lightweight record: the instruction, its slot index, and whether the
// operand seen at collection time was a use and/or a def. The field
// declarations themselves are elided from this listing (lines 1257-1260).
1256 struct RewriteInfo {
1261 RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
1262 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// Strict-weak ordering by slot index, used to sort RewriteInfo lists so
// rewriting proceeds in instruction order.
1265 struct RewriteInfoCompare {
1266 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1267 return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Rewrite every def/use of li.reg inside the
// live range *I: collects all referencing instructions, sorts them by slot
// index, rewrites each via rewriteInstructionForSpills, and records per-MBB
// spill points (SpillIdxes/SpillMBBs) and restore points
// (RestoreIdxes/RestoreMBBs) for the spiller, plus spill weights.
// NOTE(review): this listing elides many lines (gaps in the numbering),
// including several closing braces and else-branches; comments below hedge
// where they span gaps.
1272 void LiveIntervals::
1273 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1274 LiveInterval::Ranges::const_iterator &I,
1275 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1276 unsigned Slot, int LdSlot,
1277 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1279 const TargetRegisterClass* rc,
1280 SmallVector<int, 4> &ReMatIds,
1281 const MachineLoopInfo *loopInfo,
1282 BitVector &SpillMBBs,
1283 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1284 BitVector &RestoreMBBs,
1285 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1286 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1287 std::vector<LiveInterval*> &NewLIs) {
1288 bool AllCanFold = true;
1289 unsigned NewVReg = 0;
// Normalize the range to whole-instruction (base-index) granularity.
1290 SlotIndex start = I->start.getBaseIndex();
1291 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1293 // First collect all the def / use in this live range that will be rewritten.
1294 // Make sure they are sorted according to instruction index.
1295 std::vector<RewriteInfo> RewriteMIs;
1296 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1297 re = mri_->reg_end(); ri != re; ) {
1298 MachineInstr *MI = &*ri;
1299 MachineOperand &O = ri.getOperand();
1301 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1302 SlotIndex index = getInstructionIndex(MI);
// Only instructions inside this live range are rewritten here.
1303 if (index < start || index >= end)
1307 // Must be defined by an implicit def. It should not be spilled. Note,
1308 // this is for correctness reason. e.g.
1309 // 8 %reg1024<def> = IMPLICIT_DEF
1310 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1311 // The live range [12, 14) are not part of the r1024 live interval since
1312 // it's defined by an implicit def. It will not conflicts with live
1313 // interval of r1025. Now suppose both registers are spilled, you can
1314 // easily see a situation where both registers are reloaded before
1315 // the INSERT_SUBREG and both target registers that would overlap.
1317 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1319 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
// Implicit-use register introduced by rematerialization, if any.
1321 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1322 // Now rewrite the defs and uses.
1323 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1324 RewriteInfo &rwi = RewriteMIs[i];
1326 SlotIndex index = rwi.Index;
1327 bool MIHasUse = rwi.HasUse;
1328 bool MIHasDef = rwi.HasDef;
1329 MachineInstr *MI = rwi.MI;
1330 // If MI def and/or use the same register multiple times, then there
1331 // are multiple entries.
// Coalesce consecutive entries for the same instruction.
1332 unsigned NumUses = MIHasUse;
1333 while (i != e && RewriteMIs[i].MI == MI) {
1334 assert(RewriteMIs[i].Index == index);
1335 bool isUse = RewriteMIs[i].HasUse;
1336 if (isUse) ++NumUses;
1338 MIHasDef |= RewriteMIs[i].HasDef;
1341 MachineBasicBlock *MBB = MI->getParent();
1343 if (ImpUse && MI != ReMatDefMI) {
1344 // Re-matting an instruction with virtual register use. Update the
1345 // register interval's spill weight to HUGE_VALF to prevent it from
1347 LiveInterval &ImpLi = getInterval(ImpUse);
1348 ImpLi.weight = HUGE_VALF;
// Decide whether to reuse the vreg already created for this MBB.
1351 unsigned MBBId = MBB->getNumber();
1352 unsigned ThisVReg = 0;
1354 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1355 if (NVI != MBBVRegsMap.end()) {
1356 ThisVReg = NVI->second;
1363 // It's better to start a new interval to avoid artifically
1364 // extend the new interval.
1365 if (MIHasDef && !MIHasUse) {
1366 MBBVRegsMap.erase(MBB->getNumber());
1372 bool IsNew = ThisVReg == 0;
1374 // This ends the previous live interval. If all of its def / use
1375 // can be folded, give it a low spill weight.
1376 if (NewVReg && TrySplit && AllCanFold) {
1377 LiveInterval &nI = getOrCreateInterval(NewVReg);
1384 bool HasDef = false;
1385 bool HasUse = false;
1386 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1387 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1388 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1389 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1390 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
// Nothing was rewritten for this instruction; skip the bookkeeping below
// (the 'continue' on the elided line 1392-1393 — confirm in full source).
1391 if (!HasDef && !HasUse)
1394 AllCanFold &= CanFold;
1396 // Update weight of spill interval.
1397 LiveInterval &nI = getOrCreateInterval(NewVReg);
1399 // The spill weight is now infinity as it cannot be spilled again.
1400 nI.weight = HUGE_VALF;
1404 // Keep track of the last def and first use in each MBB.
1406 if (MI != ReMatOrigDefMI || !CanDelete) {
1407 bool HasKill = false;
1409 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1411 // If this is a two-address code, then this index starts a new VNInfo.
1412 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1414 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
// Record (or amend) the spill point for this MBB.
1416 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1417 SpillIdxes.find(MBBId);
1419 if (SII == SpillIdxes.end()) {
1420 std::vector<SRInfo> S;
1421 S.push_back(SRInfo(index, NewVReg, true));
1422 SpillIdxes.insert(std::make_pair(MBBId, S));
1423 } else if (SII->second.back().vreg != NewVReg) {
1424 SII->second.push_back(SRInfo(index, NewVReg, true));
1425 } else if (index > SII->second.back().index) {
1426 // If there is an earlier def and this is a two-address
1427 // instruction, then it's not possible to fold the store (which
1428 // would also fold the load).
1429 SRInfo &Info = SII->second.back();
1431 Info.canFold = !HasUse;
1433 SpillMBBs.set(MBBId);
1434 } else if (SII != SpillIdxes.end() &&
1435 SII->second.back().vreg == NewVReg &&
1436 index > SII->second.back().index) {
1437 // There is an earlier def that's not killed (must be two-address).
1438 // The spill is no longer needed.
1439 SII->second.pop_back();
1440 if (SII->second.empty()) {
1441 SpillIdxes.erase(MBBId);
1442 SpillMBBs.reset(MBBId);
// Use side: invalidate unsafe folds and record the restore point.
1449 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1450 SpillIdxes.find(MBBId);
1451 if (SII != SpillIdxes.end() &&
1452 SII->second.back().vreg == NewVReg &&
1453 index > SII->second.back().index)
1454 // Use(s) following the last def, it's not safe to fold the spill.
1455 SII->second.back().canFold = false;
1456 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1457 RestoreIdxes.find(MBBId);
1458 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1459 // If we are splitting live intervals, only fold if it's the first
1460 // use and there isn't another use later in the MBB.
1461 RII->second.back().canFold = false;
1463 // Only need a reload if there isn't an earlier def / use.
1464 if (RII == RestoreIdxes.end()) {
1465 std::vector<SRInfo> Infos;
1466 Infos.push_back(SRInfo(index, NewVReg, true));
1467 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1469 RII->second.push_back(SRInfo(index, NewVReg, true));
1471 RestoreMBBs.set(MBBId);
1475 // Update spill weight.
1476 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1477 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
// Final interval also gets the low-spill-weight treatment if foldable.
1480 if (NewVReg && TrySplit && AllCanFold) {
1481 // If all of its def / use can be folded, give it a low spill weight.
1482 LiveInterval &nI = getOrCreateInterval(NewVReg);
// alsoFoldARestore - Return whether the restore recorded for (MBB Id, index,
// vr) is marked foldable, i.e. a store fold at this point would also fold
// the corresponding reload. The return statements are elided here
// (numbering gaps 1491, 1497-1500).
1487 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1488 unsigned vr, BitVector &RestoreMBBs,
1489 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores recorded for this MBB at all.
1490 if (!RestoreMBBs[Id])
1492 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1493 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1494 if (Restores[i].index == index &&
1495 Restores[i].vreg == vr &&
1496 Restores[i].canFold)
// eraseRestoreInfo - Invalidate the restore recorded for (MBB Id, index, vr)
// by clearing its index to the sentinel SlotIndex(); addIntervalsForSpills
// skips restores whose index equals SlotIndex().
1501 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1502 unsigned vr, BitVector &RestoreMBBs,
1503 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1504 if (!RestoreMBBs[Id])
1506 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1507 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
// NOTE(review): this tests vreg for non-zero rather than comparing it to
// the 'vr' parameter; upstream LLVM uses 'Restores[i].vreg == vr' —
// confirm whether this listing dropped the '== vr'.
1508 if (Restores[i].index == index && Restores[i].vreg)
1509 Restores[i].index = SlotIndex();
1512 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1513 /// spilled and create empty intervals for their uses.
// Walks every remaining reference to li.reg: IMPLICIT_DEF defs are erased
// outright (from the index maps, the VirtRegMap, and the function); each
// surviving use gets a brand-new empty interval marked implicitly-defined.
// The return type line (1514) and some branch/brace lines are elided.
1515 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1516 const TargetRegisterClass* rc,
1517 std::vector<LiveInterval*> &NewLIs) {
1518 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1519 re = mri_->reg_end(); ri != re; ) {
1520 MachineOperand &O = ri.getOperand();
1521 MachineInstr *MI = &*ri;
// Any def of li.reg left at this point must be the IMPLICIT_DEF itself;
// all real defs were rewritten by the spilling code.
1524 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1525 "Register def was not rewritten?");
1526 RemoveMachineInstrFromMaps(MI);
1527 vrm.RemoveMachineInstrFromMaps(MI);
1528 MI->eraseFromParent();
1530 // This must be an use of an implicit_def so it's not part of the live
1531 // interval. Create a new empty live interval for it.
1532 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1533 unsigned NewVReg = mri_->createVirtualRegister(rc);
1535 vrm.setIsImplicitlyDefined(NewVReg);
1536 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Rewrite every operand of this user that still names li.reg (the actual
// setReg call sits on elided lines after 1539).
1537 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1538 MachineOperand &MO = MI->getOperand(i);
1539 if (MO.isReg() && MO.getReg() == li.reg) {
// addIntervalsForSpillsFast - Simple one-pass spiller used when -fast-spill
// is enabled: assigns li.reg one stack slot, then for each referencing
// instruction either folds the memory access or creates a fresh vreg with a
// tiny interval around the instruction plus a restore and/or spill point.
// The VirtRegMap parameter line (1551) and various braces are elided.
1548 std::vector<LiveInterval*> LiveIntervals::
1549 addIntervalsForSpillsFast(const LiveInterval &li,
1550 const MachineLoopInfo *loopInfo,
1552 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1554 std::vector<LiveInterval*> added;
1556 assert(li.weight != HUGE_VALF &&
1557 "attempt to spill already spilled interval!");
1560 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1565 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1567 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1568 while (RI != mri_->reg_end()) {
1569 MachineInstr* MI = &*RI;
// Collect every operand index of MI that references li.reg.
1571 SmallVector<unsigned, 2> Indices;
1572 bool HasUse = false;
1573 bool HasDef = false;
1575 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1576 MachineOperand& mop = MI->getOperand(i);
1577 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1579 HasUse |= MI->getOperand(i).isUse();
1580 HasDef |= MI->getOperand(i).isDef();
1582 Indices.push_back(i);
// Prefer folding the stack slot directly into MI; only fall back to an
// explicit reload/spill around MI if folding fails.
1585 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1586 Indices, true, slot, li.reg)) {
1587 unsigned NewVReg = mri_->createVirtualRegister(rc);
1589 vrm.assignVirt2StackSlot(NewVReg, slot);
1591 // create a new register for this spill
1592 LiveInterval &nI = getOrCreateInterval(NewVReg);
1594 // the spill weight is now infinity as it
1595 // cannot be spilled again
1596 nI.weight = HUGE_VALF;
1598 // Rewrite register operands to use the new vreg.
1599 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1600 E = Indices.end(); I != E; ++I) {
1601 MI->getOperand(*I).setReg(NewVReg);
// The new vreg dies at this instruction; mark uses as kills.
1603 if (MI->getOperand(*I).isUse())
1604 MI->getOperand(*I).setIsKill(true);
1607 // Fill in the new live interval.
1608 SlotIndex index = getInstructionIndex(MI);
// Use: live [load, use) and ask the spiller for a reload before MI.
1610 LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1611 nI.getNextValue(SlotIndex(), 0, false,
1612 getVNInfoAllocator()));
1613 DEBUG(dbgs() << " +" << LR);
1615 vrm.addRestorePoint(NewVReg, MI);
// Def: live [def, store) and ask the spiller for a store after MI.
1618 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1619 nI.getNextValue(SlotIndex(), 0, false,
1620 getVNInfoAllocator()));
1621 DEBUG(dbgs() << " +" << LR);
1623 vrm.addSpillPoint(NewVReg, true, MI);
1626 added.push_back(&nI);
1629 dbgs() << "\t\t\t\tadded new interval: ";
// Rewriting MI's operands invalidates the use-iterator chain for li.reg,
// so restart iteration from the beginning each time around the loop.
1636 RI = mri_->reg_begin(li.reg);
// addIntervalsForSpills - Main spilling entry point. Decides, per value
// number, whether the def is rematerializable (and deletable), assigns one
// stack slot per interval when needed, rewrites all defs/uses via
// rewriteInstructionsForSpills, then post-processes the recorded spill and
// restore points (folding stores/loads where legal) and finalizes kill
// flags and spill weights on the new intervals.
// NOTE(review): many lines are elided from this listing (gaps in the
// numbering), including early returns, else-branches and closing braces.
1642 std::vector<LiveInterval*> LiveIntervals::
1643 addIntervalsForSpills(const LiveInterval &li,
1644 SmallVectorImpl<LiveInterval*> &SpillIs,
1645 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
// Optional fast path selected by the -fast-spill hidden flag.
1647 if (EnableFastSpilling)
1648 return addIntervalsForSpillsFast(li, loopInfo, vrm);
1650 assert(li.weight != HUGE_VALF &&
1651 "attempt to spill already spilled interval!");
1654 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1655 li.print(dbgs(), tri_);
1659 // Each bit specify whether a spill is required in the MBB.
1660 BitVector SpillMBBs(mf_->getNumBlockIDs());
1661 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1662 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1663 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1664 DenseMap<unsigned,unsigned> MBBVRegsMap;
1665 std::vector<LiveInterval*> NewLIs;
1666 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-value-number remat bookkeeping, indexed by VNInfo id.
1668 unsigned NumValNums = li.getNumValNums();
1669 SmallVector<MachineInstr*, 4> ReMatDefs;
1670 ReMatDefs.resize(NumValNums, NULL);
1671 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1672 ReMatOrigDefs.resize(NumValNums, NULL);
1673 SmallVector<int, 4> ReMatIds;
1674 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1675 BitVector ReMatDelete(NumValNums);
1676 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1678 // Spilling a split live interval. It cannot be split any further. Also,
1679 // it's also guaranteed to be a single val# / range interval.
1680 if (vrm.getPreSplitReg(li.reg)) {
1681 vrm.setIsSplitFromReg(li.reg, 0);
1682 // Unset the split kill marker on the last use.
1683 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1684 if (KillIdx != SlotIndex()) {
1685 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1686 assert(KillMI && "Last use disappeared?");
1687 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1688 assert(KillOp != -1 && "Last use disappeared?");
1689 KillMI->getOperand(KillOp).setIsKill(false);
1691 vrm.removeKillPoint(li.reg);
1692 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1693 Slot = vrm.getStackSlot(li.reg);
1694 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1695 MachineInstr *ReMatDefMI = DefIsReMat ?
1696 vrm.getReMaterializedMI(li.reg) : NULL;
// LdSlot declaration (line 1697) is elided from this listing.
1698 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1699 bool isLoad = isLoadSS ||
1700 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1701 bool IsFirstRange = true;
1702 for (LiveInterval::Ranges::const_iterator
1703 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1704 // If this is a split live interval with multiple ranges, it means there
1705 // are two-address instructions that re-defined the value. Only the
1706 // first def can be rematerialized!
1708 // Note ReMatOrigDefMI has already been deleted.
1709 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1710 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1711 false, vrm, rc, ReMatIds, loopInfo,
1712 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1713 MBBVRegsMap, NewLIs);
// Non-first ranges: no remat; plain spill rewrite.
1715 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1716 Slot, 0, false, false, false,
1717 false, vrm, rc, ReMatIds, loopInfo,
1718 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1719 MBBVRegsMap, NewLIs);
1721 IsFirstRange = false;
// Pre-split intervals return early here (elided return after this call).
1724 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Splitting is pointless when the whole interval sits in one block.
1728 bool TrySplit = !intervalIsInOneMBB(li);
1731 bool NeedStackSlot = false;
1732 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1734 const VNInfo *VNI = *i;
1735 unsigned VN = VNI->id;
1736 if (VNI->isUnused())
1737 continue; // Dead val#.
1738 // Is the def for the val# rematerializable?
1739 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1740 ? getInstructionFromIndex(VNI->def) : 0;
1742 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1743 // Remember how to remat the def of this val#.
1744 ReMatOrigDefs[VN] = ReMatDefMI;
1745 // Original def may be modified so we have to make a copy here.
1746 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1747 CloneMIs.push_back(Clone);
1748 ReMatDefs[VN] = Clone;
1750 bool CanDelete = true;
1751 if (VNI->hasPHIKill()) {
1752 // A kill is a phi node, not all of its uses can be rematerialized.
1753 // It must not be deleted.
1755 // Need a stack slot if there is any live range where uses cannot be
1757 NeedStackSlot = true;
// Fully rematerializable val#: the original def can be deleted.
1760 ReMatDelete.set(VN);
1762 // Need a stack slot if there is any live range where uses cannot be
1764 NeedStackSlot = true;
1768 // One stack slot per live interval.
1769 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1770 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1771 Slot = vrm.assignVirt2StackSlot(li.reg);
1773 // This case only occurs when the prealloc splitter has already assigned
1774 // a stack slot to this vreg.
1776 Slot = vrm.getStackSlot(li.reg);
1779 // Create new intervals and rewrite defs and uses.
1780 for (LiveInterval::Ranges::const_iterator
1781 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1782 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1783 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1784 bool DefIsReMat = ReMatDefMI != NULL;
1785 bool CanDelete = ReMatDelete[I->valno->id];
1787 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1788 bool isLoad = isLoadSS ||
1789 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1790 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1791 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1792 CanDelete, vrm, rc, ReMatIds, loopInfo,
1793 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1794 MBBVRegsMap, NewLIs);
1797 // Insert spills / restores if we are splitting.
1799 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Post-pass over recorded spill points: fold stores where legal, else hand
// the spill point to the spiller via the VirtRegMap.
1803 SmallPtrSet<LiveInterval*, 4> AddedKill;
1804 SmallVector<unsigned, 2> Ops;
1805 if (NeedStackSlot) {
1806 int Id = SpillMBBs.find_first();
1808 std::vector<SRInfo> &spills = SpillIdxes[Id];
1809 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1810 SlotIndex index = spills[i].index;
1811 unsigned VReg = spills[i].vreg;
1812 LiveInterval &nI = getOrCreateInterval(VReg);
1813 bool isReMat = vrm.isReMaterialized(VReg);
1814 MachineInstr *MI = getInstructionFromIndex(index);
1815 bool CanFold = false;
1816 bool FoundUse = false;
1818 if (spills[i].canFold) {
1820 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1821 MachineOperand &MO = MI->getOperand(j);
1822 if (!MO.isReg() || MO.getReg() != VReg)
1829 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1830 RestoreMBBs, RestoreIdxes))) {
1831 // MI has two-address uses of the same register. If the use
1832 // isn't the first and only use in the BB, then we can't fold
1833 // it. FIXME: Move this to rewriteInstructionsForSpills.
1840 // Fold the store into the def if possible.
1841 bool Folded = false;
1842 if (CanFold && !Ops.empty()) {
1843 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1846 // Also folded uses, do not issue a load.
1847 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1848 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1850 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1854 // Otherwise tell the spiller to issue a spill.
// Whether the value dies at the store decides the isKill flag.
1856 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1857 bool isKill = LR->end == index.getStoreIndex();
1858 if (!MI->registerDefIsDead(nI.reg))
1859 // No need to spill a dead def.
1860 vrm.addSpillPoint(VReg, isKill, MI);
1862 AddedKill.insert(&nI);
1865 Id = SpillMBBs.find_next(Id);
// Post-pass over recorded restore points: fold loads (or remat defs) where
// legal, else hand the restore point to the spiller.
1869 int Id = RestoreMBBs.find_first();
1871 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1872 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1873 SlotIndex index = restores[i].index;
// SlotIndex() is the sentinel written by eraseRestoreInfo; skip it.
1874 if (index == SlotIndex())
1876 unsigned VReg = restores[i].vreg;
1877 LiveInterval &nI = getOrCreateInterval(VReg);
1878 bool isReMat = vrm.isReMaterialized(VReg);
1879 MachineInstr *MI = getInstructionFromIndex(index);
1880 bool CanFold = false;
1882 if (restores[i].canFold) {
1884 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1885 MachineOperand &MO = MI->getOperand(j);
1886 if (!MO.isReg() || MO.getReg() != VReg)
1890 // If this restore were to be folded, it would have been folded
1899 // Fold the load into the use if possible.
1900 bool Folded = false;
1901 if (CanFold && !Ops.empty()) {
1903 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
// Rematerialized def: try folding the remat'ed load itself.
1905 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1907 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1908 // If the rematerializable def is a load, also try to fold it.
1909 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1910 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1911 Ops, isLoadSS, LdSlot, VReg);
1913 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1915 // Re-matting an instruction with virtual register use. Add the
1916 // register as an implicit use on the use MI and update the register
1917 // interval's spill weight to HUGE_VALF to prevent it from being
1919 LiveInterval &ImpLi = getInterval(ImpUse);
1920 ImpLi.weight = HUGE_VALF;
1921 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1926 // If folding is not possible / failed, then tell the spiller to issue a
1927 // load / rematerialization for us.
1929 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1931 vrm.addRestorePoint(VReg, MI);
1933 Id = RestoreMBBs.find_next(Id);
1936 // Finalize intervals: add kills, finalize spill weights, and filter out
1938 std::vector<LiveInterval*> RetNewLIs;
1939 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1940 LiveInterval *LI = NewLIs[i];
// Normalize spill weight by instruction count (slot-index density).
1942 LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
1943 if (!AddedKill.count(LI)) {
1944 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1945 SlotIndex LastUseIdx = LR->end.getBaseIndex();
1946 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1947 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1948 assert(UseIdx != -1);
// Two-address tied uses cannot carry a kill flag.
1949 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
1950 LastUse->getOperand(UseIdx).setIsKill();
1951 vrm.addKillPoint(LI->reg, LastUseIdx);
1954 RetNewLIs.push_back(LI);
1958 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1962 /// hasAllocatableSuperReg - Return true if the specified physical register
1963 /// has any super register that's allocatable.
// Walks the target's super-register list; a hit requires the super-register
// to be both allocatable and to have a live interval. The 'return false'
// fall-through is elided from this listing.
1964 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1965 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1966 if (allocatableRegs_[*AS] && hasInterval(*AS))
1971 /// getRepresentativeReg - Find the largest super register of the specified
1972 /// physical register.
// Starting from Reg itself, climbs the super-register chain looking for a
// super-register that has an interval but no allocatable super of its own;
// the update of BestReg and the final return are elided from this listing.
1973 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1974 // Find the largest super-register that is allocatable.
1975 unsigned BestReg = Reg;
1976 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1977 unsigned SuperReg = *AS;
// A super-register with no allocatable super of its own is topmost.
1978 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1986 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1987 /// specified interval that conflicts with the specified physical register.
// Counts each operand reference to li.reg whose instruction's slot index
// falls inside the representative physreg's live interval; the increment
// of NumConflicts (line ~1998) is elided from this listing.
1988 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1989 unsigned PhysReg) const {
1990 unsigned NumConflicts = 0;
// Conflicts are measured against the representative (largest) super-reg.
1991 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1992 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1993 E = mri_->reg_end(); I != E; ++I) {
1994 MachineOperand &O = I.getOperand();
1995 MachineInstr *MI = O.getParent();
1996 SlotIndex Index = getInstructionIndex(MI);
1997 if (pli.liveAt(Index))
2000 return NumConflicts;
2003 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2004 /// around all defs and uses of the specified interval. Return true if it
2005 /// was able to cut its interval.
// Picks the representative super-register, gathers it plus its live
// sub-registers, then for every instruction referencing li.reg records an
// emergency spill and carves the physreg intervals around that index.
// Reports a fatal error if a physreg range cannot be cut (register
// exhaustion, commonly from over-constrained inline asm).
2006 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2007 unsigned PhysReg, VirtRegMap &vrm) {
2008 unsigned SpillReg = getRepresentativeReg(PhysReg);
2010 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2011 // If there are registers which alias PhysReg, but which are not a
2012 // sub-register of the chosen representative super register. Assert
2013 // since we can't handle it yet.
2014 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2015 tri_->isSuperRegister(*AS, SpillReg));
// PRegs: the representative plus every sub-register that has an interval.
2018 SmallVector<unsigned, 4> PRegs;
2019 if (hasInterval(SpillReg))
2020 PRegs.push_back(SpillReg);
2022 SmallSet<unsigned, 4> Added;
2023 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
2024 if (Added.insert(*AS) && hasInterval(*AS)) {
2025 PRegs.push_back(*AS);
// Mark nested sub-registers as seen (the insert call is elided).
2026 for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
// Process each instruction once even if it references li.reg repeatedly.
2031 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2032 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2033 E = mri_->reg_end(); I != E; ++I) {
2034 MachineOperand &O = I.getOperand();
2035 MachineInstr *MI = O.getParent();
2036 if (SeenMIs.count(MI))
2039 SlotIndex Index = getInstructionIndex(MI);
2040 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2041 unsigned PReg = PRegs[i];
2042 LiveInterval &pli = getInterval(PReg);
2043 if (!pli.liveAt(Index))
// Record the emergency spill, then cut the physreg interval around MI.
2045 vrm.addEmergencySpill(PReg, MI);
2046 SlotIndex StartIdx = Index.getLoadIndex();
2047 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2048 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2049 pli.removeRange(StartIdx, EndIdx);
// Cannot carve the range out: unrecoverable allocation failure (the
// else branch opening and 'std::string msg' line are elided here).
2053 raw_string_ostream Msg(msg);
2054 Msg << "Ran out of registers during register allocation!";
2055 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2056 Msg << "\nPlease check your inline asm statement for invalid "
2057 << "constraints:\n";
2058 MI->print(Msg, tm_);
2060 llvm_report_error(Msg.str());
// Also trim any live sub-register intervals around the same index.
2062 for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
2063 if (!hasInterval(*AS))
2065 LiveInterval &spli = getInterval(*AS);
2066 if (spli.liveAt(Index))
2067 spli.removeRange(Index.getLoadIndex(),
2068 Index.getNextIndex().getBaseIndex());
2075 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2076 MachineInstr* startInst) {
2077 LiveInterval& Interval = getOrCreateInterval(reg);
2078 VNInfo* VN = Interval.getNextValue(
2079 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2080 startInst, true, getVNInfoAllocator());
2081 VN->setHasPHIKill(true);
2082 VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
2084 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2085 getMBBEndIdx(startInst->getParent()), VN);
2086 Interval.addRange(LR);