1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
49 // Hidden options for help debugging.
50 static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
53 STATISTIC(numIntervals , "Number of original intervals");
54 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
55 STATISTIC(numSplits , "Number of intervals split");
57 char LiveIntervals::ID = 0;
58 INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
59 "Live Interval Analysis", false, false)
60 INITIALIZE_PASS_DEPENDENCY(LiveVariables)
61 INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
62 INITIALIZE_PASS_DEPENDENCY(PHIElimination)
63 INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
64 INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
65 INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
66 INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
67 INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
68 "Live Interval Analysis", false, false)
// getAnalysisUsage - Declare which analyses this pass requires and which it
// preserves, so the pass manager can schedule it correctly.
// NOTE(review): some lines of this function appear to be missing from this
// copy (including the closing brace) — reconcile against upstream.
70 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
72 AU.addRequired<AliasAnalysis>();
73 AU.addPreserved<AliasAnalysis>();
74 AU.addRequired<LiveVariables>();
75 AU.addPreserved<LiveVariables>();
76 AU.addRequired<MachineLoopInfo>();
77 AU.addPreserved<MachineLoopInfo>();
78 AU.addPreservedID(MachineDominatorsID);
81 AU.addPreservedID(PHIEliminationID);
82 AU.addRequiredID(PHIEliminationID);
85 AU.addRequiredID(TwoAddressInstructionPassID);
86 AU.addPreserved<ProcessImplicitDefs>();
87 AU.addRequired<ProcessImplicitDefs>();
88 AU.addPreserved<SlotIndexes>();
89 AU.addRequiredTransitive<SlotIndexes>();
// Chain to the base class so common MachineFunctionPass requirements apply.
90 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Drop all per-function state: the register-to-interval map,
// the VNInfo allocator's regions, and any cloned instructions still pending.
// NOTE(review): the loop body freeing each interval appears to be missing
// from this copy — verify against upstream.
93 void LiveIntervals::releaseMemory() {
94 // Free the live intervals themselves.
95 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
96 E = r2iMap_.end(); I != E; ++I)
101 // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
102 VNInfoAllocator.Reset();
// Delete any cloned machine instructions that were kept alive in CloneMIs.
103 while (!CloneMIs.empty()) {
104 MachineInstr *MI = CloneMIs.back();
106 mf_->DeleteMachineInstr(MI);
110 /// runOnMachineFunction - Register allocate the whole function
// runOnMachineFunction - Entry point of the pass: cache pointers to the
// target, register info, and required analyses, then compute live intervals
// for the function. Updates the numIntervals statistic.
// NOTE(review): lines are missing from this copy (e.g. the assignment of mf_
// before its use on the next line, and the interval computation itself).
112 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
114 mri_ = &mf_->getRegInfo();
115 tm_ = &fn.getTarget();
116 tri_ = tm_->getRegisterInfo();
117 tii_ = tm_->getInstrInfo();
118 aa_ = &getAnalysis<AliasAnalysis>();
119 lv_ = &getAnalysis<LiveVariables>();
120 indexes_ = &getAnalysis<SlotIndexes>();
121 allocatableRegs_ = tri_->getAllocatableSet(fn);
125 numIntervals += getNumIntervals();
131 /// print - Implement the dump method.
// print - Write all computed live intervals to OS, one per line.
132 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
133 OS << "********** INTERVALS **********\n";
134 for (const_iterator I = begin(), E = end(); I != E; ++I) {
135 I->second->print(OS, tri_);
// printInstrs - Dump the machine function with slot indexes to OS.
142 void LiveIntervals::printInstrs(raw_ostream &OS) const {
143 OS << "********** MACHINEINSTRS **********\n";
144 mf_->print(OS, indexes_);
// dumpInstrs - Debug helper; body not visible in this copy (presumably
// forwards to printInstrs on the debug stream — TODO confirm upstream).
147 void LiveIntervals::dumpInstrs() const {
// conflictsWithPhysReg - Check whether assigning physical register 'reg' to
// the single-range interval 'li' would conflict with any instruction in that
// range: an operand carrying (or mapped by the VRM to) a physreg overlapping
// 'reg' is a conflict. Copies to/from li.reg are allowed.
// NOTE(review): several return statements are missing from this copy, so the
// exact true/false results per branch must be confirmed against upstream.
151 bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
152 VirtRegMap &vrm, unsigned reg) {
153 // We don't handle fancy stuff crossing basic block boundaries
154 if (li.ranges.size() != 1)
156 const LiveRange &range = li.ranges.front();
157 SlotIndex idx = range.start.getBaseIndex();
158 SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
160 // Skip deleted instructions
161 MachineInstr *firstMI = getInstructionFromIndex(idx);
162 while (!firstMI && idx != end) {
163 idx = idx.getNextIndex();
164 firstMI = getInstructionFromIndex(idx);
169 // Find last instruction in range
170 SlotIndex lastIdx = end.getPrevIndex();
171 MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
172 while (!lastMI && lastIdx != idx) {
173 lastIdx = lastIdx.getPrevIndex();
174 lastMI = getInstructionFromIndex(lastIdx);
179 // Range cannot cross basic block boundaries or terminators
180 MachineBasicBlock *MBB = firstMI->getParent();
181 if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
184 MachineBasicBlock::const_iterator E = lastMI;
186 for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
187 const MachineInstr &MI = *I;
189 // Allow copies to and from li.reg
191 if (MI.getOperand(0).getReg() == li.reg ||
192 MI.getOperand(1).getReg() == li.reg)
195 // Check for operands using reg
196 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
197 const MachineOperand& mop = MI.getOperand(i);
200 unsigned PhysReg = mop.getReg();
201 if (PhysReg == 0 || PhysReg == li.reg)
// Virtual registers are checked through their VRM-assigned physreg.
203 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
204 if (!vrm.hasPhys(PhysReg))
206 PhysReg = vrm.getPhys(PhysReg);
208 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
213 // No conflicts found.
// conflictsWithAliasRef - Walk every instruction inside li's ranges (skipping
// deleted instructions and coalesced copies in JoinedCopies) looking for an
// operand whose physical register overlaps 'Reg'.
// NOTE(review): return statements are missing from this copy; confirm the
// true/false polarity against upstream before relying on it.
217 bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
218 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
219 for (LiveInterval::Ranges::const_iterator
220 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
221 for (SlotIndex index = I->start.getBaseIndex(),
222 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
224 index = index.getNextIndex()) {
225 MachineInstr *MI = getInstructionFromIndex(index);
227 continue; // skip deleted instructions
229 if (JoinedCopies.count(MI))
231 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
232 MachineOperand& MO = MI->getOperand(i);
235 unsigned PhysReg = MO.getReg();
// Only physical registers other than Reg itself are interesting here.
236 if (PhysReg == 0 || PhysReg == Reg ||
237 TargetRegisterInfo::isVirtualRegister(PhysReg))
239 if (tri_->regsOverlap(Reg, PhysReg))
// MultipleDefsBySameMI - Return whether the register defined at operand MOIdx
// of MI is defined again by a later operand of the same instruction (e.g.
// multiple sub-register defs produced by REG_SEQUENCE elimination).
// NOTE(review): the return statements are missing from this copy.
249 bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
250 unsigned Reg = MI.getOperand(MOIdx).getReg();
251 for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
252 const MachineOperand &MO = MI.getOperand(i);
255 if (MO.getReg() == Reg && MO.isDef()) {
// Sanity: the two defs must target different sub-registers.
256 assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
257 MI.getOperand(MOIdx).getSubReg() &&
258 (MO.getSubReg() || MO.isImplicit()));
265 /// isPartialRedef - Return true if the specified def at the specific index is
266 /// partially re-defining the specified live interval. A common case of this is
267 /// a definition of the sub-register.
// isPartialRedef - See the doc comment above: true when the def at MIIdx only
// redefines part (a sub-register) of an already-live interval.
268 bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
269 LiveInterval &interval) {
// Only sub-register, non-earlyclobber defs can be partial redefs.
270 if (!MO.getSubReg() || MO.isEarlyClobber())
273 SlotIndex RedefIndex = MIIdx.getDefIndex();
// Look up the value live just before this def and find its defining instr.
274 const LiveRange *OldLR =
275 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
276 MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
// Partial redef iff the previous value's def also defined the full vreg.
278 return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
// handleVirtualRegisterDef - Extend/create the live interval for a virtual
// register definition at MIIdx. First defs build the initial ranges from
// LiveVariables kill info; later defs handle two-address redefs, partial
// redefs, and phi-join registers.
// NOTE(review): several lines (some declarations, returns, and closing
// braces) are missing from this copy — verify control flow against upstream.
283 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
284 MachineBasicBlock::iterator mi,
288 LiveInterval &interval) {
289 DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));
291 // Virtual registers may be defined multiple times (due to phi
292 // elimination and 2-addr elimination). Much of what we do only has to be
293 // done once for the vreg. We use an empty interval to detect the first
294 // time we see a vreg.
295 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
296 if (interval.empty()) {
297 // Get the Idx of the defining instructions.
298 SlotIndex defIndex = MIIdx.getDefIndex();
299 // Earlyclobbers move back one, so that they overlap the live range
301 if (MO.isEarlyClobber())
302 defIndex = MIIdx.getUseIndex();
304 // Make sure the first definition is not a partial redefinition. Add an
305 // <imp-def> of the full register.
307 mi->addRegisterDefined(interval.reg);
309 MachineInstr *CopyMI = NULL;
310 if (mi->isCopyLike()) {
// The first value number of an interval is always id 0.
314 VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
315 assert(ValNo->id == 0 && "First value in interval is not 0?");
317 // Loop over all of the blocks that the vreg is defined in. There are
318 // two cases we have to handle here. The most common case is a vreg
319 // whose lifetime is contained within a basic block. In this case there
320 // will be a single kill, in MBB, which comes after the definition.
321 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
322 // FIXME: what about dead vars?
324 if (vi.Kills[0] != mi)
325 killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
327 killIdx = defIndex.getStoreIndex();
329 // If the kill happens after the definition, we have an intra-block
331 if (killIdx > defIndex) {
332 assert(vi.AliveBlocks.empty() &&
333 "Shouldn't be alive across any blocks!");
334 LiveRange LR(defIndex, killIdx, ValNo);
335 interval.addRange(LR);
336 DEBUG(dbgs() << " +" << LR << "\n");
341 // The other case we handle is when a virtual register lives to the end
342 // of the defining block, potentially live across some blocks, then is
343 // live into some number of blocks, but gets killed. Start by adding a
344 // range that goes from this definition to the end of the defining block.
345 LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
346 DEBUG(dbgs() << " +" << NewLR);
347 interval.addRange(NewLR);
349 bool PHIJoin = lv_->isPHIJoin(interval.reg);
352 // A phi join register is killed at the end of the MBB and revived as a new
353 // valno in the killing blocks.
354 assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
355 DEBUG(dbgs() << " phi-join");
356 ValNo->setHasPHIKill(true);
358 // Iterate over all of the blocks that the variable is completely
359 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
361 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
362 E = vi.AliveBlocks.end(); I != E; ++I) {
363 MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
364 LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
365 interval.addRange(LR);
366 DEBUG(dbgs() << " +" << LR);
370 // Finally, this virtual register is live from the start of any killing
371 // block to the 'use' slot of the killing instruction.
372 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
373 MachineInstr *Kill = vi.Kills[i];
374 SlotIndex Start = getMBBStartIdx(Kill->getParent());
375 SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();
377 // Create interval with one of a NEW value number. Note that this value
378 // number isn't actually defined by an instruction, weird huh? :)
380 assert(getInstructionFromIndex(Start) == 0 &&
381 "PHI def index points at actual instruction.");
382 ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
383 ValNo->setIsPHIDef(true);
385 LiveRange LR(Start, killIdx, ValNo);
386 interval.addRange(LR);
387 DEBUG(dbgs() << " +" << LR);
// --- Non-empty interval: this is a redefinition of the vreg. ---
391 if (MultipleDefsBySameMI(*mi, MOIdx))
392 // Multiple defs of the same virtual register by the same instruction.
393 // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
394 // This is likely due to elimination of REG_SEQUENCE instructions. Return
395 // here since there is nothing to do.
398 // If this is the second time we see a virtual register definition, it
399 // must be due to phi elimination or two addr elimination. If this is
400 // the result of two address elimination, then the vreg is one of the
401 // def-and-use register operand.
403 // It may also be partial redef like this:
404 // 80 %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
405 // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
406 bool PartReDef = isPartialRedef(MIIdx, MO, interval);
407 if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
408 // If this is a two-address definition, then we have already processed
409 // the live range. The only problem is that we didn't realize there
410 // are actually two values in the live interval. Because of this we
411 // need to take the LiveRegion that defines this register and split it
413 SlotIndex RedefIndex = MIIdx.getDefIndex();
414 if (MO.isEarlyClobber())
415 RedefIndex = MIIdx.getUseIndex();
417 const LiveRange *OldLR =
418 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
419 VNInfo *OldValNo = OldLR->valno;
420 SlotIndex DefIndex = OldValNo->def.getDefIndex();
422 // Delete the previous value, which should be short and continuous,
423 // because the 2-addr copy must be in the same MBB as the redef.
424 interval.removeRange(DefIndex, RedefIndex);
426 // The new value number (#1) is defined by the instruction we claimed
428 VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);
430 // Value#0 is now defined by the 2-addr instruction.
431 OldValNo->def = RedefIndex;
432 OldValNo->setCopy(0);
434 // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
435 if (PartReDef && mi->isCopyLike())
436 OldValNo->setCopy(&*mi);
438 // Add the new live interval which replaces the range for the input copy.
439 LiveRange LR(DefIndex, RedefIndex, ValNo);
440 DEBUG(dbgs() << " replace range with " << LR);
441 interval.addRange(LR);
443 // If this redefinition is dead, we need to add a dummy unit live
444 // range covering the def slot.
446 interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
450 dbgs() << " RESULT: ";
451 interval.print(dbgs(), tri_);
453 } else if (lv_->isPHIJoin(interval.reg)) {
454 // In the case of PHI elimination, each variable definition is only
455 // live until the end of the block. We've already taken care of the
456 // rest of the live range.
458 SlotIndex defIndex = MIIdx.getDefIndex();
459 if (MO.isEarlyClobber())
460 defIndex = MIIdx.getUseIndex();
463 MachineInstr *CopyMI = NULL;
464 if (mi->isCopyLike())
466 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
468 SlotIndex killIndex = getMBBEndIdx(mbb);
469 LiveRange LR(defIndex, killIndex, ValNo);
470 interval.addRange(LR);
471 ValNo->setHasPHIKill(true);
472 DEBUG(dbgs() << " phi-join +" << LR);
// Any other redefinition pattern is a bug in earlier passes.
474 llvm_unreachable("Multiply defined register");
478 DEBUG(dbgs() << '\n');
// handlePhysicalRegisterDef - Build the live range for a physical register
// def at MIIdx. A physreg's range never escapes its defining block: scan
// forward to the kill, a tied redefinition, or treat the def as dead.
// NOTE(review): some branch/loop lines are missing from this copy.
481 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
482 MachineBasicBlock::iterator mi,
485 LiveInterval &interval,
486 MachineInstr *CopyMI) {
487 // A physical register cannot be live across basic block, so its
488 // lifetime must end somewhere in its defining basic block.
489 DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));
491 SlotIndex baseIndex = MIIdx;
492 SlotIndex start = baseIndex.getDefIndex();
493 // Earlyclobbers move back one.
494 if (MO.isEarlyClobber())
495 start = MIIdx.getUseIndex();
496 SlotIndex end = start;
498 // If it is not used after definition, it is considered dead at
499 // the instruction defining it. Hence its interval is:
500 // [defSlot(def), defSlot(def)+1)
501 // For earlyclobbers, the defSlot was pushed back one; the extra
502 // advance below compensates.
504 DEBUG(dbgs() << " dead");
505 end = start.getStoreIndex();
509 // If it is not dead on definition, it must be killed by a
510 // subsequent instruction. Hence its interval is:
511 // [defSlot(def), useSlot(kill)+1)
512 baseIndex = baseIndex.getNextIndex();
513 while (++mi != MBB->end()) {
// DBG_VALUEs occupy no slot; skip them when advancing the index.
515 if (mi->isDebugValue())
517 if (getInstructionFromIndex(baseIndex) == 0)
518 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
520 if (mi->killsRegister(interval.reg, tri_)) {
521 DEBUG(dbgs() << " killed");
522 end = baseIndex.getDefIndex();
525 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
527 if (mi->isRegTiedToUseOperand(DefIdx)) {
528 // Two-address instruction.
529 end = baseIndex.getDefIndex();
531 // Another instruction redefines the register before it is ever read.
532 // Then the register is essentially dead at the instruction that
533 // defines it. Hence its interval is:
534 // [defSlot(def), defSlot(def)+1)
535 DEBUG(dbgs() << " dead");
536 end = start.getStoreIndex();
542 baseIndex = baseIndex.getNextIndex();
545 // The only case we should have a dead physreg here without a killing or
546 // instruction where we know it's dead is if it is live-in to the function
547 // and never used. Another possible case is the implicit use of the
548 // physical register has been deleted by two-address pass.
549 end = start.getStoreIndex();
552 assert(start < end && "did not find end of interval?");
554 // Already exists? Extend old live interval.
555 VNInfo *ValNo = interval.getVNInfoAt(start);
556 bool Extend = ValNo != 0;
558 ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
559 if (Extend && MO.isEarlyClobber())
560 ValNo->setHasRedefByEC(true);
561 LiveRange LR(start, end, ValNo);
562 interval.addRange(LR);
563 DEBUG(dbgs() << " +" << LR << '\n');
// handleRegisterDef - Dispatch a register def to the virtual- or
// physical-register handler; for allocatable physregs, also handle the
// implicit defs of their sub-registers.
566 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
567 MachineBasicBlock::iterator MI,
571 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
572 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
573 getOrCreateInterval(MO.getReg()));
574 else if (allocatableRegs_[MO.getReg()]) {
575 MachineInstr *CopyMI = NULL;
576 if (MI->isCopyLike())
578 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
579 getOrCreateInterval(MO.getReg()), CopyMI);
580 // Def of a register also defines its sub-registers.
581 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
582 // If MI also modifies the sub-register explicitly, avoid processing it
583 // more than once. Do not pass in TRI here so it checks for exact match.
584 if (!MI->definesRegister(*AS))
585 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
586 getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Build the range for a register that is live-in to
// MBB: scan forward from the block start to its kill or first redefinition,
// and give the range a PHI-def value number anchored at the block start.
// NOTE(review): some loop-control lines are missing from this copy.
590 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
592 LiveInterval &interval, bool isAlias) {
593 DEBUG(dbgs() << "\t\tlivein register: " << PrintReg(interval.reg, tri_));
595 // Look for kills, if it reaches a def before it's killed, then it shouldn't
596 // be considered a livein.
597 MachineBasicBlock::iterator mi = MBB->begin();
598 MachineBasicBlock::iterator E = MBB->end();
599 // Skip over DBG_VALUE at the start of the MBB.
600 if (mi != E && mi->isDebugValue()) {
601 while (++mi != E && mi->isDebugValue())
604 // MBB is empty except for DBG_VALUE's.
608 SlotIndex baseIndex = MIIdx;
609 SlotIndex start = baseIndex;
610 if (getInstructionFromIndex(baseIndex) == 0)
611 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
613 SlotIndex end = baseIndex;
614 bool SeenDefUse = false;
617 if (mi->killsRegister(interval.reg, tri_)) {
618 DEBUG(dbgs() << " killed");
619 end = baseIndex.getDefIndex();
622 } else if (mi->definesRegister(interval.reg, tri_)) {
623 // Another instruction redefines the register before it is ever read.
624 // Then the register is essentially dead at the instruction that defines
625 // it. Hence its interval is:
626 // [defSlot(def), defSlot(def)+1)
627 DEBUG(dbgs() << " dead");
628 end = start.getStoreIndex();
633 while (++mi != E && mi->isDebugValue())
634 // Skip over DBG_VALUE.
637 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
640 // Live-in register might not be used at all.
643 DEBUG(dbgs() << " dead");
644 end = MIIdx.getStoreIndex();
646 DEBUG(dbgs() << " live through");
// The value is defined "by the block", i.e. by a PHI-like entry point.
651 SlotIndex defIdx = getMBBStartIdx(MBB);
652 assert(getInstructionFromIndex(defIdx) == 0 &&
653 "PHI def index points at actual instruction.");
655 interval.getNextValue(defIdx, 0, VNInfoAllocator);
656 vni->setIsPHIDef(true);
657 LiveRange LR(start, end, vni);
659 interval.addRange(LR);
660 DEBUG(dbgs() << " +" << LR << '\n');
663 /// computeIntervals - computes the live intervals for virtual
664 /// registers. for some ordering of the machine instructions [1,N] a
665 /// live interval is an interval [i, j) where 1 <= i <= j < N for
666 /// which a variable is live
// computeIntervals - See the doc comment above. Walks every block: first the
// live-in registers (and their sub-registers), then each instruction's
// register defs, finally creating empty intervals for <undef> uses.
667 void LiveIntervals::computeIntervals() {
668 DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
669 << "********** Function: "
670 << ((Value*)mf_->getFunction())->getName() << '\n');
672 SmallVector<unsigned, 8> UndefUses;
673 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
675 MachineBasicBlock *MBB = MBBI;
679 // Track the index of the current machine instr.
680 SlotIndex MIIndex = getMBBStartIdx(MBB);
681 DEBUG(dbgs() << "BB#" << MBB->getNumber()
682 << ":\t\t# derived from " << MBB->getName() << "\n");
684 // Create intervals for live-ins to this BB first.
685 for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
686 LE = MBB->livein_end(); LI != LE; ++LI) {
687 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
688 // Multiple live-ins can alias the same register.
689 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
690 if (!hasInterval(*AS))
691 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
695 // Skip over empty initial indices.
696 if (getInstructionFromIndex(MIIndex) == 0)
697 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
699 for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
701 DEBUG(dbgs() << MIIndex << "\t" << *MI);
702 if (MI->isDebugValue())
// Walk operands in reverse so implicit operands are seen first.
706 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
707 MachineOperand &MO = MI->getOperand(i);
708 if (!MO.isReg() || !MO.getReg())
711 // handle register defs - build intervals
713 handleRegisterDef(MBB, MI, MIIndex, MO, i);
714 else if (MO.isUndef())
715 UndefUses.push_back(MO.getReg());
718 // Move to the next instr slot.
719 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
723 // Create empty intervals for registers defined by implicit_def's (except
724 // for those implicit_def that define values which are liveout of their
726 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
727 unsigned UndefReg = UndefUses[i];
728 (void)getOrCreateInterval(UndefReg);
// createInterval - Allocate a fresh LiveInterval for 'reg'. Physical
// registers get HUGE_VALF spill weight so the allocator never spills them.
732 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
733 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
734 return new LiveInterval(reg, Weight);
737 /// dupInterval - Duplicate a live interval. The caller is responsible for
738 /// managing the allocated memory.
// dupInterval - See the doc comment above; copies li's ranges and value
// numbers into a newly created interval.
739 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
740 LiveInterval *NewLI = createInterval(li->reg);
741 NewLI->Copy(*li, mri_, getVNInfoAllocator());
745 //===----------------------------------------------------------------------===//
746 // Register allocator hooks.
// getLastSplitPoint - Return the latest point in mbb where spill/split code
// for 'li' may be inserted: the first terminator normally, or before the
// call when li is live into a landing-pad successor.
749 MachineBasicBlock::iterator
750 LiveIntervals::getLastSplitPoint(const LiveInterval &li,
751 MachineBasicBlock *mbb) {
752 const MachineBasicBlock *lpad = mbb->getLandingPadSuccessor();
754 // If li is not live into a landing pad, we can insert spill code before the
756 if (!lpad || !isLiveInToMBB(li, lpad))
757 return mbb->getFirstTerminator();
759 // When there is a landing pad, spill code must go before the call instruction
// Search backwards from the end of the block for the call.
761 MachineBasicBlock::iterator I = mbb->end(), B = mbb->begin();
764 if (I->getDesc().isCall())
// Fallback: no call found, which should not happen for an invoke block.
767 assert(0 && "Block with landing pad successor contains no call instruction");
768 return mbb->getFirstTerminator();
771 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
772 /// allow one) virtual register operand, then its uses are implicitly using
773 /// the register. Returns the virtual register.
// getReMatImplicitUse - See the doc comment above; scans MI's use operands
// for a single virtual (or unallocatable physical) register other than
// li.reg. NOTE(review): the return statements are missing from this copy.
774 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
775 MachineInstr *MI) const {
777 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
778 MachineOperand &MO = MI->getOperand(i);
779 if (!MO.isReg() || !MO.isUse())
781 unsigned Reg = MO.getReg();
782 if (Reg == 0 || Reg == li.reg)
// Unallocatable physregs (e.g. reserved regs) don't count as operands.
785 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
786 !allocatableRegs_[Reg])
788 // FIXME: For now, only remat MI with at most one register operand.
790 "Can't rematerialize instruction with multiple register operand!");
799 /// isValNoAvailableAt - Return true if the val# of the specified interval
800 /// which reaches the given instruction also reaches the specified use index.
// isValNoAvailableAt - See the doc comment above: the value live at UseIdx
// must be the same value that is live at MI's own index.
801 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
802 SlotIndex UseIdx) const {
803 VNInfo *UValNo = li.getVNInfoAt(UseIdx);
804 return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
807 /// isReMaterializable - Returns true if the definition MI of the specified
808 /// val# of the specified interval is re-materializable.
// isReMaterializable (SpillIs variant) - Target query plus two extra checks:
// any implicit register use must carry the same value at every use point,
// and must not itself be about to be spilled.
810 LiveIntervals::isReMaterializable(const LiveInterval &li,
811 const VNInfo *ValNo, MachineInstr *MI,
812 const SmallVectorImpl<LiveInterval*> &SpillIs,
817 if (!tii_->isTriviallyReMaterializable(MI, aa_))
820 // Target-specific code can mark an instruction as being rematerializable
821 // if it has one virtual reg use, though it had better be something like
822 // a PIC base register which is likely to be live everywhere.
823 unsigned ImpUse = getReMatImplicitUse(li, MI);
825 const LiveInterval &ImpLi = getInterval(ImpUse);
826 for (MachineRegisterInfo::use_nodbg_iterator
827 ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
829 MachineInstr *UseMI = &*ri;
830 SlotIndex UseIdx = getInstructionIndex(UseMI);
// Only uses reached by this particular value number matter.
831 if (li.getVNInfoAt(UseIdx) != ValNo)
833 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
837 // If a register operand of the re-materialized instruction is going to
838 // be spilled next, then it's not legal to re-materialize this instruction.
839 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
840 if (ImpUse == SpillIs[i]->reg)
846 /// isReMaterializable - Returns true if the definition MI of the specified
847 /// val# of the specified interval is re-materializable.
// isReMaterializable (convenience) - Same check with an empty spill list.
848 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
849 const VNInfo *ValNo, MachineInstr *MI) {
850 SmallVector<LiveInterval*, 4> Dummy1;
852 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
855 /// isReMaterializable - Returns true if every definition of MI of every
856 /// val# of the specified interval is re-materializable.
// isReMaterializable (whole interval) - Every non-dead value number's
// defining instruction must itself be rematerializable.
858 LiveIntervals::isReMaterializable(const LiveInterval &li,
859 const SmallVectorImpl<LiveInterval*> &SpillIs,
862 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
864 const VNInfo *VNI = *i;
866 continue; // Dead val#.
867 // Is the def for the val# rematerializable?
868 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
871 bool DefIsLoad = false;
873 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
880 /// FilterFoldedOps - Filter out two-address use operands. Return
881 /// true if it finds any issue with the operands that ought to prevent
// FilterFoldedOps - See the doc comment above: collects foldable operand
// indexes into FoldOps and accumulates mod/ref info in MRInfo, dropping
// two-address use operands.
883 static bool FilterFoldedOps(MachineInstr *MI,
884 SmallVector<unsigned, 2> &Ops,
886 SmallVector<unsigned, 2> &FoldOps) {
888 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
889 unsigned OpIdx = Ops[i];
890 MachineOperand &MO = MI->getOperand(OpIdx);
891 // FIXME: fold subreg use.
// Defs mark the slot as modified; uses below mark it as referenced.
895 MRInfo |= (unsigned)VirtRegMap::isMod;
897 // Filter out two-address use operand(s).
898 if (MI->isRegTiedToDefOperand(OpIdx)) {
899 MRInfo = VirtRegMap::isModRef;
902 MRInfo |= (unsigned)VirtRegMap::isRef;
904 FoldOps.push_back(OpIdx);
910 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
911 /// slot / to reg or any rematerialized load into ith operand of specified
912 /// MI. If it is successul, MI is updated with the newly created MI and
// tryFoldMemoryOperand - See the doc comment above. Deletes implicit_defs
// outright; otherwise asks the target to fold a stack slot (isSS) or the
// remat def (DefMI) into MI, transferring VRM bookkeeping to the new instr.
914 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
915 VirtRegMap &vrm, MachineInstr *DefMI,
917 SmallVector<unsigned, 2> &Ops,
918 bool isSS, int Slot, unsigned Reg) {
919 // If it is an implicit def instruction, just delete it.
920 if (MI->isImplicitDef()) {
921 RemoveMachineInstrFromMaps(MI);
922 vrm.RemoveMachineInstrFromMaps(MI);
923 MI->eraseFromParent();
928 // Filter the list of operand indexes that are to be folded. Abort if
929 // any operand will prevent folding.
931 SmallVector<unsigned, 2> FoldOps;
932 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
935 // The only time it's safe to fold into a two address instruction is when
936 // it's folding reload and spill from / into a spill stack slot.
937 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Ask the target to produce the folded instruction, if it can.
940 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
941 : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
943 // Remember this instruction uses the spill slot.
944 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
946 // Attempt to fold the memory reference into the instruction. If
947 // we can do this, we don't need to insert spill code.
948 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
949 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Move all VRM annotations from the old instruction to the folded one.
950 vrm.transferSpillPts(MI, fmi);
951 vrm.transferRestorePts(MI, fmi);
952 vrm.transferEmergencySpills(MI, fmi);
953 ReplaceMachineInstrInMaps(MI, fmi);
954 MI->eraseFromParent();
962 /// canFoldMemoryOperand - Returns true if the specified load / store
963 /// folding is possible.
// canFoldMemoryOperand - See the doc comment above; a dry-run version of
// tryFoldMemoryOperand that only queries the target.
964 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
965 SmallVector<unsigned, 2> &Ops,
967 // Filter the list of operand indexes that are to be folded. Abort if
968 // any operand will prevent folding.
970 SmallVector<unsigned, 2> FoldOps;
971 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
974 // It's only legal to remat for a use, not a def.
975 if (ReMat && (MRInfo & VirtRegMap::isMod))
978 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return whether every range of 'li' is covered by the
// same basic block. NOTE(review): the comparison/return lines are missing
// from this copy.
981 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
982 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
984 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
989 for (++itr; itr != li.ranges.end(); ++itr) {
990 MachineBasicBlock *mbb2 =
991 indexes_->getMBBCoveringRange(itr->start, itr->end);
1000 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1001 /// interval on to-be re-materialized operands of MI) with new register.
// rewriteImplicitOps - See the doc comment above: for every rematerialized
// virtual register operand of MI, rewrite the remat'ed instruction's use of
// li.reg to NewVReg.
1002 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1003 MachineInstr *MI, unsigned NewVReg,
1005 // There is an implicit use. That means one of the other operand is
1006 // being remat'ed and the remat'ed instruction has li.reg as an
1007 // use operand. Make sure we rewrite that as well.
1008 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1009 MachineOperand &MO = MI->getOperand(i);
1012 unsigned Reg = MO.getReg();
1013 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1015 if (!vrm.isReMaterialized(Reg))
1017 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1018 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1020 UseMO->setReg(NewVReg);
1024 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1025 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
//
// Rewrites one MachineInstr that touches the spilled interval `li`:
//   - deletes the instruction outright if it is the remat'able original def
//     and all its uses were rematerialized;
//   - otherwise tries to fold the reload/spill into the instruction, and if
//     that fails rewrites the operands to a fresh vreg (NewVReg) and extends
//     the new spill interval over this def/use.
// Returns whether the (unfolded) memory operand could still be folded later.
// NOTE(review): this excerpt is elided — the `MachineInstr *MI` and
// `VirtRegMap &vrm` parameters (line 1029/1033), the `RestartInstruction:`
// label targeted by the goto below, several `else` branches and closing
// braces are not visible here. Code left byte-identical.
1026 bool LiveIntervals::
1027 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1028 bool TrySplit, SlotIndex index, SlotIndex end,
1030 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1031 unsigned Slot, int LdSlot,
1032 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1034 const TargetRegisterClass* rc,
1035 SmallVector<int, 4> &ReMatIds,
1036 const MachineLoopInfo *loopInfo,
1037 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1038 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1039 std::vector<LiveInterval*> &NewLIs) {
1040 bool CanFold = false;
// Scan every operand; the count is re-read each iteration because folding
// below can rewrite the instruction (see the goto RestartInstruction).
1042 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1043 MachineOperand& mop = MI->getOperand(i);
// (elided guards: skip non-register operands and regs other than li.reg)
1046 unsigned Reg = mop.getReg();
1047 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1052 bool TryFold = !DefIsReMat;
1053 bool FoldSS = true; // Default behavior unless it's a remat.
1054 int FoldSlot = Slot;
// --- Phase 1: delete a fully-rematerialized original def ---
1056 // If this is the rematerializable definition MI itself and
1057 // all of its uses are rematerialized, simply delete it.
1058 if (MI == ReMatOrigDefMI && CanDelete) {
1059 DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
1061 RemoveMachineInstrFromMaps(MI);
1062 vrm.RemoveMachineInstrFromMaps(MI);
1063 MI->eraseFromParent();
// (elided: early return — the instruction is gone)
1067 // If def for this use can't be rematerialized, then try folding.
1068 // If def is rematerializable and it's a load, also try folding.
1069 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1071 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1077 // Scan all of the operands of this instruction rewriting operands
1078 // to use NewVReg instead of li.reg as appropriate. We do this for
1081 // 1. If the instr reads the same spilled vreg multiple times, we
1082 // want to reuse the NewVReg.
1083 // 2. If the instr is a two-addr instruction, we are required to
1084 // keep the src/dst regs pinned.
1086 // Keep track of whether we replace a use and/or def so that we can
1087 // create the spill interval with the appropriate range.
1088 SmallVector<unsigned, 2> Ops;
1089 tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);
// --- Phase 2: pick/create the replacement vreg ---
1091 // Create a new virtual register for the spill interval.
1092 // Create the new register now so we can map the fold instruction
1093 // to the new register so when it is unfolded we get the correct
1095 bool CreatedNewVReg = false;
1097 NewVReg = mri_->createVirtualRegister(rc);
1099 CreatedNewVReg = true;
1101 // The new virtual register should get the same allocation hints as the
1103 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1104 if (Hint.first || Hint.second)
1105 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
// --- Phase 3: attempt the fold ---
1111 // Do not fold load / store here if we are splitting. We'll find an
1112 // optimal point to insert a load / store later.
1114 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1115 Ops, FoldSS, FoldSlot, NewVReg)) {
1116 // Folding the load/store can completely change the instruction in
1117 // unpredictable ways, rescan it from the beginning.
1120 // We need to give the new vreg the same stack slot as the
1121 // spilled interval.
1122 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1128 if (isNotInMIMap(MI))
// (elided: return path when the folded MI vanished from the index maps)
1130 goto RestartInstruction;
1133 // We'll try to fold it later if it's profitable.
1134 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// --- Phase 4: rewrite operands to NewVReg ---
1138 mop.setReg(NewVReg);
1139 if (mop.isImplicit())
1140 rewriteImplicitOps(li, MI, NewVReg, vrm);
1142 // Reuse NewVReg for other reads.
1143 bool HasEarlyClobber = false;
1144 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1145 MachineOperand &mopj = MI->getOperand(Ops[j]);
1146 mopj.setReg(NewVReg);
1147 if (mopj.isImplicit())
1148 rewriteImplicitOps(li, MI, NewVReg, vrm);
1149 if (mopj.isEarlyClobber())
1150 HasEarlyClobber = true;
// --- Phase 5: record remat id / stack slot for the new vreg ---
1153 if (CreatedNewVReg) {
1155 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1156 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1157 // Each valnum may have its own remat id.
1158 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
// (elided else: share the val#'s existing remat id)
1160 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1162 if (!CanDelete || (HasUse && HasDef)) {
1163 // If this is a two-addr instruction then its use operands are
1164 // rematerializable but its def is not. It should be assigned a
1166 vrm.assignVirt2StackSlot(NewVReg, Slot);
1169 vrm.assignVirt2StackSlot(NewVReg, Slot);
1171 } else if (HasUse && HasDef &&
1172 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1173 // If this interval hasn't been assigned a stack slot (because earlier
1174 // def is a deleted remat def), do it now.
1175 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1176 vrm.assignVirt2StackSlot(NewVReg, Slot);
1179 // Re-matting an instruction with virtual register use. Add the
1180 // register as an implicit use on the use MI.
1181 if (DefIsReMat && ImpUse)
1182 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
// --- Phase 6: build / extend the spill interval's live range ---
1184 // Create a new register interval for this spill / remat.
1185 LiveInterval &nI = getOrCreateInterval(NewVReg);
1186 if (CreatedNewVReg) {
1187 NewLIs.push_back(&nI);
1188 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1190 vrm.setIsSplitFromReg(NewVReg, li.reg);
1194 if (CreatedNewVReg) {
// Fresh interval: live from the reload point up to the def slot.
1195 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1196 nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
1197 DEBUG(dbgs() << " +" << LR);
1200 // Extend the split live interval to this def / use.
1201 SlotIndex End = index.getDefIndex();
1202 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1203 nI.getValNumInfo(nI.getNumValNums()-1));
1204 DEBUG(dbgs() << " +" << LR);
1209 // An early clobber starts at the use slot, except for an early clobber
1210 // tied to a use operand (yes, that is a thing).
1211 LiveRange LR(HasEarlyClobber && !HasUse ?
1212 index.getUseIndex() : index.getDefIndex(),
1213 index.getStoreIndex(),
1214 nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
1215 DEBUG(dbgs() << " +" << LR);
1220 dbgs() << "\t\t\t\tAdded new interval: ";
1221 nI.print(dbgs(), tri_);
// anyKillInMBBAfterIdx - Return true if `li` has a kill inside MBB strictly
// after Idx (i.e. in the half-open range (Idx, end-of-MBB]). Used to decide
// whether a spill store is still needed after a def.
// NOTE(review): a `const VNInfo *VNI` parameter appears to be on an elided
// line (1228) — callers in this file pass one; confirm against the full file.
1227 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1229 MachineBasicBlock *MBB,
1230 SlotIndex Idx) const {
1231 return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
1234 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1235 /// during spilling.
// NOTE(review): the `Index`/`MI` member declarations (lines 1238-1239) and a
// probable anonymous-namespace wrapper are elided from this excerpt.
1237 struct RewriteInfo {
1240 RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
// Strict-weak ordering by slot index, used with std::sort so rewrites are
// processed in instruction order within the live range.
1243 struct RewriteInfoCompare {
1244 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1245 return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Rewrite every def/use of the spilled interval
// `li` inside the live range *I: collect all touching instructions, sort them
// by slot index, rewrite each via rewriteInstructionForSpills, and record the
// per-MBB spill (last def) and restore (first use) points in SpillIdxes /
// RestoreIdxes for the caller to materialize stores and reloads.
// NOTE(review): this excerpt is elided — the `VirtRegMap &vrm` parameter
// (line 1256), several `else` branches, `continue`s, and closing braces are
// not visible here. Code left byte-identical.
1250 void LiveIntervals::
1251 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1252 LiveInterval::Ranges::const_iterator &I,
1253 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1254 unsigned Slot, int LdSlot,
1255 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1257 const TargetRegisterClass* rc,
1258 SmallVector<int, 4> &ReMatIds,
1259 const MachineLoopInfo *loopInfo,
1260 BitVector &SpillMBBs,
1261 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1262 BitVector &RestoreMBBs,
1263 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1264 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1265 std::vector<LiveInterval*> &NewLIs) {
1266 bool AllCanFold = true;
1267 unsigned NewVReg = 0;
// Normalize the range to whole-instruction boundaries.
1268 SlotIndex start = I->start.getBaseIndex();
1269 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
// --- Phase 1: collect every def/use of li.reg inside [start, end) ---
1271 // First collect all the def / use in this live range that will be rewritten.
1272 // Make sure they are sorted according to instruction index.
1273 std::vector<RewriteInfo> RewriteMIs;
1274 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1275 re = mri_->reg_end(); ri != re; ) {
1276 MachineInstr *MI = &*ri;
1277 MachineOperand &O = ri.getOperand();
// (elided: iterator advance — ri must move past MI before MI can be erased)
1279 if (MI->isDebugValue()) {
1280 // Modify DBG_VALUE now that the value is in a spill slot.
1281 if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
1282 uint64_t Offset = MI->getOperand(1).getImm();
1283 const MDNode *MDPtr = MI->getOperand(2).getMetadata();
1284 DebugLoc DL = MI->getDebugLoc();
1285 int FI = isLoadSS ? LdSlot : (int)Slot;
1286 if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
1287 Offset, MDPtr, DL)) {
1288 DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
1289 ReplaceMachineInstrInMaps(MI, NewDV);
1290 MachineBasicBlock *MBB = MI->getParent();
1291 MBB->insert(MBB->erase(MI), NewDV);
// (elided: continue; fall-through path when no frame-index DBG_VALUE exists)
1296 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1297 RemoveMachineInstrFromMaps(MI);
1298 vrm.RemoveMachineInstrFromMaps(MI);
1299 MI->eraseFromParent();
1302 assert(!(O.isImplicit() && O.isUse()) &&
1303 "Spilling register that's used as implicit use?");
1304 SlotIndex index = getInstructionIndex(MI);
1305 if (index < start || index >= end)
// (elided: continue; instruction is outside this live range)
1309 // Must be defined by an implicit def. It should not be spilled. Note,
1310 // this is for correctness reason. e.g.
1311 // 8 %reg1024<def> = IMPLICIT_DEF
1312 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1313 // The live range [12, 14) are not part of the r1024 live interval since
1314 // it's defined by an implicit def. It will not conflicts with live
1315 // interval of r1025. Now suppose both registers are spilled, you can
1316 // easily see a situation where both registers are reloaded before
1317 // the INSERT_SUBREG and both target registers that would overlap.
1319 RewriteMIs.push_back(RewriteInfo(index, MI));
1321 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
// --- Phase 2: rewrite each collected instruction in index order ---
1323 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1324 // Now rewrite the defs and uses.
1325 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1326 RewriteInfo &rwi = RewriteMIs[i];
1328 SlotIndex index = rwi.Index;
1329 MachineInstr *MI = rwi.MI;
1330 // If MI def and/or use the same register multiple times, then there
1331 // are multiple entries.
1332 while (i != e && RewriteMIs[i].MI == MI) {
1333 assert(RewriteMIs[i].Index == index);
// (elided: ++i — skip the duplicate entries for this MI)
1336 MachineBasicBlock *MBB = MI->getParent();
1338 if (ImpUse && MI != ReMatDefMI) {
1339 // Re-matting an instruction with virtual register use. Prevent interval
1340 // from being spilled.
1341 getInterval(ImpUse).markNotSpillable();
// Within one MBB, reuse the vreg created for an earlier rewrite (two-addr
// constraints and repeated reads require it).
1344 unsigned MBBId = MBB->getNumber();
1345 unsigned ThisVReg = 0;
1347 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1348 if (NVI != MBBVRegsMap.end()) {
1349 ThisVReg = NVI->second;
1356 // It's better to start a new interval to avoid artifically
1357 // extend the new interval.
1358 if (MI->readsWritesVirtualRegister(li.reg) ==
1359 std::make_pair(false,true)) {
1360 MBBVRegsMap.erase(MBB->getNumber());
// (elided: reset ThisVReg so a pure def starts a fresh interval)
1366 bool IsNew = ThisVReg == 0;
1368 // This ends the previous live interval. If all of its def / use
1369 // can be folded, give it a low spill weight.
1370 if (NewVReg && TrySplit && AllCanFold) {
1371 LiveInterval &nI = getOrCreateInterval(NewVReg);
// (elided: nI.weight /= weight divisor; AllCanFold reset)
1378 bool HasDef = false;
1379 bool HasUse = false;
1380 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1381 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1382 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1383 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1384 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1385 if (!HasDef && !HasUse)
// (elided: continue; the instruction no longer touches the interval)
1388 AllCanFold &= CanFold;
1390 // Update weight of spill interval.
1391 LiveInterval &nI = getOrCreateInterval(NewVReg);
// (elided: if (!TrySplit) branch wrapping the markNotSpillable below)
1393 // The spill weight is now infinity as it cannot be spilled again.
1394 nI.markNotSpillable();
// (elided: weight update + continue for the non-splitting path)
// --- Phase 3 (splitting only): record spill point for a def ---
1398 // Keep track of the last def and first use in each MBB.
1400 if (MI != ReMatOrigDefMI || !CanDelete) {
1401 bool HasKill = false;
1403 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1405 // If this is a two-address code, then this index starts a new VNInfo.
1406 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1408 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1410 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1411 SpillIdxes.find(MBBId);
// (elided: `if (!HasKill)` guard — only record a spill when the value
// survives past this def inside the block)
1413 if (SII == SpillIdxes.end()) {
1414 std::vector<SRInfo> S;
1415 S.push_back(SRInfo(index, NewVReg, true));
1416 SpillIdxes.insert(std::make_pair(MBBId, S));
1417 } else if (SII->second.back().vreg != NewVReg) {
1418 SII->second.push_back(SRInfo(index, NewVReg, true));
1419 } else if (index > SII->second.back().index) {
1420 // If there is an earlier def and this is a two-address
1421 // instruction, then it's not possible to fold the store (which
1422 // would also fold the load).
1423 SRInfo &Info = SII->second.back();
// (elided: Info.index = index;)
1425 Info.canFold = !HasUse;
1427 SpillMBBs.set(MBBId);
1428 } else if (SII != SpillIdxes.end() &&
1429 SII->second.back().vreg == NewVReg &&
1430 index > SII->second.back().index) {
1431 // There is an earlier def that's not killed (must be two-address).
1432 // The spill is no longer needed.
1433 SII->second.pop_back();
1434 if (SII->second.empty()) {
1435 SpillIdxes.erase(MBBId);
1436 SpillMBBs.reset(MBBId);
// --- Phase 4 (splitting only): record restore point for a use ---
1443 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1444 SpillIdxes.find(MBBId);
1445 if (SII != SpillIdxes.end() &&
1446 SII->second.back().vreg == NewVReg &&
1447 index > SII->second.back().index)
1448 // Use(s) following the last def, it's not safe to fold the spill.
1449 SII->second.back().canFold = false;
1450 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1451 RestoreIdxes.find(MBBId);
1452 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1453 // If we are splitting live intervals, only fold if it's the first
1454 // use and there isn't another use later in the MBB.
1455 RII->second.back().canFold = false;
// (elided: `else if (IsNew)` wrapper around the reload bookkeeping below)
1457 // Only need a reload if there isn't an earlier def / use.
1458 if (RII == RestoreIdxes.end()) {
1459 std::vector<SRInfo> Infos;
1460 Infos.push_back(SRInfo(index, NewVReg, true));
1461 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
// (elided else:)
1463 RII->second.push_back(SRInfo(index, NewVReg, true));
1465 RestoreMBBs.set(MBBId);
1469 // Update spill weight.
1470 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1471 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
// Final interval also gets the low-weight treatment when fully foldable.
1474 if (NewVReg && TrySplit && AllCanFold) {
1475 // If all of its def / use can be folded, give it a low spill weight.
1476 LiveInterval &nI = getOrCreateInterval(NewVReg);
// alsoFoldARestore - Return true if MBB `Id` has a foldable restore recorded
// for vreg `vr` at slot `index`. Used when folding a spill store to check
// whether the matching reload can be folded into the same instruction.
1481 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1482 unsigned vr, BitVector &RestoreMBBs,
1483 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1484 if (!RestoreMBBs[Id])
// (elided: return false; — no restores recorded for this MBB)
1486 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1487 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1488 if (Restores[i].index == index &&
1489 Restores[i].vreg == vr &&
1490 Restores[i].canFold)
// (elided: return true; then final return false;)
// eraseRestoreInfo - Invalidate the restore entry for (Id, index, vr) by
// clearing its slot index; a default-constructed SlotIndex marks "no restore"
// (the restore loop in addIntervalsForSpills skips such entries).
1495 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1496 unsigned vr, BitVector &RestoreMBBs,
1497 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1498 if (!RestoreMBBs[Id])
// (elided: return; — nothing recorded for this MBB)
1500 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1501 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
// NOTE(review): the condition compares `Restores[i].vreg` truthiness rather
// than equality with `vr`; an `== vr` comparison may be on an elided
// continuation line — confirm against the full file.
1502 if (Restores[i].index == index && Restores[i].vreg)
1503 Restores[i].index = SlotIndex();
1506 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1507 /// spilled and create empty intervals for their uses.
// NOTE(review): the return type line (1508, presumably `void`) and the
// iterator-advance / guard lines are elided from this excerpt.
1509 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1510 const TargetRegisterClass* rc,
1511 std::vector<LiveInterval*> &NewLIs) {
// Walk all remaining references to li.reg; anything still here after
// rewriting must be an IMPLICIT_DEF (asserted below) or a use fed by one.
1512 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1513 re = mri_->reg_end(); ri != re; ) {
1514 MachineOperand &O = ri.getOperand();
1515 MachineInstr *MI = &*ri;
1517 if (MI->isDebugValue()) {
1518 // Remove debug info for now.
1520 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
// (elided: erase the DBG_VALUE and continue)
// Def operand: must be the IMPLICIT_DEF itself — delete it entirely.
1524 assert(MI->isImplicitDef() &&
1525 "Register def was not rewritten?");
1526 RemoveMachineInstrFromMaps(MI);
1527 vrm.RemoveMachineInstrFromMaps(MI);
1528 MI->eraseFromParent();
1530 // This must be an use of an implicit_def so it's not part of the live
1531 // interval. Create a new empty live interval for it.
1532 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1533 unsigned NewVReg = mri_->createVirtualRegister(rc);
1535 vrm.setIsImplicitlyDefined(NewVReg);
1536 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Rewrite every operand of this user that still names li.reg.
1537 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1538 MachineOperand &MO = MI->getOperand(i);
1539 if (MO.isReg() && MO.getReg() == li.reg) {
// getSpillWeight - Compute the contribution of one def and/or use at the
// given loop depth to a live interval's spill weight. Deeper loops weigh
// (roughly) exponentially more, with the depth clamped to keep the float
// result finite.
// NOTE(review): the return type line (1548, presumably `float`) and the
// clamp statement under the `if` (likely `loopDepth = 200;`) are elided.
1549 LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1550 // Limit the loop depth ridiculousness.
1551 if (loopDepth > 200)
1554 // The loop depth is used to roughly estimate the number of times the
1555 // instruction is executed. Something like 10^d is simple, but will quickly
1556 // overflow a float. This expression behaves like 10^d for small d, but is
1557 // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1558 // headroom before overflow.
1559 float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
// bools promote to 0/1, so a def+use instruction counts double.
1561 return (isDef + isUse) * lc;
// normalizeSpillWeights - Apply normalizeSpillWeight to each freshly created
// spill interval so their weights are comparable to ordinary intervals.
// NOTE(review): the return type line (1564, presumably `void`) is elided.
1565 LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1566 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1567 normalizeSpillWeight(*NewLIs[i]);
// addIntervalsForSpills - Top-level spiller entry point for interval `li`:
// decides remat vs. stack slot per value number, rewrites all defs/uses into
// fresh intervals, then (when splitting) inserts spill stores and restore
// loads at the per-MBB points collected by rewriteInstructionsForSpills.
// Returns the new intervals for the register allocator to process.
// NOTE(review): this excerpt is elided — early-return lines, several `else`
// branches, `continue`s, and closing braces are not visible. Code left
// byte-identical.
1570 std::vector<LiveInterval*> LiveIntervals::
1571 addIntervalsForSpills(const LiveInterval &li,
1572 const SmallVectorImpl<LiveInterval*> &SpillIs,
1573 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1574 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1577 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1578 li.print(dbgs(), tri_);
// --- Per-MBB bookkeeping filled in by rewriteInstructionsForSpills ---
1582 // Each bit specify whether a spill is required in the MBB.
1583 BitVector SpillMBBs(mf_->getNumBlockIDs());
1584 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1585 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1586 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1587 DenseMap<unsigned,unsigned> MBBVRegsMap;
1588 std::vector<LiveInterval*> NewLIs;
1589 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-value-number remat state, indexed by VNInfo::id.
1591 unsigned NumValNums = li.getNumValNums();
1592 SmallVector<MachineInstr*, 4> ReMatDefs;
1593 ReMatDefs.resize(NumValNums, NULL);
1594 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1595 ReMatOrigDefs.resize(NumValNums, NULL);
1596 SmallVector<int, 4> ReMatIds;
1597 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1598 BitVector ReMatDelete(NumValNums);
1599 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
// --- Fast path: re-spilling an interval produced by an earlier split ---
1601 // Spilling a split live interval. It cannot be split any further. Also,
1602 // it's also guaranteed to be a single val# / range interval.
1603 if (vrm.getPreSplitReg(li.reg)) {
1604 vrm.setIsSplitFromReg(li.reg, 0);
1605 // Unset the split kill marker on the last use.
1606 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1607 if (KillIdx != SlotIndex()) {
1608 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1609 assert(KillMI && "Last use disappeared?");
1610 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1611 assert(KillOp != -1 && "Last use disappeared?");
1612 KillMI->getOperand(KillOp).setIsKill(false);
1614 vrm.removeKillPoint(li.reg);
1615 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1616 Slot = vrm.getStackSlot(li.reg);
1617 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1618 MachineInstr *ReMatDefMI = DefIsReMat ?
1619 vrm.getReMaterializedMI(li.reg) : NULL;
// (elided: `int LdSlot = 0;` — written by isLoadFromStackSlot below)
1621 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1622 bool isLoad = isLoadSS ||
1623 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1624 bool IsFirstRange = true;
1625 for (LiveInterval::Ranges::const_iterator
1626 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1627 // If this is a split live interval with multiple ranges, it means there
1628 // are two-address instructions that re-defined the value. Only the
1629 // first def can be rematerialized!
// (elided: `if (IsFirstRange) {` — the branch selecting between the two
// rewriteInstructionsForSpills calls below)
1631 // Note ReMatOrigDefMI has already been deleted.
1632 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1633 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1634 false, vrm, rc, ReMatIds, loopInfo,
1635 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1636 MBBVRegsMap, NewLIs);
// (elided else: subsequent ranges cannot remat)
1638 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1639 Slot, 0, false, false, false,
1640 false, vrm, rc, ReMatIds, loopInfo,
1641 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1642 MBBVRegsMap, NewLIs);
1644 IsFirstRange = false;
1647 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1648 normalizeSpillWeights(NewLIs);
// (elided: `return NewLIs;` — end of the pre-split fast path)
// --- General path ---
1652 bool TrySplit = !intervalIsInOneMBB(li);
1655 bool NeedStackSlot = false;
// Pass 1: classify each value number as remat'able or not.
1656 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1658 const VNInfo *VNI = *i;
1659 unsigned VN = VNI->id;
1660 if (VNI->isUnused())
1661 continue; // Dead val#.
1662 // Is the def for the val# rematerializable?
1663 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1665 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1666 // Remember how to remat the def of this val#.
1667 ReMatOrigDefs[VN] = ReMatDefMI;
1668 // Original def may be modified so we have to make a copy here.
1669 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1670 CloneMIs.push_back(Clone);
1671 ReMatDefs[VN] = Clone;
1673 bool CanDelete = true;
1674 if (VNI->hasPHIKill()) {
1675 // A kill is a phi node, not all of its uses can be rematerialized.
1676 // It must not be deleted.
// (elided: CanDelete = false;)
1678 // Need a stack slot if there is any live range where uses cannot be
1680 NeedStackSlot = true;
// (elided: record deletable val#s)
1683 ReMatDelete.set(VN);
1685 // Need a stack slot if there is any live range where uses cannot be
1687 NeedStackSlot = true;
1691 // One stack slot per live interval.
1692 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1693 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1694 Slot = vrm.assignVirt2StackSlot(li.reg);
1696 // This case only occurs when the prealloc splitter has already assigned
1697 // a stack slot to this vreg.
1699 Slot = vrm.getStackSlot(li.reg);
// Pass 2: rewrite every range using the per-val# remat decisions.
1702 // Create new intervals and rewrite defs and uses.
1703 for (LiveInterval::Ranges::const_iterator
1704 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1705 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1706 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1707 bool DefIsReMat = ReMatDefMI != NULL;
1708 bool CanDelete = ReMatDelete[I->valno->id];
// (elided: `int LdSlot = 0;`)
1710 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1711 bool isLoad = isLoadSS ||
1712 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1713 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1714 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1715 CanDelete, vrm, rc, ReMatIds, loopInfo,
1716 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1717 MBBVRegsMap, NewLIs);
1720 // Insert spills / restores if we are splitting.
// (elided: `if (!TrySplit)` early return wrapping the two calls below)
1722 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1723 normalizeSpillWeights(NewLIs);
// Pass 3 (splitting): emit spill stores at recorded last-def points.
1727 SmallPtrSet<LiveInterval*, 4> AddedKill;
1728 SmallVector<unsigned, 2> Ops;
1729 if (NeedStackSlot) {
1730 int Id = SpillMBBs.find_first();
// (elided: `while (Id != -1) {` loop header)
1732 std::vector<SRInfo> &spills = SpillIdxes[Id];
1733 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1734 SlotIndex index = spills[i].index;
1735 unsigned VReg = spills[i].vreg;
1736 LiveInterval &nI = getOrCreateInterval(VReg);
1737 bool isReMat = vrm.isReMaterialized(VReg);
1738 MachineInstr *MI = getInstructionFromIndex(index);
1739 bool CanFold = false;
1740 bool FoundUse = false;
1742 if (spills[i].canFold) {
// (elided: CanFold = true; Ops.clear();)
1744 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1745 MachineOperand &MO = MI->getOperand(j);
1746 if (!MO.isReg() || MO.getReg() != VReg)
// (elided: continue; plus use/def classification into Ops/FoundUse)
1753 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1754 RestoreMBBs, RestoreIdxes))) {
1755 // MI has two-address uses of the same register. If the use
1756 // isn't the first and only use in the BB, then we can't fold
1757 // it. FIXME: Move this to rewriteInstructionsForSpills.
// (elided: CanFold = false; break;)
1764 // Fold the store into the def if possible.
1765 bool Folded = false;
1766 if (CanFold && !Ops.empty()) {
1767 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
// (elided: Folded = true;)
// (elided: `if (FoundUse) {` guard for the restore-erase below)
1770 // Also folded uses, do not issue a load.
1771 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1772 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1774 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1778 // Otherwise tell the spiller to issue a spill.
// (elided: `if (!Folded) {` guard)
1780 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1781 bool isKill = LR->end == index.getStoreIndex();
1782 if (!MI->registerDefIsDead(nI.reg))
1783 // No need to spill a dead def.
1784 vrm.addSpillPoint(VReg, isKill, MI);
// (elided: `if (isKill)` guard)
1786 AddedKill.insert(&nI);
1789 Id = SpillMBBs.find_next(Id);
// Pass 4 (splitting): emit restore loads / remats at first-use points.
1793 int Id = RestoreMBBs.find_first();
// (elided: `while (Id != -1) {` loop header)
1795 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1796 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1797 SlotIndex index = restores[i].index;
1798 if (index == SlotIndex())
// (elided: continue; — entry invalidated by eraseRestoreInfo)
1800 unsigned VReg = restores[i].vreg;
1801 LiveInterval &nI = getOrCreateInterval(VReg);
1802 bool isReMat = vrm.isReMaterialized(VReg);
1803 MachineInstr *MI = getInstructionFromIndex(index);
1804 bool CanFold = false;
// (elided: Ops.clear();)
1806 if (restores[i].canFold) {
// (elided: CanFold = true;)
1808 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1809 MachineOperand &MO = MI->getOperand(j);
1810 if (!MO.isReg() || MO.getReg() != VReg)
// (elided: continue;)
// (elided: `if (MO.isDef()) {` — a def among the operands means...)
1814 // If this restore were to be folded, it would have been folded
// (elided: "...already" + CanFold = false; break; and Ops collection)
1823 // Fold the load into the use if possible.
1824 bool Folded = false;
1825 if (CanFold && !Ops.empty()) {
// (elided: `if (!isReMat)` branch selecting stack-slot vs remat fold)
1827 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
// (elided else: the remat case)
1829 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
// (elided: `int LdSlot = 0;`)
1831 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1832 // If the rematerializable def is a load, also try to fold it.
1833 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1834 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1835 Ops, isLoadSS, LdSlot, VReg);
1837 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
// (elided: `if (ImpUse) {` guard)
1839 // Re-matting an instruction with virtual register use. Add the
1840 // register as an implicit use on the use MI and mark the register
1841 // interval as unspillable.
1842 LiveInterval &ImpLi = getInterval(ImpUse);
1843 ImpLi.markNotSpillable();
1844 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1849 // If folding is not possible / failed, then tell the spiller to issue a
1850 // load / rematerialization for us.
// (elided: `if (Folded)` vs else branch around the two lines below)
1852 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1854 vrm.addRestorePoint(VReg, MI);
1856 Id = RestoreMBBs.find_next(Id);
// Pass 5: finalize — set kill flags on last uses, drop empty intervals.
1859 // Finalize intervals: add kills, finalize spill weights, and filter out
1861 std::vector<LiveInterval*> RetNewLIs;
1862 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1863 LiveInterval *LI = NewLIs[i];
// (elided: `if (!LI->empty()) {` guard and weight normalization)
1865 if (!AddedKill.count(LI)) {
1866 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1867 SlotIndex LastUseIdx = LR->end.getBaseIndex();
1868 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1869 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1870 assert(UseIdx != -1);
1871 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
1872 LastUse->getOperand(UseIdx).setIsKill();
1873 vrm.addKillPoint(LI->reg, LastUseIdx);
1876 RetNewLIs.push_back(LI);
1880 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1881 normalizeSpillWeights(RetNewLIs);
// (elided: `return RetNewLIs;`)
1885 /// hasAllocatableSuperReg - Return true if the specified physical register has
1886 /// any super register that's allocatable.
1887 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// Walk the null-terminated super-register list from TargetRegisterInfo;
// a super-reg counts only if it is allocatable AND has a live interval.
1888 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1889 if (allocatableRegs_[*AS] && hasInterval(*AS))
// (elided: return true; then final return false;)
1894 /// getRepresentativeReg - Find the largest super register of the specified
1895 /// physical register.
1896 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1897 // Find the largest super-register that is allocatable.
1898 unsigned BestReg = Reg;
1899 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1900 unsigned SuperReg = *AS;
// A super-reg with an interval but no allocatable super-reg of its own is
// the topmost interesting one.
1901 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
// (elided: BestReg = SuperReg; break; then `return BestReg;`)
1909 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1910 /// specified interval that conflicts with the specified physical register.
1911 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1912 unsigned PhysReg) const {
1913 unsigned NumConflicts = 0;
// Conflicts are counted against the representative (largest) super-reg's
// interval, matching how spillPhysRegAroundRegDefsUses picks its SpillReg.
1914 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1915 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1916 E = mri_->reg_end(); I != E; ++I) {
1917 MachineOperand &O = I.getOperand();
1918 MachineInstr *MI = O.getParent();
1919 if (MI->isDebugValue())
// (elided: continue; — debug values don't count as conflicts)
1921 SlotIndex Index = getInstructionIndex(MI);
1922 if (pli.liveAt(Index))
// (elided: ++NumConflicts;)
1925 return NumConflicts;
1928 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1929 /// around all defs and uses of the specified interval. Return true if it
1930 /// was able to cut its interval.
1931 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1932 unsigned PhysReg, VirtRegMap &vrm) {
// Work on the representative (largest allocatable) super-register so that
// one emergency spill covers all aliasing sub-registers.
1933 unsigned SpillReg = getRepresentativeReg(PhysReg);
1935 DEBUG(dbgs() << "spillPhysRegAroundRegDefsUses " << tri_->getName(PhysReg)
1936 << " represented by " << tri_->getName(SpillReg) << '\n');
1938 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1939 // If there are registers which alias PhysReg, but which are not a
1940 // sub-register of the chosen representative super register. Assert
1941 // since we can't handle it yet.
1942 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
1943 tri_->isSuperRegister(*AS, SpillReg));
// Collect SpillReg and all its sub-registers that have live intervals.
1946 SmallVector<unsigned, 4> PRegs;
1947 if (hasInterval(SpillReg))
1948 PRegs.push_back(SpillReg);
1949 for (const unsigned *SR = tri_->getSubRegisters(SpillReg); *SR; ++SR)
1950 if (hasInterval(*SR))
1951 PRegs.push_back(*SR);
1954 dbgs() << "Trying to spill:";
1955 for (unsigned i = 0, e = PRegs.size(); i != e; ++i)
1956 dbgs() << ' ' << tri_->getName(PRegs[i]);
// For each instruction touching li.reg, punch a hole in every conflicting
// phys-reg interval around that instruction and record an emergency spill.
1960 SmallPtrSet<MachineInstr*, 8> SeenMIs;
1961 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1962 E = mri_->reg_end(); I != E; ++I) {
1963 MachineOperand &O = I.getOperand();
1964 MachineInstr *MI = O.getParent();
1965 if (MI->isDebugValue() || SeenMIs.count(MI))
// (elided: continue; SeenMIs.insert(MI); — process each MI only once)
1968 SlotIndex Index = getInstructionIndex(MI);
1969 bool LiveReg = false;
1970 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
1971 unsigned PReg = PRegs[i];
1972 LiveInterval &pli = getInterval(PReg);
1973 if (!pli.liveAt(Index))
// (elided: continue;)
1976 SlotIndex StartIdx = Index.getLoadIndex();
1977 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
// If the conflict spans a live-range boundary we cannot carve it out —
// register allocation has genuinely failed; report fatally.
1978 if (!pli.isInOneLiveRange(StartIdx, EndIdx)) {
// (elided: `std::string msg;` declaration)
1980 raw_string_ostream Msg(msg);
1981 Msg << "Ran out of registers during register allocation!";
1982 if (MI->isInlineAsm()) {
1983 Msg << "\nPlease check your inline asm statement for invalid "
1984 << "constraints:\n";
1985 MI->print(Msg, tm_);
1987 report_fatal_error(Msg.str());
1989 pli.removeRange(StartIdx, EndIdx);
// (elided: LiveReg = true; and the `if (!LiveReg) continue;` guard)
1994 DEBUG(dbgs() << "Emergency spill around " << Index << '\t' << *MI);
1995 vrm.addEmergencySpill(SpillReg, MI);
// (elided: `return Cut;`-style final return)
2001 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2002 MachineInstr* startInst) {
2003 LiveInterval& Interval = getOrCreateInterval(reg);
2004 VNInfo* VN = Interval.getNextValue(
2005 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2006 startInst, getVNInfoAllocator());
2007 VN->setHasPHIKill(true);
2009 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2010 getMBBEndIdx(startInst->getParent()), VN);
2011 Interval.addRange(LR);