1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
49 // Hidden options for help debugging.
50 static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
53 STATISTIC(numIntervals , "Number of original intervals");
54 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
55 STATISTIC(numSplits , "Number of intervals split");
57 char LiveIntervals::ID = 0;
58 INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
59 "Live Interval Analysis", false, false)
60 INITIALIZE_PASS_DEPENDENCY(LiveVariables)
61 INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
62 INITIALIZE_PASS_DEPENDENCY(PHIElimination)
63 INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
64 INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
65 INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
66 INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
67 INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
68 "Live Interval Analysis", false, false)
70 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
72 AU.addRequired<AliasAnalysis>();
73 AU.addPreserved<AliasAnalysis>();
74 AU.addRequired<LiveVariables>();
75 AU.addPreserved<LiveVariables>();
76 AU.addRequired<MachineLoopInfo>();
77 AU.addPreserved<MachineLoopInfo>();
78 AU.addPreservedID(MachineDominatorsID);
81 AU.addPreservedID(PHIEliminationID);
82 AU.addRequiredID(PHIEliminationID);
85 AU.addRequiredID(TwoAddressInstructionPassID);
86 AU.addPreserved<ProcessImplicitDefs>();
87 AU.addRequired<ProcessImplicitDefs>();
88 AU.addPreserved<SlotIndexes>();
89 AU.addRequiredTransitive<SlotIndexes>();
90 MachineFunctionPass::getAnalysisUsage(AU);
93 void LiveIntervals::releaseMemory() {
94 // Free the live intervals themselves.
95 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
96 E = r2iMap_.end(); I != E; ++I)
101 // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
102 VNInfoAllocator.Reset();
103 while (!CloneMIs.empty()) {
104 MachineInstr *MI = CloneMIs.back();
106 mf_->DeleteMachineInstr(MI);
110 /// runOnMachineFunction - Register allocate the whole function
112 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
114 mri_ = &mf_->getRegInfo();
115 tm_ = &fn.getTarget();
116 tri_ = tm_->getRegisterInfo();
117 tii_ = tm_->getInstrInfo();
118 aa_ = &getAnalysis<AliasAnalysis>();
119 lv_ = &getAnalysis<LiveVariables>();
120 indexes_ = &getAnalysis<SlotIndexes>();
121 allocatableRegs_ = tri_->getAllocatableSet(fn);
125 numIntervals += getNumIntervals();
131 /// print - Implement the dump method.
132 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
133 OS << "********** INTERVALS **********\n";
134 for (const_iterator I = begin(), E = end(); I != E; ++I) {
135 I->second->print(OS, tri_);
142 void LiveIntervals::printInstrs(raw_ostream &OS) const {
143 OS << "********** MACHINEINSTRS **********\n";
144 mf_->print(OS, indexes_);
147 void LiveIntervals::dumpInstrs() const {
// conflictsWithPhysReg - Return true if any instruction inside li's single
// live range uses/defines a physical register that overlaps 'reg' (after
// mapping virtual operands through the VirtRegMap), other than copies
// involving li.reg itself.
// NOTE(review): this extraction is missing interleaved lines (early
// returns, loop/brace closers, and apparently a guard before the operand(0)/
// operand(1) copy check); tokens below are kept exactly as found.
151 bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
152 VirtRegMap &vrm, unsigned reg) {
153 // We don't handle fancy stuff crossing basic block boundaries
154 if (li.ranges.size() != 1)
156 const LiveRange &range = li.ranges.front();
157 SlotIndex idx = range.start.getBaseIndex();
158 SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
160 // Skip deleted instructions
161 MachineInstr *firstMI = getInstructionFromIndex(idx);
162 while (!firstMI && idx != end) {
163 idx = idx.getNextIndex();
164 firstMI = getInstructionFromIndex(idx);
169 // Find last instruction in range
170 SlotIndex lastIdx = end.getPrevIndex();
171 MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
172 while (!lastMI && lastIdx != idx) {
173 lastIdx = lastIdx.getPrevIndex();
174 lastMI = getInstructionFromIndex(lastIdx);
179 // Range cannot cross basic block boundaries or terminators
180 MachineBasicBlock *MBB = firstMI->getParent();
181 if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
184 MachineBasicBlock::const_iterator E = lastMI;
186 for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
187 const MachineInstr &MI = *I;
189 // Allow copies to and from li.reg
// NOTE(review): a copy-instruction guard before the next two lines appears
// to have been dropped — unguarded getOperand(0)/(1) would be unsafe.
191 if (MI.getOperand(0).getReg() == li.reg ||
192 MI.getOperand(1).getReg() == li.reg)
195 // Check for operands using reg
196 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
197 const MachineOperand& mop = MI.getOperand(i);
200 unsigned PhysReg = mop.getReg();
201 if (PhysReg == 0 || PhysReg == li.reg)
// Virtual operands are mapped to their assigned physreg via the VirtRegMap.
203 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
204 if (!vrm.hasPhys(PhysReg))
206 PhysReg = vrm.getPhys(PhysReg);
208 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
213 // No conflicts found.
// conflictsWithAliasRef - Return true if any instruction within li's live
// ranges (ignoring the copies in JoinedCopies) has a physical-register
// operand that overlaps Reg.
// NOTE(review): loop bounds/closing lines are missing from this extraction;
// tokens are kept exactly as found.
217 bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
218 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
219 for (LiveInterval::Ranges::const_iterator
220 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
221 for (SlotIndex index = I->start.getBaseIndex(),
222 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
224 index = index.getNextIndex()) {
225 MachineInstr *MI = getInstructionFromIndex(index);
227 continue; // skip deleted instructions
// Copies that coalescing has already decided to join are irrelevant here.
229 if (JoinedCopies.count(MI))
231 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
232 MachineOperand& MO = MI->getOperand(i);
235 unsigned PhysReg = MO.getReg();
236 if (PhysReg == 0 || PhysReg == Reg ||
237 TargetRegisterInfo::isVirtualRegister(PhysReg))
239 if (tri_->regsOverlap(Reg, PhysReg))
249 bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
250 unsigned Reg = MI.getOperand(MOIdx).getReg();
251 for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
252 const MachineOperand &MO = MI.getOperand(i);
255 if (MO.getReg() == Reg && MO.isDef()) {
256 assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
257 MI.getOperand(MOIdx).getSubReg() &&
258 (MO.getSubReg() || MO.isImplicit()));
265 /// isPartialRedef - Return true if the specified def at the specific index is
266 /// partially re-defining the specified live interval. A common case of this is
267 /// a definition of the sub-register.
268 bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
269 LiveInterval &interval) {
270 if (!MO.getSubReg() || MO.isEarlyClobber())
273 SlotIndex RedefIndex = MIIdx.getDefIndex();
274 const LiveRange *OldLR =
275 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
276 MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
278 return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
// handleVirtualRegisterDef - Build or extend the live interval for one
// virtual-register def operand (MO at index MOIdx of instruction mi, whose
// slot index is MIIdx). First def creates value #0 and the initial ranges;
// later defs are phi-join revivals, two-address redefs, or partial redefs.
// NOTE(review): this extraction is missing interleaved lines (part of the
// parameter list, several closing braces, and some statements); tokens below
// are kept exactly as found.
283 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
284 MachineBasicBlock::iterator mi,
288 LiveInterval &interval) {
289 DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));
291 // Virtual registers may be defined multiple times (due to phi
292 // elimination and 2-addr elimination). Much of what we do only has to be
293 // done once for the vreg. We use an empty interval to detect the first
294 // time we see a vreg.
295 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
296 if (interval.empty()) {
297 // Get the Idx of the defining instructions.
298 SlotIndex defIndex = MIIdx.getDefIndex();
299 // Earlyclobbers move back one, so that they overlap the live range
301 if (MO.isEarlyClobber())
302 defIndex = MIIdx.getUseIndex();
304 // Make sure the first definition is not a partial redefinition. Add an
305 // <imp-def> of the full register.
307 mi->addRegisterDefined(interval.reg);
309 MachineInstr *CopyMI = NULL;
310 if (mi->isCopyLike()) {
314 VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
315 assert(ValNo->id == 0 && "First value in interval is not 0?");
317 // Loop over all of the blocks that the vreg is defined in. There are
318 // two cases we have to handle here. The most common case is a vreg
319 // whose lifetime is contained within a basic block. In this case there
320 // will be a single kill, in MBB, which comes after the definition.
321 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
322 // FIXME: what about dead vars?
324 if (vi.Kills[0] != mi)
325 killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
327 killIdx = defIndex.getStoreIndex();
329 // If the kill happens after the definition, we have an intra-block
331 if (killIdx > defIndex) {
332 assert(vi.AliveBlocks.empty() &&
333 "Shouldn't be alive across any blocks!");
334 LiveRange LR(defIndex, killIdx, ValNo);
335 interval.addRange(LR);
336 DEBUG(dbgs() << " +" << LR << "\n");
341 // The other case we handle is when a virtual register lives to the end
342 // of the defining block, potentially live across some blocks, then is
343 // live into some number of blocks, but gets killed. Start by adding a
344 // range that goes from this definition to the end of the defining block.
345 LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
346 DEBUG(dbgs() << " +" << NewLR);
347 interval.addRange(NewLR);
349 bool PHIJoin = lv_->isPHIJoin(interval.reg);
352 // A phi join register is killed at the end of the MBB and revived as a new
353 // valno in the killing blocks.
354 assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
355 DEBUG(dbgs() << " phi-join");
356 ValNo->setHasPHIKill(true);
358 // Iterate over all of the blocks that the variable is completely
359 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
361 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
362 E = vi.AliveBlocks.end(); I != E; ++I) {
363 MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
364 LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
365 interval.addRange(LR);
366 DEBUG(dbgs() << " +" << LR);
370 // Finally, this virtual register is live from the start of any killing
371 // block to the 'use' slot of the killing instruction.
372 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
373 MachineInstr *Kill = vi.Kills[i];
374 SlotIndex Start = getMBBStartIdx(Kill->getParent());
375 SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();
377 // Create interval with one of a NEW value number. Note that this value
378 // number isn't actually defined by an instruction, weird huh? :)
// In the phi-join case each killing block gets its own PHI-def value number
// anchored at the block start (no real defining instruction exists there).
380 assert(getInstructionFromIndex(Start) == 0 &&
381 "PHI def index points at actual instruction.");
382 ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
383 ValNo->setIsPHIDef(true);
385 LiveRange LR(Start, killIdx, ValNo);
386 interval.addRange(LR);
387 DEBUG(dbgs() << " +" << LR);
391 if (MultipleDefsBySameMI(*mi, MOIdx))
392 // Multiple defs of the same virtual register by the same instruction.
393 // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
394 // This is likely due to elimination of REG_SEQUENCE instructions. Return
395 // here since there is nothing to do.
398 // If this is the second time we see a virtual register definition, it
399 // must be due to phi elimination or two addr elimination. If this is
400 // the result of two address elimination, then the vreg is one of the
401 // def-and-use register operand.
403 // It may also be partial redef like this:
404 // 80 %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
405 // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
406 bool PartReDef = isPartialRedef(MIIdx, MO, interval);
407 if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
408 // If this is a two-address definition, then we have already processed
409 // the live range. The only problem is that we didn't realize there
410 // are actually two values in the live interval. Because of this we
411 // need to take the LiveRegion that defines this register and split it
413 SlotIndex RedefIndex = MIIdx.getDefIndex();
414 if (MO.isEarlyClobber())
415 RedefIndex = MIIdx.getUseIndex();
417 const LiveRange *OldLR =
418 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
419 VNInfo *OldValNo = OldLR->valno;
420 SlotIndex DefIndex = OldValNo->def.getDefIndex();
422 // Delete the previous value, which should be short and continuous,
423 // because the 2-addr copy must be in the same MBB as the redef.
424 interval.removeRange(DefIndex, RedefIndex);
426 // The new value number (#1) is defined by the instruction we claimed
428 VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);
430 // Value#0 is now defined by the 2-addr instruction.
431 OldValNo->def = RedefIndex;
432 OldValNo->setCopy(0);
434 // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
435 if (PartReDef && mi->isCopyLike())
436 OldValNo->setCopy(&*mi);
438 // Add the new live interval which replaces the range for the input copy.
439 LiveRange LR(DefIndex, RedefIndex, ValNo);
440 DEBUG(dbgs() << " replace range with " << LR);
441 interval.addRange(LR);
443 // If this redefinition is dead, we need to add a dummy unit live
444 // range covering the def slot.
446 interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
450 dbgs() << " RESULT: ";
451 interval.print(dbgs(), tri_);
453 } else if (lv_->isPHIJoin(interval.reg)) {
454 // In the case of PHI elimination, each variable definition is only
455 // live until the end of the block. We've already taken care of the
456 // rest of the live range.
458 SlotIndex defIndex = MIIdx.getDefIndex();
459 if (MO.isEarlyClobber())
460 defIndex = MIIdx.getUseIndex();
463 MachineInstr *CopyMI = NULL;
464 if (mi->isCopyLike())
466 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
468 SlotIndex killIndex = getMBBEndIdx(mbb);
469 LiveRange LR(defIndex, killIndex, ValNo);
470 interval.addRange(LR);
471 ValNo->setHasPHIKill(true);
472 DEBUG(dbgs() << " phi-join +" << LR);
// A repeated def that is neither tied/partial nor a phi-join is a bug.
474 llvm_unreachable("Multiply defined register");
478 DEBUG(dbgs() << '\n');
// handlePhysicalRegisterDef - Add a live range for one physical-register def.
// A physreg interval never leaves its defining block: it runs from the def
// slot until a kill, a redefinition, or (if neither is found) is treated as
// dead at the def itself.
// NOTE(review): part of the parameter list and several closing braces /
// continue statements are missing from this extraction; tokens are kept
// exactly as found.
481 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
482 MachineBasicBlock::iterator mi,
485 LiveInterval &interval,
486 MachineInstr *CopyMI) {
487 // A physical register cannot be live across basic block, so its
488 // lifetime must end somewhere in its defining basic block.
489 DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));
491 SlotIndex baseIndex = MIIdx;
492 SlotIndex start = baseIndex.getDefIndex();
493 // Earlyclobbers move back one.
494 if (MO.isEarlyClobber())
495 start = MIIdx.getUseIndex();
496 SlotIndex end = start;
498 // If it is not used after definition, it is considered dead at
499 // the instruction defining it. Hence its interval is:
500 // [defSlot(def), defSlot(def)+1)
501 // For earlyclobbers, the defSlot was pushed back one; the extra
502 // advance below compensates.
504 DEBUG(dbgs() << " dead");
505 end = start.getStoreIndex();
509 // If it is not dead on definition, it must be killed by a
510 // subsequent instruction. Hence its interval is:
511 // [defSlot(def), useSlot(kill)+1)
512 baseIndex = baseIndex.getNextIndex();
// Scan forward through the block for the kill or redef of this register.
513 while (++mi != MBB->end()) {
515 if (mi->isDebugValue())
517 if (getInstructionFromIndex(baseIndex) == 0)
518 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
520 if (mi->killsRegister(interval.reg, tri_)) {
521 DEBUG(dbgs() << " killed");
522 end = baseIndex.getDefIndex();
525 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
527 if (mi->isRegTiedToUseOperand(DefIdx)) {
528 // Two-address instruction.
529 end = baseIndex.getDefIndex();
531 // Another instruction redefines the register before it is ever read.
532 // Then the register is essentially dead at the instruction that
533 // defines it. Hence its interval is:
534 // [defSlot(def), defSlot(def)+1)
535 DEBUG(dbgs() << " dead");
536 end = start.getStoreIndex();
542 baseIndex = baseIndex.getNextIndex();
545 // The only case we should have a dead physreg here without a killing or
546 // instruction where we know it's dead is if it is live-in to the function
547 // and never used. Another possible case is the implicit use of the
548 // physical register has been deleted by two-address pass.
549 end = start.getStoreIndex();
552 assert(start < end && "did not find end of interval?");
554 // Already exists? Extend old live interval.
555 VNInfo *ValNo = interval.getVNInfoAt(start);
556 bool Extend = ValNo != 0;
558 ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
559 if (Extend && MO.isEarlyClobber())
560 ValNo->setHasRedefByEC(true);
561 LiveRange LR(start, end, ValNo);
562 interval.addRange(LR);
563 DEBUG(dbgs() << " +" << LR << '\n');
566 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
567 MachineBasicBlock::iterator MI,
571 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
572 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
573 getOrCreateInterval(MO.getReg()));
574 else if (allocatableRegs_[MO.getReg()]) {
575 MachineInstr *CopyMI = NULL;
576 if (MI->isCopyLike())
578 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
579 getOrCreateInterval(MO.getReg()), CopyMI);
580 // Def of a register also defines its sub-registers.
581 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
582 // If MI also modifies the sub-register explicitly, avoid processing it
583 // more than once. Do not pass in TRI here so it checks for exact match.
584 if (!MI->definesRegister(*AS))
585 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
586 getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Create the live range for a register that is live
// into MBB: from the block start until its first kill/redef, or through the
// whole block if never referenced. The range is given a PHI-def value number
// anchored at the block start.
// NOTE(review): part of the parameter list, several closing braces, and some
// guard/continue lines are missing from this extraction; tokens are kept
// exactly as found.
590 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
592 LiveInterval &interval, bool isAlias) {
593 DEBUG(dbgs() << "\t\tlivein register: " << PrintReg(interval.reg, tri_));
595 // Look for kills, if it reaches a def before it's killed, then it shouldn't
596 // be considered a livein.
597 MachineBasicBlock::iterator mi = MBB->begin();
598 MachineBasicBlock::iterator E = MBB->end();
599 // Skip over DBG_VALUE at the start of the MBB.
600 if (mi != E && mi->isDebugValue()) {
601 while (++mi != E && mi->isDebugValue())
604 // MBB is empty except for DBG_VALUE's.
608 SlotIndex baseIndex = MIIdx;
609 SlotIndex start = baseIndex;
610 if (getInstructionFromIndex(baseIndex) == 0)
611 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
613 SlotIndex end = baseIndex;
614 bool SeenDefUse = false;
// Walk the block looking for the first kill or redefinition of the register.
617 if (mi->killsRegister(interval.reg, tri_)) {
618 DEBUG(dbgs() << " killed");
619 end = baseIndex.getDefIndex();
622 } else if (mi->definesRegister(interval.reg, tri_)) {
623 // Another instruction redefines the register before it is ever read.
624 // Then the register is essentially dead at the instruction that defines
625 // it. Hence its interval is:
626 // [defSlot(def), defSlot(def)+1)
627 DEBUG(dbgs() << " dead");
628 end = start.getStoreIndex();
633 while (++mi != E && mi->isDebugValue())
634 // Skip over DBG_VALUE.
637 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
640 // Live-in register might not be used at all.
643 DEBUG(dbgs() << " dead");
644 end = MIIdx.getStoreIndex();
646 DEBUG(dbgs() << " live through");
// The live-in value is modeled as a PHI-def at the block start: there is no
// real defining instruction at that index.
651 SlotIndex defIdx = getMBBStartIdx(MBB);
652 assert(getInstructionFromIndex(defIdx) == 0 &&
653 "PHI def index points at actual instruction.");
655 interval.getNextValue(defIdx, 0, VNInfoAllocator);
656 vni->setIsPHIDef(true);
657 LiveRange LR(start, end, vni);
659 interval.addRange(LR);
660 DEBUG(dbgs() << " +" << LR << '\n');
// NOTE(review): loop headers' continuation lines, several closing braces,
// and some guard lines are missing from this extraction; tokens are kept
// exactly as found.
663 /// computeIntervals - computes the live intervals for virtual
664 /// registers. for some ordering of the machine instructions [1,N] a
665 /// live interval is an interval [i, j) where 1 <= i <= j < N for
666 /// which a variable is live
667 void LiveIntervals::computeIntervals() {
668 DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
669 << "********** Function: "
670 << ((Value*)mf_->getFunction())->getName() << '\n');
672 SmallVector<unsigned, 8> UndefUses;
673 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
675 MachineBasicBlock *MBB = MBBI;
679 // Track the index of the current machine instr.
680 SlotIndex MIIndex = getMBBStartIdx(MBB);
681 DEBUG(dbgs() << "BB#" << MBB->getNumber()
682 << ":\t\t# derived from " << MBB->getName() << "\n");
684 // Create intervals for live-ins to this BB first.
685 for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
686 LE = MBB->livein_end(); LI != LE; ++LI) {
687 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
688 // Multiple live-ins can alias the same register.
689 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
690 if (!hasInterval(*AS))
691 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
695 // Skip over empty initial indices.
696 if (getInstructionFromIndex(MIIndex) == 0)
697 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
699 for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
701 DEBUG(dbgs() << MIIndex << "\t" << *MI);
702 if (MI->isDebugValue())
// Operands are walked in reverse so defs added by the handlers (e.g. the
// full-register <imp-def> inserted for partial first defs) are not revisited.
706 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
707 MachineOperand &MO = MI->getOperand(i);
708 if (!MO.isReg() || !MO.getReg())
711 // handle register defs - build intervals
713 handleRegisterDef(MBB, MI, MIIndex, MO, i);
714 else if (MO.isUndef())
715 UndefUses.push_back(MO.getReg());
718 // Move to the next instr slot.
719 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
723 // Create empty intervals for registers defined by implicit_def's (except
724 // for those implicit_def that define values which are liveout of their
726 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
727 unsigned UndefReg = UndefUses[i];
728 (void)getOrCreateInterval(UndefReg);
732 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
733 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
734 return new LiveInterval(reg, Weight);
737 /// dupInterval - Duplicate a live interval. The caller is responsible for
738 /// managing the allocated memory.
739 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
740 LiveInterval *NewLI = createInterval(li->reg);
741 NewLI->Copy(*li, mri_, getVNInfoAllocator());
// NOTE(review): several guard lines, continues, and closing braces are
// missing from this extraction; tokens are kept exactly as found.
745 /// shrinkToUses - After removing some uses of a register, shrink its live
746 /// range to just the remaining uses. This method does not compute reaching
747 /// defs for new uses, and it doesn't remove dead defs.
748 void LiveIntervals::shrinkToUses(LiveInterval *li) {
749 DEBUG(dbgs() << "Shrink: " << *li << '\n');
750 assert(TargetRegisterInfo::isVirtualRegister(li->reg)
751 && "Can't only shrink physical registers");
752 // Find all the values used, including PHI kills.
753 SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;
755 // Visit all instructions reading li->reg.
756 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li->reg);
757 MachineInstr *UseMI = I.skipInstruction();) {
758 if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
760 SlotIndex Idx = getInstructionIndex(UseMI).getUseIndex();
761 VNInfo *VNI = li->getVNInfoAt(Idx);
762 assert(VNI && "Live interval not live into reading instruction");
763 if (VNI->def == Idx) {
764 // Special case: An early-clobber tied operand reads and writes the
765 // register one slot early.
766 Idx = Idx.getPrevSlot();
767 VNI = li->getVNInfoAt(Idx);
768 assert(VNI && "Early-clobber tied value not available");
770 WorkList.push_back(std::make_pair(Idx, VNI));
773 // Create a new live interval with only minimal live segments per def.
774 LiveInterval NewLI(li->reg, 0);
775 for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
780 NewLI.addRange(LiveRange(VNI->def, VNI->def.getNextSlot(), VNI));
783 // Extend intervals to reach all uses in WorkList.
784 while (!WorkList.empty()) {
785 SlotIndex Idx = WorkList.back().first;
786 VNInfo *VNI = WorkList.back().second;
789 // Extend the live range for VNI to be live at Idx.
790 LiveInterval::iterator I = NewLI.find(Idx);
793 if (I != NewLI.end() && I->start <= Idx) {
794 assert(I->valno == VNI && "Unexpected existing value number");
798 // Is there already a live range in the block containing Idx?
799 const MachineBasicBlock *MBB = getMBBFromIndex(Idx);
800 SlotIndex BlockStart = getMBBStartIdx(MBB);
801 DEBUG(dbgs() << "Shrink: Use val#" << VNI->id << " at " << Idx
802 << " in BB#" << MBB->getNumber() << '@' << BlockStart);
803 if (I != NewLI.begin() && (--I)->end > BlockStart) {
804 assert(I->valno == VNI && "Wrong reaching def");
805 DEBUG(dbgs() << " extend [" << I->start << ';' << I->end << ")\n");
806 // Is this the first use of a PHIDef in its defining block?
807 if (VNI->isPHIDef() && I->end == VNI->def.getNextSlot()) {
808 // The PHI is live, make sure the predecessors are live-out.
809 for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
810 PE = MBB->pred_end(); PI != PE; ++PI) {
811 SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
812 VNInfo *PVNI = li->getVNInfoAt(Stop);
813 // A predecessor is not required to have a live-out value for a PHI.
815 assert(PVNI->hasPHIKill() && "Missing hasPHIKill flag");
816 WorkList.push_back(std::make_pair(Stop, PVNI));
821 // Extend the live range in the block to include Idx.
822 NewLI.addRange(LiveRange(I->end, Idx.getNextSlot(), VNI));
826 // VNI is live-in to MBB.
827 DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
828 NewLI.addRange(LiveRange(BlockStart, Idx.getNextSlot(), VNI));
830 // Make sure VNI is live-out from the predecessors.
831 for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
832 PE = MBB->pred_end(); PI != PE; ++PI) {
833 SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
834 assert(li->getVNInfoAt(Stop) == VNI && "Wrong value out of predecessor");
835 WorkList.push_back(std::make_pair(Stop, VNI));
839 // Handle dead values.
840 for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
845 LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def);
846 assert(LII != NewLI.end() && "Missing live range for PHI");
847 if (LII->end != VNI->def.getNextSlot())
// NOTE(review): the negated condition below contradicts the "dead PHI"
// comment under it — upstream uses `if (VNI->isPHIDef())` here. Looks like
// an inverted condition introduced by the mangled extraction; verify against
// the original revision before relying on this path.
849 if (!VNI->isPHIDef()) {
850 // This is a dead PHI. Remove it.
851 VNI->setIsUnused(true);
852 NewLI.removeRange(*LII);
854 // This is a dead def. Make sure the instruction knows.
855 MachineInstr *MI = getInstructionFromIndex(VNI->def);
856 assert(MI && "No instruction defining live value");
857 MI->addRegisterDead(li->reg, tri_);
861 // Move the trimmed ranges back.
862 li->ranges.swap(NewLI.ranges);
863 DEBUG(dbgs() << "Shrink: " << *li << '\n');
867 //===----------------------------------------------------------------------===//
868 // Register allocator hooks.
871 MachineBasicBlock::iterator
872 LiveIntervals::getLastSplitPoint(const LiveInterval &li,
873 MachineBasicBlock *mbb) {
874 const MachineBasicBlock *lpad = mbb->getLandingPadSuccessor();
876 // If li is not live into a landing pad, we can insert spill code before the
878 if (!lpad || !isLiveInToMBB(li, lpad))
879 return mbb->getFirstTerminator();
881 // When there is a landing pad, spill code must go before the call instruction
883 MachineBasicBlock::iterator I = mbb->end(), B = mbb->begin();
886 if (I->getDesc().isCall())
889 // The block contains no calls that can throw, so use the first terminator.
890 return mbb->getFirstTerminator();
893 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
894 /// allow one) virtual register operand, then its uses are implicitly using
895 /// the register. Returns the virtual register.
896 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
897 MachineInstr *MI) const {
899 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
900 MachineOperand &MO = MI->getOperand(i);
901 if (!MO.isReg() || !MO.isUse())
903 unsigned Reg = MO.getReg();
904 if (Reg == 0 || Reg == li.reg)
907 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
908 !allocatableRegs_[Reg])
910 // FIXME: For now, only remat MI with at most one register operand.
912 "Can't rematerialize instruction with multiple register operand!");
921 /// isValNoAvailableAt - Return true if the val# of the specified interval
922 /// which reaches the given instruction also reaches the specified use index.
923 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
924 SlotIndex UseIdx) const {
925 VNInfo *UValNo = li.getVNInfoAt(UseIdx);
926 return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
929 /// isReMaterializable - Returns true if the definition MI of the specified
930 /// val# of the specified interval is re-materializable.
932 LiveIntervals::isReMaterializable(const LiveInterval &li,
933 const VNInfo *ValNo, MachineInstr *MI,
934 const SmallVectorImpl<LiveInterval*> &SpillIs,
939 if (!tii_->isTriviallyReMaterializable(MI, aa_))
942 // Target-specific code can mark an instruction as being rematerializable
943 // if it has one virtual reg use, though it had better be something like
944 // a PIC base register which is likely to be live everywhere.
945 unsigned ImpUse = getReMatImplicitUse(li, MI);
947 const LiveInterval &ImpLi = getInterval(ImpUse);
948 for (MachineRegisterInfo::use_nodbg_iterator
949 ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
951 MachineInstr *UseMI = &*ri;
952 SlotIndex UseIdx = getInstructionIndex(UseMI);
953 if (li.getVNInfoAt(UseIdx) != ValNo)
955 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
959 // If a register operand of the re-materialized instruction is going to
960 // be spilled next, then it's not legal to re-materialize this instruction.
961 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
962 if (ImpUse == SpillIs[i]->reg)
968 /// isReMaterializable - Returns true if the definition MI of the specified
969 /// val# of the specified interval is re-materializable.
970 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
971 const VNInfo *ValNo, MachineInstr *MI) {
972 SmallVector<LiveInterval*, 4> Dummy1;
974 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
977 /// isReMaterializable - Returns true if every definition of MI of every
978 /// val# of the specified interval is re-materializable.
980 LiveIntervals::isReMaterializable(const LiveInterval &li,
981 const SmallVectorImpl<LiveInterval*> &SpillIs,
984 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
986 const VNInfo *VNI = *i;
988 continue; // Dead val#.
989 // Is the def for the val# rematerializable?
990 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
993 bool DefIsLoad = false;
995 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
1002 /// FilterFoldedOps - Filter out two-address use operands. Return
1003 /// true if it finds any issue with the operands that ought to prevent
// Accumulates VirtRegMap::isMod/isRef bits into MRInfo (an out-parameter whose
// declaration line is elided in this listing) and collects the operand indexes
// that remain candidates for folding into FoldOps.
1005 static bool FilterFoldedOps(MachineInstr *MI,
1006 SmallVector<unsigned, 2> &Ops,
1008 SmallVector<unsigned, 2> &FoldOps) {
1010 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1011 unsigned OpIdx = Ops[i];
1012 MachineOperand &MO = MI->getOperand(OpIdx);
1013 // FIXME: fold subreg use.
// Defs mark the register as modified.
1017 MRInfo |= (unsigned)VirtRegMap::isMod;
1019 // Filter out two-address use operand(s).
// A use tied to a def is both modified and read; it is not added to FoldOps.
1020 if (MI->isRegTiedToDefOperand(OpIdx)) {
1021 MRInfo = VirtRegMap::isModRef;
1024 MRInfo |= (unsigned)VirtRegMap::isRef;
1026 FoldOps.push_back(OpIdx);
1032 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1033 /// slot / to reg or any rematerialized load into ith operand of specified
1034 /// MI. If it is successul, MI is updated with the newly created MI and
// On success the old MI is erased and replaced by the folded instruction fmi
// in all book-keeping maps (slot indexes, VirtRegMap spill/restore points).
1036 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1037 VirtRegMap &vrm, MachineInstr *DefMI,
1039 SmallVector<unsigned, 2> &Ops,
1040 bool isSS, int Slot, unsigned Reg) {
1041 // If it is an implicit def instruction, just delete it.
1042 if (MI->isImplicitDef()) {
1043 RemoveMachineInstrFromMaps(MI);
1044 vrm.RemoveMachineInstrFromMaps(MI);
1045 MI->eraseFromParent();
1050 // Filter the list of operand indexes that are to be folded. Abort if
1051 // any operand will prevent folding.
1052 unsigned MRInfo = 0;
1053 SmallVector<unsigned, 2> FoldOps;
1054 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1057 // The only time it's safe to fold into a two address instruction is when
1058 // it's folding reload and spill from / into a spill stack slot.
1059 if (DefMI && (MRInfo & VirtRegMap::isMod))
// isSS selects stack-slot folding vs. folding the rematerialized DefMI.
1062 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
1063 : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
1065 // Remember this instruction uses the spill slot.
1066 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1068 // Attempt to fold the memory reference into the instruction. If
1069 // we can do this, we don't need to insert spill code.
1070 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1071 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Transfer all VirtRegMap annotations from the old MI to the folded one.
1072 vrm.transferSpillPts(MI, fmi);
1073 vrm.transferRestorePts(MI, fmi);
1074 vrm.transferEmergencySpills(MI, fmi);
1075 ReplaceMachineInstrInMaps(MI, fmi);
1076 MI->eraseFromParent();
1084 /// canFoldMemoryOperand - Returns true if the specified load / store
1085 /// folding is possible.
// Dry-run companion of tryFoldMemoryOperand: performs the same operand
// filtering and legality checks but asks the target without mutating MI.
1086 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1087 SmallVector<unsigned, 2> &Ops,
1089 // Filter the list of operand indexes that are to be folded. Abort if
1090 // any operand will prevent folding.
1091 unsigned MRInfo = 0;
1092 SmallVector<unsigned, 2> FoldOps;
1093 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1096 // It's only legal to remat for a use, not a def.
1097 if (ReMat && (MRInfo & VirtRegMap::isMod))
1100 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Returns whether every range of the live interval is
// covered by the same machine basic block (used to decide if splitting the
// interval across blocks is worthwhile).
1103 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1104 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
// Block covering the first range; subsequent ranges must map to the same one.
1106 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
1111 for (++itr; itr != li.ranges.end(); ++itr) {
1112 MachineBasicBlock *mbb2 =
1113 indexes_->getMBBCoveringRange(itr->start, itr->end);
1122 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1123 /// interval on to-be re-materialized operands of MI) with new register.
1124 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1125 MachineInstr *MI, unsigned NewVReg,
1127 // There is an implicit use. That means one of the other operand is
1128 // being remat'ed and the remat'ed instruction has li.reg as an
1129 // use operand. Make sure we rewrite that as well.
1130 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1131 MachineOperand &MO = MI->getOperand(i);
1132 unsigned Reg = MO.getReg();
// Only virtual registers can have a pending remat recorded in the VirtRegMap.
1135 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1137 if (!vrm.isReMaterialized(Reg))
// Redirect the remat'ed instruction's use of li.reg to the new vreg.
1139 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1140 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1142 UseMO->setReg(NewVReg);
1146 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1147 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// Rewrites one instruction MI that refers to the spilled interval li: either
// deletes a fully-remat'able def, folds a load/store, or renames the operands
// to a fresh vreg NewVReg and records the resulting short live range.
// NOTE(review): the listing elides original lines throughout (including the
// RestartInstruction: label targeted by the goto below) — the visible body is
// not the complete control flow.
1148 bool LiveIntervals::
1149 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1150 bool TrySplit, SlotIndex index, SlotIndex end,
1152 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1153 unsigned Slot, int LdSlot,
1154 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1156 const TargetRegisterClass* rc,
1157 SmallVector<int, 4> &ReMatIds,
1158 const MachineLoopInfo *loopInfo,
1159 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1160 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1161 std::vector<LiveInterval*> &NewLIs) {
1162 bool CanFold = false;
// Operand scan; note getNumOperands() is re-read each iteration because
// folding below can rewrite MI.
1164 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1165 MachineOperand& mop = MI->getOperand(i);
1168 unsigned Reg = mop.getReg();
1169 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1174 bool TryFold = !DefIsReMat;
1175 bool FoldSS = true; // Default behavior unless it's a remat.
1176 int FoldSlot = Slot;
1178 // If this is the rematerializable definition MI itself and
1179 // all of its uses are rematerialized, simply delete it.
1180 if (MI == ReMatOrigDefMI && CanDelete) {
1181 DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
1183 RemoveMachineInstrFromMaps(MI);
1184 vrm.RemoveMachineInstrFromMaps(MI);
1185 MI->eraseFromParent();
1189 // If def for this use can't be rematerialized, then try folding.
1190 // If def is rematerializable and it's a load, also try folding.
1191 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1193 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1199 // Scan all of the operands of this instruction rewriting operands
1200 // to use NewVReg instead of li.reg as appropriate. We do this for
1203 // 1. If the instr reads the same spilled vreg multiple times, we
1204 // want to reuse the NewVReg.
1205 // 2. If the instr is a two-addr instruction, we are required to
1206 // keep the src/dst regs pinned.
1208 // Keep track of whether we replace a use and/or def so that we can
1209 // create the spill interval with the appropriate range.
1210 SmallVector<unsigned, 2> Ops;
1211 tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);
1213 // Create a new virtual register for the spill interval.
1214 // Create the new register now so we can map the fold instruction
1215 // to the new register so when it is unfolded we get the correct
1217 bool CreatedNewVReg = false;
1219 NewVReg = mri_->createVirtualRegister(rc);
1221 CreatedNewVReg = true;
1223 // The new virtual register should get the same allocation hints as the
1225 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1226 if (Hint.first || Hint.second)
1227 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1233 // Do not fold load / store here if we are splitting. We'll find an
1234 // optimal point to insert a load / store later.
1236 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1237 Ops, FoldSS, FoldSlot, NewVReg)) {
1238 // Folding the load/store can completely change the instruction in
1239 // unpredictable ways, rescan it from the beginning.
1242 // We need to give the new vreg the same stack slot as the
1243 // spilled interval.
1244 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1250 if (isNotInMIMap(MI))
// Restart the operand scan; folding invalidated operand indexes.
1252 goto RestartInstruction;
1255 // We'll try to fold it later if it's profitable.
1256 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand (and all tied/duplicate operands below) to NewVReg.
1260 mop.setReg(NewVReg);
1261 if (mop.isImplicit())
1262 rewriteImplicitOps(li, MI, NewVReg, vrm);
1264 // Reuse NewVReg for other reads.
1265 bool HasEarlyClobber = false;
1266 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1267 MachineOperand &mopj = MI->getOperand(Ops[j]);
1268 mopj.setReg(NewVReg);
1269 if (mopj.isImplicit())
1270 rewriteImplicitOps(li, MI, NewVReg, vrm);
1271 if (mopj.isEarlyClobber())
1272 HasEarlyClobber = true;
1275 if (CreatedNewVReg) {
1277 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1278 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1279 // Each valnum may have its own remat id.
1280 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
// Reuse the remat id already assigned for this val#.
1282 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1284 if (!CanDelete || (HasUse && HasDef)) {
1285 // If this is a two-addr instruction then its use operands are
1286 // rematerializable but its def is not. It should be assigned a
1288 vrm.assignVirt2StackSlot(NewVReg, Slot);
1291 vrm.assignVirt2StackSlot(NewVReg, Slot);
1293 } else if (HasUse && HasDef &&
1294 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1295 // If this interval hasn't been assigned a stack slot (because earlier
1296 // def is a deleted remat def), do it now.
1297 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1298 vrm.assignVirt2StackSlot(NewVReg, Slot);
1301 // Re-matting an instruction with virtual register use. Add the
1302 // register as an implicit use on the use MI.
1303 if (DefIsReMat && ImpUse)
1304 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1306 // Create a new register interval for this spill / remat.
1307 LiveInterval &nI = getOrCreateInterval(NewVReg);
1308 if (CreatedNewVReg) {
1309 NewLIs.push_back(&nI);
1310 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1312 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Build the tiny live range covering just this instruction's use/def slots.
1316 if (CreatedNewVReg) {
1317 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1318 nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
1319 DEBUG(dbgs() << " +" << LR);
1322 // Extend the split live interval to this def / use.
1323 SlotIndex End = index.getDefIndex();
1324 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1325 nI.getValNumInfo(nI.getNumValNums()-1));
1326 DEBUG(dbgs() << " +" << LR);
1331 // An early clobber starts at the use slot, except for an early clobber
1332 // tied to a use operand (yes, that is a thing).
1333 LiveRange LR(HasEarlyClobber && !HasUse ?
1334 index.getUseIndex() : index.getDefIndex(),
1335 index.getStoreIndex(),
1336 nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
1337 DEBUG(dbgs() << " +" << LR);
1342 dbgs() << "\t\t\t\tAdded new interval: ";
1343 nI.print(dbgs(), tri_);
// anyKillInMBBAfterIdx - Returns true if the interval is killed anywhere in
// MBB strictly after Idx (delegates to LiveInterval::killedInRange over the
// slot range (Idx, end-of-block]).
1349 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1351 MachineBasicBlock *MBB,
1352 SlotIndex Idx) const {
1353 return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
1356 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1357 /// during spilling.
// Pairs an instruction with its slot index so rewrite candidates can be
// sorted into program order before processing.
1359 struct RewriteInfo {
1362 RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
// Strict-weak ordering by slot index, used with std::sort below.
1365 struct RewriteInfoCompare {
1366 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1367 return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Rewrite every instruction in the live range
// [I->start, I->end) that touches li.reg: collects them sorted by slot index,
// rewrites each via rewriteInstructionForSpills, and records per-MBB spill
// (SpillIdxes/SpillMBBs) and restore (RestoreIdxes/RestoreMBBs) points for
// the caller to materialize.
// NOTE(review): many original lines are elided in this listing; the visible
// body is not the complete control flow.
1372 void LiveIntervals::
1373 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1374 LiveInterval::Ranges::const_iterator &I,
1375 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1376 unsigned Slot, int LdSlot,
1377 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1379 const TargetRegisterClass* rc,
1380 SmallVector<int, 4> &ReMatIds,
1381 const MachineLoopInfo *loopInfo,
1382 BitVector &SpillMBBs,
1383 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1384 BitVector &RestoreMBBs,
1385 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1386 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1387 std::vector<LiveInterval*> &NewLIs) {
1388 bool AllCanFold = true;
1389 unsigned NewVReg = 0;
1390 SlotIndex start = I->start.getBaseIndex();
1391 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1393 // First collect all the def / use in this live range that will be rewritten.
1394 // Make sure they are sorted according to instruction index.
1395 std::vector<RewriteInfo> RewriteMIs;
// The iterator is advanced before potential erasure (the ++ri line is elided
// here) so deleting MI below does not invalidate the traversal.
1396 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1397 re = mri_->reg_end(); ri != re; ) {
1398 MachineInstr *MI = &*ri;
1399 MachineOperand &O = ri.getOperand();
1401 if (MI->isDebugValue()) {
1402 // Modify DBG_VALUE now that the value is in a spill slot.
1403 if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
1404 uint64_t Offset = MI->getOperand(1).getImm();
1405 const MDNode *MDPtr = MI->getOperand(2).getMetadata();
1406 DebugLoc DL = MI->getDebugLoc();
1407 int FI = isLoadSS ? LdSlot : (int)Slot;
// Ask the target to re-express the DBG_VALUE against the frame index.
1408 if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
1409 Offset, MDPtr, DL)) {
1410 DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
1411 ReplaceMachineInstrInMaps(MI, NewDV);
1412 MachineBasicBlock *MBB = MI->getParent();
1413 MBB->insert(MBB->erase(MI), NewDV);
// No frame-index form available: drop the debug info rather than keep a
// DBG_VALUE referring to a spilled vreg.
1418 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1419 RemoveMachineInstrFromMaps(MI);
1420 vrm.RemoveMachineInstrFromMaps(MI);
1421 MI->eraseFromParent();
1424 assert(!(O.isImplicit() && O.isUse()) &&
1425 "Spilling register that's used as implicit use?");
1426 SlotIndex index = getInstructionIndex(MI);
// Skip instructions outside the live range being rewritten.
1427 if (index < start || index >= end)
1431 // Must be defined by an implicit def. It should not be spilled. Note,
1432 // this is for correctness reason. e.g.
1433 // 8   %reg1024<def> = IMPLICIT_DEF
1434 // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1435 // The live range [12, 14) are not part of the r1024 live interval since
1436 // it's defined by an implicit def. It will not conflicts with live
1437 // interval of r1025. Now suppose both registers are spilled, you can
1438 // easily see a situation where both registers are reloaded before
1439 // the INSERT_SUBREG and both target registers that would overlap.
1441 RewriteMIs.push_back(RewriteInfo(index, MI));
1443 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1445 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1446 // Now rewrite the defs and uses.
1447 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1448 RewriteInfo &rwi = RewriteMIs[i];
1450 SlotIndex index = rwi.Index;
1451 MachineInstr *MI = rwi.MI;
1452 // If MI def and/or use the same register multiple times, then there
1453 // are multiple entries.
1454 while (i != e && RewriteMIs[i].MI == MI) {
1455 assert(RewriteMIs[i].Index == index);
1458 MachineBasicBlock *MBB = MI->getParent();
1460 if (ImpUse && MI != ReMatDefMI) {
1461 // Re-matting an instruction with virtual register use. Prevent interval
1462 // from being spilled.
1463 getInterval(ImpUse).markNotSpillable();
1466 unsigned MBBId = MBB->getNumber();
1467 unsigned ThisVReg = 0;
// Reuse the vreg already created for this MBB, if any, so multiple rewrites
// in one block share a single new interval.
1469 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1470 if (NVI != MBBVRegsMap.end()) {
1471 ThisVReg = NVI->second;
1478 // It's better to start a new interval to avoid artifically
1479 // extend the new interval.
1480 if (MI->readsWritesVirtualRegister(li.reg) ==
1481 std::make_pair(false,true)) {
// A pure def starts fresh: forget the block's previous vreg.
1482 MBBVRegsMap.erase(MBB->getNumber());
1488 bool IsNew = ThisVReg == 0;
1490 // This ends the previous live interval. If all of its def / use
1491 // can be folded, give it a low spill weight.
1492 if (NewVReg && TrySplit && AllCanFold) {
1493 LiveInterval &nI = getOrCreateInterval(NewVReg);
1500 bool HasDef = false;
1501 bool HasUse = false;
1502 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1503 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1504 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1505 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1506 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs)
1507 if (!HasDef && !HasUse)
1510 AllCanFold &= CanFold;
1512 // Update weight of spill interval.
1513 LiveInterval &nI = getOrCreateInterval(NewVReg);
1515 // The spill weight is now infinity as it cannot be spilled again.
1516 nI.markNotSpillable();
1520 // Keep track of the last def and first use in each MBB.
1522 if (MI != ReMatOrigDefMI || !CanDelete) {
1523 bool HasKill = false;
1525 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1527 // If this is a two-address code, then this index starts a new VNInfo.
1528 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1530 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1532 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1533 SpillIdxes.find(MBBId);
1535 if (SII == SpillIdxes.end()) {
1536 std::vector<SRInfo> S;
1537 S.push_back(SRInfo(index, NewVReg, true));
1538 SpillIdxes.insert(std::make_pair(MBBId, S));
1539 } else if (SII->second.back().vreg != NewVReg) {
1540 SII->second.push_back(SRInfo(index, NewVReg, true));
1541 } else if (index > SII->second.back().index) {
1542 // If there is an earlier def and this is a two-address
1543 // instruction, then it's not possible to fold the store (which
1544 // would also fold the load).
1545 SRInfo &Info = SII->second.back();
1547 Info.canFold = !HasUse;
1549 SpillMBBs.set(MBBId);
1550 } else if (SII != SpillIdxes.end() &&
1551 SII->second.back().vreg == NewVReg &&
1552 index > SII->second.back().index) {
1553 // There is an earlier def that's not killed (must be two-address).
1554 // The spill is no longer needed.
1555 SII->second.pop_back();
1556 if (SII->second.empty()) {
1557 SpillIdxes.erase(MBBId);
1558 SpillMBBs.reset(MBBId);
// Use side: decide whether a restore (reload) is needed and foldable.
1565 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1566 SpillIdxes.find(MBBId);
1567 if (SII != SpillIdxes.end() &&
1568 SII->second.back().vreg == NewVReg &&
1569 index > SII->second.back().index)
1570 // Use(s) following the last def, it's not safe to fold the spill.
1571 SII->second.back().canFold = false;
1572 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1573 RestoreIdxes.find(MBBId);
1574 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1575 // If we are splitting live intervals, only fold if it's the first
1576 // use and there isn't another use later in the MBB.
1577 RII->second.back().canFold = false;
1579 // Only need a reload if there isn't an earlier def / use.
1580 if (RII == RestoreIdxes.end()) {
1581 std::vector<SRInfo> Infos;
1582 Infos.push_back(SRInfo(index, NewVReg, true));
1583 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1585 RII->second.push_back(SRInfo(index, NewVReg, true));
1587 RestoreMBBs.set(MBBId);
1591 // Update spill weight.
1592 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1593 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1596 if (NewVReg && TrySplit && AllCanFold) {
1597 // If all of its def / use can be folded, give it a low spill weight.
1598 LiveInterval &nI = getOrCreateInterval(NewVReg);
// alsoFoldARestore - Returns true if MBB Id has a recorded restore of vreg vr
// at slot index `index` that is still marked foldable; used when folding a
// spill to check whether the matching reload can be folded too.
1603 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1604 unsigned vr, BitVector &RestoreMBBs,
1605 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores recorded for this block at all.
1606 if (!RestoreMBBs[Id])
1608 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1609 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1610 if (Restores[i].index == index &&
1611 Restores[i].vreg == vr &&
1612 Restores[i].canFold)
// eraseRestoreInfo - Invalidate the recorded restore(s) of vr at `index` in
// MBB Id by resetting their index to the default SlotIndex (callers treat a
// default index as "no restore needed").
1617 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1618 unsigned vr, BitVector &RestoreMBBs,
1619 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1620 if (!RestoreMBBs[Id])
1622 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1623 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
// NOTE(review): the condition tests `Restores[i].vreg` truthiness, not
// equality with vr — presumably the `== vr` comparison was elided/lost in
// this listing; verify against the original source.
1624 if (Restores[i].index == index && Restores[i].vreg)
1625 Restores[i].index = SlotIndex();
1628 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1629 /// spilled and create empty intervals for their uses.
1631 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1632 const TargetRegisterClass* rc,
1633 std::vector<LiveInterval*> &NewLIs) {
// Walk all remaining references to li.reg; iterator advance is elided in this
// listing but must happen before any erasure below.
1634 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1635 re = mri_->reg_end(); ri != re; ) {
1636 MachineOperand &O = ri.getOperand();
1637 MachineInstr *MI = &*ri;
1639 if (MI->isDebugValue()) {
1640 // Remove debug info for now.
1642 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
// Any def still referencing li.reg at this point must be the IMPLICIT_DEF
// itself; delete it outright.
1646 assert(MI->isImplicitDef() &&
1647 "Register def was not rewritten?");
1648 RemoveMachineInstrFromMaps(MI);
1649 vrm.RemoveMachineInstrFromMaps(MI);
1650 MI->eraseFromParent();
1652 // This must be an use of an implicit_def so it's not part of the live
1653 // interval. Create a new empty live interval for it.
1654 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1655 unsigned NewVReg = mri_->createVirtualRegister(rc);
1657 vrm.setIsImplicitlyDefined(NewVReg);
1658 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Rewrite every operand of this user that still names li.reg.
1659 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1660 MachineOperand &MO = MI->getOperand(i);
1661 if (MO.isReg() && MO.getReg() == li.reg) {
// getSpillWeight - Compute the weight contribution of one def/use at the
// given loop depth; deeper loops make spilling this interval more expensive.
1671 LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1672 // Limit the loop depth ridiculousness.
1673 if (loopDepth > 200)
1676 // The loop depth is used to roughly estimate the number of times the
1677 // instruction is executed. Something like 10^d is simple, but will quickly
1678 // overflow a float. This expression behaves like 10^d for small d, but is
1679 // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1680 // headroom before overflow.
1681 float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
// isDef/isUse are bools; their sum is 0, 1 or 2 accesses at this site.
1683 return (isDef + isUse) * lc;
// normalizeSpillWeights - Apply normalizeSpillWeight to each freshly created
// spill interval so their weights are comparable to ordinary intervals.
1687 LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1688 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1689 normalizeSpillWeight(*NewLIs[i]);
// addIntervalsForSpills - Top-level spiller entry point: spill interval li,
// preferring rematerialization and load/store folding over reload/store
// insertion, and return the set of new (short) intervals that replace it.
// NOTE(review): many original lines are elided in this listing; the visible
// body is not the complete control flow.
1692 std::vector<LiveInterval*> LiveIntervals::
1693 addIntervalsForSpills(const LiveInterval &li,
1694 const SmallVectorImpl<LiveInterval*> &SpillIs,
1695 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1696 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1699 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1700 li.print(dbgs(), tri_);
1704 // Each bit specify whether a spill is required in the MBB.
1705 BitVector SpillMBBs(mf_->getNumBlockIDs());
1706 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1707 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1708 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1709 DenseMap<unsigned,unsigned> MBBVRegsMap;
1710 std::vector<LiveInterval*> NewLIs;
1711 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-val# remat bookkeeping, indexed by VNInfo id.
1713 unsigned NumValNums = li.getNumValNums();
1714 SmallVector<MachineInstr*, 4> ReMatDefs;
1715 ReMatDefs.resize(NumValNums, NULL);
1716 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1717 ReMatOrigDefs.resize(NumValNums, NULL);
1718 SmallVector<int, 4> ReMatIds;
1719 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1720 BitVector ReMatDelete(NumValNums);
1721 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1723 // Spilling a split live interval. It cannot be split any further. Also,
1724 // it's also guaranteed to be a single val# / range interval.
1725 if (vrm.getPreSplitReg(li.reg)) {
1726 vrm.setIsSplitFromReg(li.reg, 0);
1727 // Unset the split kill marker on the last use.
1728 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1729 if (KillIdx != SlotIndex()) {
1730 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1731 assert(KillMI && "Last use disappeared?");
1732 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1733 assert(KillOp != -1 && "Last use disappeared?");
1734 KillMI->getOperand(KillOp).setIsKill(false);
1736 vrm.removeKillPoint(li.reg);
1737 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1738 Slot = vrm.getStackSlot(li.reg);
1739 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1740 MachineInstr *ReMatDefMI = DefIsReMat ?
1741 vrm.getReMaterializedMI(li.reg) : NULL;
1743 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1744 bool isLoad = isLoadSS ||
1745 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1746 bool IsFirstRange = true;
1747 for (LiveInterval::Ranges::const_iterator
1748 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1749 // If this is a split live interval with multiple ranges, it means there
1750 // are two-address instructions that re-defined the value. Only the
1751 // first def can be rematerialized!
1753 // Note ReMatOrigDefMI has already been deleted.
// First range: remat info is forwarded to the rewriter.
1754 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1755 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1756 false, vrm, rc, ReMatIds, loopInfo,
1757 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1758 MBBVRegsMap, NewLIs);
// Later ranges: no remat possible, plain spill/restore rewriting.
1760 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1761 Slot, 0, false, false, false,
1762 false, vrm, rc, ReMatIds, loopInfo,
1763 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1764 MBBVRegsMap, NewLIs);
1766 IsFirstRange = false;
// Pre-split path is done; finish up and return (return stmt elided here).
1769 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1770 normalizeSpillWeights(NewLIs);
// Non-pre-split path: consider splitting if the interval spans several MBBs.
1774 bool TrySplit = !intervalIsInOneMBB(li);
1777 bool NeedStackSlot = false;
1778 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1780 const VNInfo *VNI = *i;
1781 unsigned VN = VNI->id;
1782 if (VNI->isUnused())
1783 continue; // Dead val#.
1784 // Is the def for the val# rematerializable?
1785 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1787 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1788 // Remember how to remat the def of this val#.
1789 ReMatOrigDefs[VN] = ReMatDefMI;
1790 // Original def may be modified so we have to make a copy here.
1791 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1792 CloneMIs.push_back(Clone);
1793 ReMatDefs[VN] = Clone;
1795 bool CanDelete = true;
1796 if (VNI->hasPHIKill()) {
1797 // A kill is a phi node, not all of its uses can be rematerialized.
1798 // It must not be deleted.
1800 // Need a stack slot if there is any live range where uses cannot be
1802 NeedStackSlot = true;
// Fully remat'able def: mark it deletable.
1805 ReMatDelete.set(VN);
1807 // Need a stack slot if there is any live range where uses cannot be
1809 NeedStackSlot = true;
1813 // One stack slot per live interval.
1814 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1815 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1816 Slot = vrm.assignVirt2StackSlot(li.reg);
1818 // This case only occurs when the prealloc splitter has already assigned
1819 // a stack slot to this vreg.
1821 Slot = vrm.getStackSlot(li.reg);
1824 // Create new intervals and rewrite defs and uses.
1825 for (LiveInterval::Ranges::const_iterator
1826 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1827 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1828 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1829 bool DefIsReMat = ReMatDefMI != NULL;
1830 bool CanDelete = ReMatDelete[I->valno->id];
1832 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1833 bool isLoad = isLoadSS ||
1834 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1835 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1836 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1837 CanDelete, vrm, rc, ReMatIds, loopInfo,
1838 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1839 MBBVRegsMap, NewLIs);
1842 // Insert spills / restores if we are splitting.
1844 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1845 normalizeSpillWeights(NewLIs);
// Process recorded per-MBB spill points: try to fold the store, otherwise
// ask the spiller (via vrm) to insert one.
1849 SmallPtrSet<LiveInterval*, 4> AddedKill;
1850 SmallVector<unsigned, 2> Ops;
1851 if (NeedStackSlot) {
1852 int Id = SpillMBBs.find_first();
1854 std::vector<SRInfo> &spills = SpillIdxes[Id];
1855 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1856 SlotIndex index = spills[i].index;
1857 unsigned VReg = spills[i].vreg;
1858 LiveInterval &nI = getOrCreateInterval(VReg);
1859 bool isReMat = vrm.isReMaterialized(VReg);
1860 MachineInstr *MI = getInstructionFromIndex(index);
1861 bool CanFold = false;
1862 bool FoundUse = false;
1864 if (spills[i].canFold) {
1866 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1867 MachineOperand &MO = MI->getOperand(j);
1868 if (!MO.isReg() || MO.getReg() != VReg)
1875 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1876 RestoreMBBs, RestoreIdxes))) {
1877 // MI has two-address uses of the same register. If the use
1878 // isn't the first and only use in the BB, then we can't fold
1879 // it. FIXME: Move this to rewriteInstructionsForSpills.
1886 // Fold the store into the def if possible.
1887 bool Folded = false;
1888 if (CanFold && !Ops.empty()) {
1889 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1892 // Also folded uses, do not issue a load.
1893 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1894 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1896 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1900 // Otherwise tell the spiller to issue a spill.
1902 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1903 bool isKill = LR->end == index.getStoreIndex();
1904 if (!MI->registerDefIsDead(nI.reg))
1905 // No need to spill a dead def.
1906 vrm.addSpillPoint(VReg, isKill, MI);
1908 AddedKill.insert(&nI);
1911 Id = SpillMBBs.find_next(Id);
// Process recorded per-MBB restore points symmetrically: fold the reload
// (or the remat'ed def) if possible, else ask the spiller for a restore.
1915 int Id = RestoreMBBs.find_first();
1917 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1918 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1919 SlotIndex index = restores[i].index;
// Default index means the restore was cancelled by eraseRestoreInfo above.
1920 if (index == SlotIndex())
1922 unsigned VReg = restores[i].vreg;
1923 LiveInterval &nI = getOrCreateInterval(VReg);
1924 bool isReMat = vrm.isReMaterialized(VReg);
1925 MachineInstr *MI = getInstructionFromIndex(index);
1926 bool CanFold = false;
1928 if (restores[i].canFold) {
1930 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1931 MachineOperand &MO = MI->getOperand(j);
1932 if (!MO.isReg() || MO.getReg() != VReg)
1936 // If this restore were to be folded, it would have been folded
1945 // Fold the load into the use if possible.
1946 bool Folded = false;
1947 if (CanFold && !Ops.empty()) {
1949 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1951 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1953 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1954 // If the rematerializable def is a load, also try to fold it.
1955 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1956 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1957 Ops, isLoadSS, LdSlot, VReg);
1959 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1961 // Re-matting an instruction with virtual register use. Add the
1962 // register as an implicit use on the use MI and mark the register
1963 // interval as unspillable.
1964 LiveInterval &ImpLi = getInterval(ImpUse);
1965 ImpLi.markNotSpillable();
1966 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1971 // If folding is not possible / failed, then tell the spiller to issue a
1972 // load / rematerialization for us.
1974 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1976 vrm.addRestorePoint(VReg, MI);
1978 Id = RestoreMBBs.find_next(Id);
1981 // Finalize intervals: add kills, finalize spill weights, and filter out
1983 std::vector<LiveInterval*> RetNewLIs;
1984 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1985 LiveInterval *LI = NewLIs[i];
// Intervals that got an explicit spill point already had their kill handled.
1987 if (!AddedKill.count(LI)) {
1988 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1989 SlotIndex LastUseIdx = LR->end.getBaseIndex();
1990 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1991 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1992 assert(UseIdx != -1);
1993 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
1994 LastUse->getOperand(UseIdx).setIsKill();
1995 vrm.addKillPoint(LI->reg, LastUseIdx);
1998 RetNewLIs.push_back(LI);
2002 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2003 normalizeSpillWeights(RetNewLIs);
2007 /// hasAllocatableSuperReg - Return true if the specified physical register has
2008 /// any super register that's allocatable.
2009 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// A super-register qualifies only if it is allocatable AND tracked by an
// interval in this analysis.
2010 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2011 if (allocatableRegs_[*AS] && hasInterval(*AS))
2016 /// getRepresentativeReg - Find the largest super register of the specified
2017 /// physical register.
2018 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2019 // Find the largest super-register that is allocatable.
2020 unsigned BestReg = Reg;
2021 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2022 unsigned SuperReg = *AS;
2023 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2031 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2032 /// specified interval that conflicts with the specified physical register.
2033 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2034 unsigned PhysReg) const {
2035 unsigned NumConflicts = 0;
2036 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2037 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2038 E = mri_->reg_end(); I != E; ++I) {
2039 MachineOperand &O = I.getOperand();
2040 MachineInstr *MI = O.getParent();
2041 if (MI->isDebugValue())
2043 SlotIndex Index = getInstructionIndex(MI);
2044 if (pli.liveAt(Index))
2047 return NumConflicts;
2050 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2051 /// around all defs and uses of the specified interval. Return true if it
2052 /// was able to cut its interval.
2053 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2054 unsigned PhysReg, VirtRegMap &vrm) {
2055 unsigned SpillReg = getRepresentativeReg(PhysReg);
2057 DEBUG(dbgs() << "spillPhysRegAroundRegDefsUses " << tri_->getName(PhysReg)
2058 << " represented by " << tri_->getName(SpillReg) << '\n');
2060 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2061 // If there are registers which alias PhysReg, but which are not a
2062 // sub-register of the chosen representative super register. Assert
2063 // since we can't handle it yet.
2064 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2065 tri_->isSuperRegister(*AS, SpillReg));
2068 SmallVector<unsigned, 4> PRegs;
2069 if (hasInterval(SpillReg))
2070 PRegs.push_back(SpillReg);
2071 for (const unsigned *SR = tri_->getSubRegisters(SpillReg); *SR; ++SR)
2072 if (hasInterval(*SR))
2073 PRegs.push_back(*SR);
2076 dbgs() << "Trying to spill:";
2077 for (unsigned i = 0, e = PRegs.size(); i != e; ++i)
2078 dbgs() << ' ' << tri_->getName(PRegs[i]);
2082 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2083 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2084 E = mri_->reg_end(); I != E; ++I) {
2085 MachineOperand &O = I.getOperand();
2086 MachineInstr *MI = O.getParent();
2087 if (MI->isDebugValue() || SeenMIs.count(MI))
2090 SlotIndex Index = getInstructionIndex(MI);
2091 bool LiveReg = false;
2092 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2093 unsigned PReg = PRegs[i];
2094 LiveInterval &pli = getInterval(PReg);
2095 if (!pli.liveAt(Index))
2098 SlotIndex StartIdx = Index.getLoadIndex();
2099 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2100 if (!pli.isInOneLiveRange(StartIdx, EndIdx)) {
2102 raw_string_ostream Msg(msg);
2103 Msg << "Ran out of registers during register allocation!";
2104 if (MI->isInlineAsm()) {
2105 Msg << "\nPlease check your inline asm statement for invalid "
2106 << "constraints:\n";
2107 MI->print(Msg, tm_);
2109 report_fatal_error(Msg.str());
2111 pli.removeRange(StartIdx, EndIdx);
2116 DEBUG(dbgs() << "Emergency spill around " << Index << '\t' << *MI);
2117 vrm.addEmergencySpill(SpillReg, MI);
2123 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2124 MachineInstr* startInst) {
2125 LiveInterval& Interval = getOrCreateInterval(reg);
2126 VNInfo* VN = Interval.getNextValue(
2127 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2128 startInst, getVNInfoAllocator());
2129 VN->setHasPHIKill(true);
2131 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2132 getMBBEndIdx(startInst->getParent()), VN);
2133 Interval.addRange(LR);