1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
// NOTE(review): this listing is elided — original line numbers prefix each
// line and the numbering is sparse, so some statements/braces are not shown.
38 // Hidden options for help debugging.
50 static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
// Pass-wide statistics, reported with -stats.
53 STATISTIC(numIntervals , "Number of original intervals");
54 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
55 STATISTIC(numSplits , "Number of intervals split");
// Pass registration: declare the analyses/passes LiveIntervals depends on so
// the pass manager schedules them ahead of this analysis.
57 char LiveIntervals::ID = 0;
58 INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
59 "Live Interval Analysis", false, false)
60 INITIALIZE_PASS_DEPENDENCY(LiveVariables)
61 INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
62 INITIALIZE_PASS_DEPENDENCY(PHIElimination)
63 INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
64 INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
65 INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
66 INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
67 INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
68 "Live Interval Analysis", false, false)
/// getAnalysisUsage - Declare required and preserved analyses. Most
/// dependencies are both required and preserved; SlotIndexes is required
/// transitively so it stays valid while clients of LiveIntervals query it.
70 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
72 AU.addRequired<AliasAnalysis>();
73 AU.addPreserved<AliasAnalysis>();
74 AU.addRequired<LiveVariables>();
75 AU.addPreserved<LiveVariables>();
76 AU.addRequired<MachineLoopInfo>();
77 AU.addPreserved<MachineLoopInfo>();
78 AU.addPreservedID(MachineDominatorsID);
81 AU.addPreservedID(PHIEliminationID);
82 AU.addRequiredID(PHIEliminationID);
85 AU.addRequiredID(TwoAddressInstructionPassID);
86 AU.addPreserved<ProcessImplicitDefs>();
87 AU.addRequired<ProcessImplicitDefs>();
88 AU.addPreserved<SlotIndexes>();
89 AU.addRequiredTransitive<SlotIndexes>();
// Let the base class add the usual MachineFunctionPass requirements.
90 MachineFunctionPass::getAnalysisUsage(AU);
/// releaseMemory - Drop all per-function state: the LiveInterval objects
/// owned by r2iMap_, the VNInfo allocator's memory regions, and any cloned
/// machine instructions still queued in CloneMIs.
93 void LiveIntervals::releaseMemory() {
94 // Free the live intervals themselves.
95 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
96 E = r2iMap_.end(); I != E; ++I)
101 // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
102 VNInfoAllocator.Reset();
103 while (!CloneMIs.empty()) {
104 MachineInstr *MI = CloneMIs.back();
// Return the cloned instruction's storage to the MachineFunction.
106 mf_->DeleteMachineInstr(MI);
110 /// runOnMachineFunction - Register allocate the whole function
///
/// Caches pointers to the register/target/instruction info and the required
/// analyses, then computes the live intervals and updates the statistic.
112 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
// NOTE(review): mf_ is presumably assigned &fn on an elided line just above
// this use — confirm against the full source.
114 mri_ = &mf_->getRegInfo();
115 tm_ = &fn.getTarget();
116 tri_ = tm_->getRegisterInfo();
117 tii_ = tm_->getInstrInfo();
118 aa_ = &getAnalysis<AliasAnalysis>();
119 lv_ = &getAnalysis<LiveVariables>();
120 indexes_ = &getAnalysis<SlotIndexes>();
121 allocatableRegs_ = tri_->getAllocatableSet(fn);
// Record how many intervals this function produced.
125 numIntervals += getNumIntervals();
131 /// print - Implement the dump method.
132 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
133 OS << "********** INTERVALS **********\n";
// Print every interval in the map; tri_ supplies register names.
134 for (const_iterator I = begin(), E = end(); I != E; ++I) {
135 I->second->print(OS, tri_);
/// printInstrs - Print the function's machine instructions annotated with
/// their slot indexes.
142 void LiveIntervals::printInstrs(raw_ostream &OS) const {
143 OS << "********** MACHINEINSTRS **********\n";
144 mf_->print(OS, indexes_);
/// dumpInstrs - Debug helper; body elided in this listing (presumably
/// forwards to printInstrs(dbgs()) — confirm against the full source).
147 void LiveIntervals::dumpInstrs() const {
/// conflictsWithPhysReg - Return true if assigning physical register 'reg'
/// would conflict with the single-range interval li: some instruction inside
/// the range (other than a copy to/from li.reg) has an operand whose mapped
/// physical register overlaps 'reg'.
151 bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
152 VirtRegMap &vrm, unsigned reg) {
153 // We don't handle fancy stuff crossing basic block boundaries
154 if (li.ranges.size() != 1)
156 const LiveRange &range = li.ranges.front();
157 SlotIndex idx = range.start.getBaseIndex();
158 SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
160 // Skip deleted instructions
161 MachineInstr *firstMI = getInstructionFromIndex(idx);
162 while (!firstMI && idx != end) {
163 idx = idx.getNextIndex();
164 firstMI = getInstructionFromIndex(idx);
169 // Find last instruction in range
170 SlotIndex lastIdx = end.getPrevIndex();
171 MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
// Walk backwards over deleted instructions until one is found (or we reach
// the first instruction again).
172 while (!lastMI && lastIdx != idx) {
173 lastIdx = lastIdx.getPrevIndex();
174 lastMI = getInstructionFromIndex(lastIdx);
179 // Range cannot cross basic block boundaries or terminators
180 MachineBasicBlock *MBB = firstMI->getParent();
181 if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
184 MachineBasicBlock::const_iterator E = lastMI;
186 for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
187 const MachineInstr &MI = *I;
189 // Allow copies to and from li.reg
191 if (MI.getOperand(0).getReg() == li.reg ||
192 MI.getOperand(1).getReg() == li.reg)
195 // Check for operands using reg
196 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
197 const MachineOperand& mop = MI.getOperand(i);
200 unsigned PhysReg = mop.getReg();
201 if (PhysReg == 0 || PhysReg == li.reg)
// A virtual register operand conflicts through its assigned phys reg,
// if it has one in the VirtRegMap.
203 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
204 if (!vrm.hasPhys(PhysReg))
206 PhysReg = vrm.getPhys(PhysReg);
208 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
213 // No conflicts found.
/// conflictsWithAliasRef - Return true if some instruction covered by li
/// (excluding deleted instructions and members of JoinedCopies) has a
/// physical-register operand that overlaps Reg.
217 bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
218 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
// Scan every slot index covered by each live range of li.
219 for (LiveInterval::Ranges::const_iterator
220 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
221 for (SlotIndex index = I->start.getBaseIndex(),
222 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
224 index = index.getNextIndex()) {
225 MachineInstr *MI = getInstructionFromIndex(index);
227 continue; // skip deleted instructions
// Coalesced copies are about to disappear; ignore them.
229 if (JoinedCopies.count(MI))
231 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
232 MachineOperand& MO = MI->getOperand(i);
235 unsigned PhysReg = MO.getReg();
236 if (PhysReg == 0 || PhysReg == Reg ||
237 TargetRegisterInfo::isVirtualRegister(PhysReg))
239 if (tri_->regsOverlap(Reg, PhysReg))
/// MultipleDefsBySameMI - Return true if the instruction defines the register
/// at operand MOIdx again in a later operand (e.g. two sub-register defs of
/// the same vreg by one instruction). The assert checks the two defs target
/// different sub-registers.
249 bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
250 unsigned Reg = MI.getOperand(MOIdx).getReg();
// Only operands after MOIdx need checking; earlier ones were seen already.
251 for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
252 const MachineOperand &MO = MI.getOperand(i);
255 if (MO.getReg() == Reg && MO.isDef()) {
256 assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
257 MI.getOperand(MOIdx).getSubReg() &&
258 (MO.getSubReg() || MO.isImplicit()));
265 /// isPartialRedef - Return true if the specified def at the specific index is
266 /// partially re-defining the specified live interval. A common case of this is
267 /// a definition of the sub-register.
268 bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
269 LiveInterval &interval) {
// Only sub-register defs can be partial redefs; early-clobbers are excluded.
270 if (!MO.getSubReg() || MO.isEarlyClobber())
273 SlotIndex RedefIndex = MIIdx.getDefIndex();
// Find the value live just before this def; its defining instruction tells
// us whether the full register was defined earlier.
274 const LiveRange *OldLR =
275 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
276 MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
278 return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
/// handleVirtualRegisterDef - Update 'interval' for a definition of a virtual
/// register by instruction 'mi' at slot MIIdx. Handles first defs (empty
/// interval), multiple defs by the same MI, two-address/partial redefs, and
/// PHI-join registers.
283 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
284 MachineBasicBlock::iterator mi,
288 LiveInterval &interval) {
289 DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));
291 // Virtual registers may be defined multiple times (due to phi
292 // elimination and 2-addr elimination). Much of what we do only has to be
293 // done once for the vreg. We use an empty interval to detect the first
294 // time we see a vreg.
295 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
296 if (interval.empty()) {
297 // Get the Idx of the defining instructions.
298 SlotIndex defIndex = MIIdx.getDefIndex();
299 // Earlyclobbers move back one, so that they overlap the live range
301 if (MO.isEarlyClobber())
302 defIndex = MIIdx.getUseIndex();
304 // Make sure the first definition is not a partial redefinition. Add an
305 // <imp-def> of the full register.
307 mi->addRegisterDefined(interval.reg);
309 MachineInstr *CopyMI = NULL;
310 if (mi->isCopyLike()) {
// The first value number of an interval is always #0.
314 VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
315 assert(ValNo->id == 0 && "First value in interval is not 0?");
317 // Loop over all of the blocks that the vreg is defined in. There are
318 // two cases we have to handle here. The most common case is a vreg
319 // whose lifetime is contained within a basic block. In this case there
320 // will be a single kill, in MBB, which comes after the definition.
321 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
322 // FIXME: what about dead vars?
324 if (vi.Kills[0] != mi)
325 killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
327 killIdx = defIndex.getStoreIndex();
329 // If the kill happens after the definition, we have an intra-block
331 if (killIdx > defIndex) {
332 assert(vi.AliveBlocks.empty() &&
333 "Shouldn't be alive across any blocks!");
334 LiveRange LR(defIndex, killIdx, ValNo);
335 interval.addRange(LR);
336 DEBUG(dbgs() << " +" << LR << "\n");
341 // The other case we handle is when a virtual register lives to the end
342 // of the defining block, potentially live across some blocks, then is
343 // live into some number of blocks, but gets killed. Start by adding a
344 // range that goes from this definition to the end of the defining block.
345 LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
346 DEBUG(dbgs() << " +" << NewLR);
347 interval.addRange(NewLR);
349 bool PHIJoin = lv_->isPHIJoin(interval.reg);
352 // A phi join register is killed at the end of the MBB and revived as a new
353 // valno in the killing blocks.
354 assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
355 DEBUG(dbgs() << " phi-join");
356 ValNo->setHasPHIKill(true);
358 // Iterate over all of the blocks that the variable is completely
359 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
361 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
362 E = vi.AliveBlocks.end(); I != E; ++I) {
363 MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
364 LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
365 interval.addRange(LR);
366 DEBUG(dbgs() << " +" << LR);
370 // Finally, this virtual register is live from the start of any killing
371 // block to the 'use' slot of the killing instruction.
372 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
373 MachineInstr *Kill = vi.Kills[i];
374 SlotIndex Start = getMBBStartIdx(Kill->getParent());
375 SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();
377 // Create interval with one of a NEW value number. Note that this value
378 // number isn't actually defined by an instruction, weird huh? :)
380 assert(getInstructionFromIndex(Start) == 0 &&
381 "PHI def index points at actual instruction.");
382 ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
383 ValNo->setIsPHIDef(true);
385 LiveRange LR(Start, killIdx, ValNo);
386 interval.addRange(LR);
387 DEBUG(dbgs() << " +" << LR);
// --- Non-empty interval: this is a redefinition of an existing vreg. ---
391 if (MultipleDefsBySameMI(*mi, MOIdx))
392 // Multiple defs of the same virtual register by the same instruction.
393 // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
394 // This is likely due to elimination of REG_SEQUENCE instructions. Return
395 // here since there is nothing to do.
398 // If this is the second time we see a virtual register definition, it
399 // must be due to phi elimination or two addr elimination. If this is
400 // the result of two address elimination, then the vreg is one of the
401 // def-and-use register operand.
403 // It may also be partial redef like this:
404 // 80 %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
405 // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
406 bool PartReDef = isPartialRedef(MIIdx, MO, interval);
407 if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
408 // If this is a two-address definition, then we have already processed
409 // the live range. The only problem is that we didn't realize there
410 // are actually two values in the live interval. Because of this we
411 // need to take the LiveRegion that defines this register and split it
413 SlotIndex RedefIndex = MIIdx.getDefIndex();
414 if (MO.isEarlyClobber())
415 RedefIndex = MIIdx.getUseIndex();
417 const LiveRange *OldLR =
418 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
419 VNInfo *OldValNo = OldLR->valno;
420 SlotIndex DefIndex = OldValNo->def.getDefIndex();
422 // Delete the previous value, which should be short and continuous,
423 // because the 2-addr copy must be in the same MBB as the redef.
424 interval.removeRange(DefIndex, RedefIndex);
426 // The new value number (#1) is defined by the instruction we claimed
428 VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);
430 // Value#0 is now defined by the 2-addr instruction.
431 OldValNo->def = RedefIndex;
432 OldValNo->setCopy(0);
434 // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
435 if (PartReDef && mi->isCopyLike())
436 OldValNo->setCopy(&*mi);
438 // Add the new live interval which replaces the range for the input copy.
439 LiveRange LR(DefIndex, RedefIndex, ValNo);
440 DEBUG(dbgs() << " replace range with " << LR);
441 interval.addRange(LR);
443 // If this redefinition is dead, we need to add a dummy unit live
444 // range covering the def slot.
446 interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
450 dbgs() << " RESULT: ";
451 interval.print(dbgs(), tri_);
453 } else if (lv_->isPHIJoin(interval.reg)) {
454 // In the case of PHI elimination, each variable definition is only
455 // live until the end of the block. We've already taken care of the
456 // rest of the live range.
458 SlotIndex defIndex = MIIdx.getDefIndex();
459 if (MO.isEarlyClobber())
460 defIndex = MIIdx.getUseIndex();
463 MachineInstr *CopyMI = NULL;
464 if (mi->isCopyLike())
466 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
468 SlotIndex killIndex = getMBBEndIdx(mbb);
469 LiveRange LR(defIndex, killIndex, ValNo);
470 interval.addRange(LR);
471 ValNo->setHasPHIKill(true);
472 DEBUG(dbgs() << " phi-join +" << LR);
// Any other redefinition pattern is a bug in the input.
474 llvm_unreachable("Multiply defined register");
478 DEBUG(dbgs() << '\n');
/// handlePhysicalRegisterDef - Update 'interval' for a def of a physical
/// register at MIIdx: scan forward in the block for a kill or redef, and add
/// the resulting [start, end) range with a (possibly pre-existing) VNInfo.
481 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
482 MachineBasicBlock::iterator mi,
485 LiveInterval &interval,
486 MachineInstr *CopyMI) {
487 // A physical register cannot be live across basic block, so its
488 // lifetime must end somewhere in its defining basic block.
489 DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));
491 SlotIndex baseIndex = MIIdx;
492 SlotIndex start = baseIndex.getDefIndex();
493 // Earlyclobbers move back one.
494 if (MO.isEarlyClobber())
495 start = MIIdx.getUseIndex();
496 SlotIndex end = start;
498 // If it is not used after definition, it is considered dead at
499 // the instruction defining it. Hence its interval is:
500 // [defSlot(def), defSlot(def)+1)
501 // For earlyclobbers, the defSlot was pushed back one; the extra
502 // advance below compensates.
504 DEBUG(dbgs() << " dead");
505 end = start.getStoreIndex();
509 // If it is not dead on definition, it must be killed by a
510 // subsequent instruction. Hence its interval is:
511 // [defSlot(def), useSlot(kill)+1)
512 baseIndex = baseIndex.getNextIndex();
// Forward scan: stop at the first kill or redefinition of the register.
513 while (++mi != MBB->end()) {
515 if (mi->isDebugValue())
517 if (getInstructionFromIndex(baseIndex) == 0)
518 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
520 if (mi->killsRegister(interval.reg, tri_)) {
521 DEBUG(dbgs() << " killed");
522 end = baseIndex.getDefIndex();
525 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
527 if (mi->isRegTiedToUseOperand(DefIdx)) {
528 // Two-address instruction.
529 end = baseIndex.getDefIndex();
531 // Another instruction redefines the register before it is ever read.
532 // Then the register is essentially dead at the instruction that
533 // defines it. Hence its interval is:
534 // [defSlot(def), defSlot(def)+1)
535 DEBUG(dbgs() << " dead");
536 end = start.getStoreIndex();
542 baseIndex = baseIndex.getNextIndex();
545 // The only case we should have a dead physreg here without a killing or
546 // instruction where we know it's dead is if it is live-in to the function
547 // and never used. Another possible case is the implicit use of the
548 // physical register has been deleted by two-address pass.
549 end = start.getStoreIndex();
552 assert(start < end && "did not find end of interval?");
554 // Already exists? Extend old live interval.
555 VNInfo *ValNo = interval.getVNInfoAt(start);
556 bool Extend = ValNo != 0;
558 ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
// An early-clobber redefining an existing value is recorded on the VNInfo.
559 if (Extend && MO.isEarlyClobber())
560 ValNo->setHasRedefByEC(true);
561 LiveRange LR(start, end, ValNo);
562 interval.addRange(LR);
563 DEBUG(dbgs() << " +" << LR << '\n');
/// handleRegisterDef - Dispatch a register def to the virtual- or
/// physical-register handler; for allocatable physregs, also process every
/// sub-register that the instruction does not define explicitly itself.
566 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
567 MachineBasicBlock::iterator MI,
571 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
572 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
573 getOrCreateInterval(MO.getReg()));
574 else if (allocatableRegs_[MO.getReg()]) {
575 MachineInstr *CopyMI = NULL;
576 if (MI->isCopyLike())
578 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
579 getOrCreateInterval(MO.getReg()), CopyMI);
580 // Def of a register also defines its sub-registers.
581 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
582 // If MI also modifies the sub-register explicitly, avoid processing it
583 // more than once. Do not pass in TRI here so it checks for exact match.
584 if (!MI->definesRegister(*AS))
585 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
586 getOrCreateInterval(*AS), 0);
/// handleLiveInRegister - Build the live range for a register that is live-in
/// to MBB: scan forward from the block start for the first kill or redef, and
/// create a PHI-def value number at the block start for the resulting range.
590 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
592 LiveInterval &interval, bool isAlias) {
593 DEBUG(dbgs() << "\t\tlivein register: " << PrintReg(interval.reg, tri_));
595 // Look for kills, if it reaches a def before it's killed, then it shouldn't
596 // be considered a livein.
597 MachineBasicBlock::iterator mi = MBB->begin();
598 MachineBasicBlock::iterator E = MBB->end();
599 // Skip over DBG_VALUE at the start of the MBB.
600 if (mi != E && mi->isDebugValue()) {
601 while (++mi != E && mi->isDebugValue())
604 // MBB is empty except for DBG_VALUE's.
608 SlotIndex baseIndex = MIIdx;
609 SlotIndex start = baseIndex;
610 if (getInstructionFromIndex(baseIndex) == 0)
611 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
613 SlotIndex end = baseIndex;
614 bool SeenDefUse = false;
// Forward scan for the first kill or redefinition.
617 if (mi->killsRegister(interval.reg, tri_)) {
618 DEBUG(dbgs() << " killed");
619 end = baseIndex.getDefIndex();
622 } else if (mi->definesRegister(interval.reg, tri_)) {
623 // Another instruction redefines the register before it is ever read.
624 // Then the register is essentially dead at the instruction that defines
625 // it. Hence its interval is:
626 // [defSlot(def), defSlot(def)+1)
627 DEBUG(dbgs() << " dead");
628 end = start.getStoreIndex();
633 while (++mi != E && mi->isDebugValue())
634 // Skip over DBG_VALUE.
637 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
640 // Live-in register might not be used at all.
643 DEBUG(dbgs() << " dead");
644 end = MIIdx.getStoreIndex();
646 DEBUG(dbgs() << " live through");
// The value is defined "at the block start" by an imaginary PHI.
651 SlotIndex defIdx = getMBBStartIdx(MBB);
652 assert(getInstructionFromIndex(defIdx) == 0 &&
653 "PHI def index points at actual instruction.");
655 interval.getNextValue(defIdx, 0, VNInfoAllocator);
656 vni->setIsPHIDef(true);
657 LiveRange LR(start, end, vni);
659 interval.addRange(LR);
660 DEBUG(dbgs() << " +" << LR << '\n');
663 /// computeIntervals - computes the live intervals for virtual
664 /// registers. for some ordering of the machine instructions [1,N] a
665 /// live interval is an interval [i, j) where 1 <= i <= j < N for
666 /// which a variable is live
667 void LiveIntervals::computeIntervals() {
668 DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
669 << "********** Function: "
670 << ((Value*)mf_->getFunction())->getName() << '\n');
672 SmallVector<unsigned, 8> UndefUses;
673 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
675 MachineBasicBlock *MBB = MBBI;
679 // Track the index of the current machine instr.
680 SlotIndex MIIndex = getMBBStartIdx(MBB);
681 DEBUG(dbgs() << "BB#" << MBB->getNumber()
682 << ":\t\t# derived from " << MBB->getName() << "\n");
684 // Create intervals for live-ins to this BB first.
685 for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
686 LE = MBB->livein_end(); LI != LE; ++LI) {
687 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
688 // Multiple live-ins can alias the same register.
689 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
690 if (!hasInterval(*AS))
691 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
695 // Skip over empty initial indices.
696 if (getInstructionFromIndex(MIIndex) == 0)
697 MIIndex = indexes_->getNextNonNullIndex(MIIndex)
699 for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
701 DEBUG(dbgs() << MIIndex << "\t" << *MI);
702 if (MI->isDebugValue())
// Walk operands in reverse so implicit operands are seen first.
706 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
707 MachineOperand &MO = MI->getOperand(i);
708 if (!MO.isReg() || !MO.getReg())
711 // handle register defs - build intervals
713 handleRegisterDef(MBB, MI, MIIndex, MO, i);
714 else if (MO.isUndef())
715 UndefUses.push_back(MO.getReg());
718 // Move to the next instr slot.
719 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
723 // Create empty intervals for registers defined by implicit_def's (except
724 // for those implicit_def that define values which are liveout of their
726 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
727 unsigned UndefReg = UndefUses[i];
728 (void)getOrCreateInterval(UndefReg);
/// createInterval - Allocate a new LiveInterval for the given register.
732 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
// Physical registers get an infinite spill weight so the allocator never
// tries to spill them; virtual registers start at weight 0.
733 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
734 return new LiveInterval(reg, Weight);
737 /// dupInterval - Duplicate a live interval. The caller is responsible for
738 /// managing the allocated memory.
739 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
740 LiveInterval *NewLI = createInterval(li->reg);
// Deep-copy ranges and value numbers using this pass's VNInfo allocator.
741 NewLI->Copy(*li, mri_, getVNInfoAllocator());
745 /// shrinkToUses - After removing some uses of a register, shrink its live
746 /// range to just the remaining uses. This method does not compute reaching
747 /// defs for new uses, and it doesn't remove dead defs.
748 void LiveIntervals::shrinkToUses(LiveInterval *li) {
749 DEBUG(dbgs() << "Shrink: " << *li << '\n');
// NOTE(review): the assert message reads oddly; the intent is "can only
// shrink virtual registers".
750 assert(TargetRegisterInfo::isVirtualRegister(li->reg)
751 && "Can't only shrink physical registers");
752 // Find all the values used, including PHI kills.
753 SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;
755 // Visit all instructions reading li->reg.
756 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li->reg);
757 MachineInstr *UseMI = I.skipInstruction();) {
758 if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
760 SlotIndex Idx = getInstructionIndex(UseMI).getUseIndex();
761 VNInfo *VNI = li->getVNInfoAt(Idx);
762 assert(VNI && "Live interval not live into reading instruction");
763 if (VNI->def == Idx) {
764 // Special case: An early-clobber tied operand reads and writes the
765 // register one slot early.
766 Idx = Idx.getPrevSlot();
767 VNI = li->getVNInfoAt(Idx);
768 assert(VNI && "Early-clobber tied value not available");
770 WorkList.push_back(std::make_pair(Idx, VNI));
773 // Create a new live interval with only minimal live segments per def.
774 LiveInterval NewLI(li->reg, 0);
775 for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
// Seed each value with a one-slot range at its def; uses extend it below.
780 NewLI.addRange(LiveRange(VNI->def, VNI->def.getNextSlot(), VNI));
783 // Extend intervals to reach all uses in WorkList.
784 while (!WorkList.empty()) {
785 SlotIndex Idx = WorkList.back().first;
786 VNInfo *VNI = WorkList.back().second;
789 // Extend the live range for VNI to be live at Idx.
790 LiveInterval::iterator I = NewLI.find(Idx);
// Already covered — nothing to extend.
793 if (I != NewLI.end() && I->start <= Idx) {
794 assert(I->valno == VNI && "Unexpected existing value number");
798 // Is there already a live range in the block containing Idx?
799 const MachineBasicBlock *MBB = getMBBFromIndex(Idx);
800 SlotIndex BlockStart = getMBBStartIdx(MBB);
801 DEBUG(dbgs() << "Shrink: Use val#" << VNI->id << " at " << Idx
802 << " in BB#" << MBB->getNumber() << '@' << BlockStart);
803 if (I != NewLI.begin() && (--I)->end > BlockStart) {
804 assert(I->valno == VNI && "Wrong reaching def");
805 DEBUG(dbgs() << " extend [" << I->start << ';' << I->end << ")\n");
806 // Is this the first use of a PHIDef in its defining block?
807 if (VNI->isPHIDef() && I->end == VNI->def.getNextSlot()) {
808 // The PHI is live, make sure the predecessors are live-out.
809 for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
810 PE = MBB->pred_end(); PI != PE; ++PI) {
811 SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
812 VNInfo *PVNI = li->getVNInfoAt(Stop);
813 // A predecessor is not required to have a live-out value for a PHI.
815 assert(PVNI->hasPHIKill() && "Missing hasPHIKill flag");
816 WorkList.push_back(std::make_pair(Stop, PVNI));
821 // Extend the live range in the block to include Idx.
822 NewLI.addRange(LiveRange(I->end, Idx.getNextSlot(), VNI));
826 // VNI is live-in to MBB.
827 DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
828 NewLI.addRange(LiveRange(BlockStart, Idx.getNextSlot(), VNI));
830 // Make sure VNI is live-out from the predecessors.
831 for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
832 PE = MBB->pred_end(); PI != PE; ++PI) {
833 SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
834 assert(li->getVNInfoAt(Stop) == VNI && "Wrong value out of predecessor");
835 WorkList.push_back(std::make_pair(Stop, VNI));
839 // Handle dead values.
840 for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
845 LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def);
846 assert(LII != NewLI.end() && "Missing live range for PHI");
// A value whose range never grew beyond the seeded one-slot range is dead.
847 if (LII->end != VNI->def.getNextSlot())
849 if (!VNI->isPHIDef()) {
850 // This is a dead PHI. Remove it.
851 VNI->setIsUnused(true);
852 NewLI.removeRange(*LII);
854 // This is a dead def. Make sure the instruction knows.
855 MachineInstr *MI = getInstructionFromIndex(VNI->def);
856 assert(MI && "No instruction defining live value");
857 MI->addRegisterDead(li->reg, tri_);
861 // Move the trimmed ranges back.
862 li->ranges.swap(NewLI.ranges);
863 DEBUG(dbgs() << "Shrink: " << *li << '\n');
867 //===----------------------------------------------------------------------===//
868 // Register allocator hooks.
/// getLastSplitPoint - Return the last point in mbb where spill code for li
/// may be inserted: normally the first terminator, but if li is live into a
/// landing-pad successor, spill code must go before the call that can throw.
871 MachineBasicBlock::iterator
872 LiveIntervals::getLastSplitPoint(const LiveInterval &li,
873 MachineBasicBlock *mbb) const {
874 const MachineBasicBlock *lpad = mbb->getLandingPadSuccessor();
876 // If li is not live into a landing pad, we can insert spill code before the
878 if (!lpad || !isLiveInToMBB(li, lpad))
879 return mbb->getFirstTerminator();
881 // When there is a landing pad, spill code must go before the call instruction
// Scan backwards from the block end for the call.
883 MachineBasicBlock::iterator I = mbb->end(), B = mbb->begin();
886 if (I->getDesc().isCall())
889 // The block contains no calls that can throw, so use the first terminator.
890 return mbb->getFirstTerminator();
/// addKillFlags - Add kill flags to instructions at virtual-register live
/// range end points. Physical-register intervals and registers with no
/// non-debug uses are skipped.
893 void LiveIntervals::addKillFlags() {
894 for (iterator I = begin(), E = end(); I != E; ++I) {
895 unsigned Reg = I->first;
896 if (TargetRegisterInfo::isPhysicalRegister(Reg))
898 if (mri_->reg_nodbg_empty(Reg))
900 LiveInterval *LI = I->second;
902 // Every instruction that kills Reg corresponds to a live range end point.
903 for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE;
905 // A LOAD index indicates an MBB edge.
906 if (RI->end.isLoad())
908 MachineInstr *MI = getInstructionFromIndex(RI->end);
911 MI->addRegisterKilled(Reg, NULL);
916 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
917 /// allow one) virtual register operand, then its uses are implicitly using
918 /// the register. Returns the virtual register.
919 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
920 MachineInstr *MI) const {
922 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
923 MachineOperand &MO = MI->getOperand(i);
924 if (!MO.isReg() || !MO.isUse())
926 unsigned Reg = MO.getReg();
// Skip the register being rematerialized itself.
927 if (Reg == 0 || Reg == li.reg)
// Non-allocatable physical registers (e.g. reserved regs) don't count.
930 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
931 !allocatableRegs_[Reg])
933 // FIXME: For now, only remat MI with at most one register operand.
935 "Can't rematerialize instruction with multiple register operand!");
944 /// isValNoAvailableAt - Return true if the val# of the specified interval
945 /// which reaches the given instruction also reaches the specified use index.
946 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
947 SlotIndex UseIdx) const {
// Both slots must be covered by the same value number of li.
948 VNInfo *UValNo = li.getVNInfoAt(UseIdx);
949 return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
952 /// isReMaterializable - Returns true if the definition MI of the specified
953 /// val# of the specified interval is re-materializable.
955 LiveIntervals::isReMaterializable(const LiveInterval &li,
956 const VNInfo *ValNo, MachineInstr *MI,
957 const SmallVectorImpl<LiveInterval*> &SpillIs,
// Target hook decides basic rematerializability (uses alias analysis).
962 if (!tii_->isTriviallyReMaterializable(MI, aa_))
965 // Target-specific code can mark an instruction as being rematerializable
966 // if it has one virtual reg use, though it had better be something like
967 // a PIC base register which is likely to be live everywhere.
968 unsigned ImpUse = getReMatImplicitUse(li, MI);
970 const LiveInterval &ImpLi = getInterval(ImpUse);
// The implicit-use value must be available at every use of li's val#.
971 for (MachineRegisterInfo::use_nodbg_iterator
972 ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
974 MachineInstr *UseMI = &*ri;
975 SlotIndex UseIdx = getInstructionIndex(UseMI);
976 if (li.getVNInfoAt(UseIdx) != ValNo)
978 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
982 // If a register operand of the re-materialized instruction is going to
983 // be spilled next, then it's not legal to re-materialize this instruction.
984 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
985 if (ImpUse == SpillIs[i]->reg)
991 /// isReMaterializable - Returns true if the definition MI of the specified
992 /// val# of the specified interval is re-materializable.
///
/// Convenience overload that supplies empty SpillIs and a dummy isLoad flag.
993 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
994 const VNInfo *ValNo, MachineInstr *MI) {
995 SmallVector<LiveInterval*, 4> Dummy1;
997 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
1000 /// isReMaterializable - Returns true if every definition of MI of every
1001 /// val# of the specified interval is re-materializable.
1003 LiveIntervals::isReMaterializable(const LiveInterval &li,
1004 const SmallVectorImpl<LiveInterval*> &SpillIs,
// Every live (non-dead) value number's defining instruction must itself be
// rematerializable for the interval as a whole to qualify.
1007 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1009 const VNInfo *VNI = *i;
1010 if (VNI->isUnused())
1011 continue; // Dead val#.
1012 // Is the def for the val# rematerializable?
1013 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1016 bool DefIsLoad = false;
1018 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
// Accumulate whether any def was a load into the caller's isLoad out-flag.
1020 isLoad |= DefIsLoad;
1025 /// FilterFoldedOps - Filter out two-address use operands. Return
1026 /// true if it finds any issue with the operands that ought to prevent
// folding. On success, FoldOps receives the operand indices that may be
// folded and MRInfo accumulates VirtRegMap::isMod / isRef access bits.
1028 static bool FilterFoldedOps(MachineInstr *MI,
1029 SmallVector<unsigned, 2> &Ops,
1031 SmallVector<unsigned, 2> &FoldOps) {
1033 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1034 unsigned OpIdx = Ops[i];
1035 MachineOperand &MO = MI->getOperand(OpIdx);
1036 // FIXME: fold subreg use.
// Defs contribute the isMod bit; plain uses contribute isRef below.
1040 MRInfo |= (unsigned)VirtRegMap::isMod;
1042 // Filter out two-address use operand(s).
1043 if (MI->isRegTiedToDefOperand(OpIdx)) {
// A use tied to a def is read *and* written: record modref and do not
// append it to FoldOps.
1044 MRInfo = VirtRegMap::isModRef;
1047 MRInfo |= (unsigned)VirtRegMap::isRef;
1049 FoldOps.push_back(OpIdx);
1055 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1056 /// slot / to reg or any rematerialized load into ith operand of specified
1057 /// MI. If it is successful, MI is updated with the newly created MI and
// returns true. On success MI (an in/out reference) is replaced by the
// folded instruction in the slot-index and VirtRegMap bookkeeping.
1059 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1060 VirtRegMap &vrm, MachineInstr *DefMI,
1062 SmallVector<unsigned, 2> &Ops,
1063 bool isSS, int Slot, unsigned Reg) {
1064 // If it is an implicit def instruction, just delete it.
1065 if (MI->isImplicitDef()) {
1066 RemoveMachineInstrFromMaps(MI);
1067 vrm.RemoveMachineInstrFromMaps(MI);
1068 MI->eraseFromParent();
1073 // Filter the list of operand indexes that are to be folded. Abort if
1074 // any operand will prevent folding.
1075 unsigned MRInfo = 0;
1076 SmallVector<unsigned, 2> FoldOps;
1077 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1080 // The only time it's safe to fold into a two address instruction is when
1081 // it's folding reload and spill from / into a spill stack slot.
1082 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Fold from the stack slot (isSS) or fold the rematerialized DefMI itself.
1085 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
1086 : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
1088 // Remember this instruction uses the spill slot.
1089 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1091 // Attempt to fold the memory reference into the instruction. If
1092 // we can do this, we don't need to insert spill code.
1093 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1094 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Migrate all spill/restore/emergency-spill points recorded against the old
// MI onto the folded instruction, then swap them in the index maps.
1095 vrm.transferSpillPts(MI, fmi);
1096 vrm.transferRestorePts(MI, fmi);
1097 vrm.transferEmergencySpills(MI, fmi);
1098 ReplaceMachineInstrInMaps(MI, fmi);
1099 MI->eraseFromParent();
1107 /// canFoldMemoryOperand - Returns true if the specified load / store
1108 /// folding is possible.
// NOTE(review): the final parameter (the 'ReMat' flag used below) is
// declared on an elided line; confirm against the header.
1109 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1110 SmallVector<unsigned, 2> &Ops,
1112 // Filter the list of operand indexes that are to be folded. Abort if
1113 // any operand will prevent folding.
1114 unsigned MRInfo = 0;
1115 SmallVector<unsigned, 2> FoldOps;
1116 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1119 // It's only legal to remat for a use, not a def.
1120 if (ReMat && (MRInfo & VirtRegMap::isMod))
// Defer the final decision to the target hook.
1123 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return true if all of li's live ranges are covered by
// a single machine basic block (a "local" interval that need not be split).
1126 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1127 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
// Block covering the first range; every subsequent range must map to the
// same block or the interval is not local.
1129 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
1134 for (++itr; itr != li.ranges.end(); ++itr) {
1135 MachineBasicBlock *mbb2 =
1136 indexes_->getMBBCoveringRange(itr->start, itr->end);
1145 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1146 /// interval on to-be re-materialized operands of MI) with new register.
1147 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1148 MachineInstr *MI, unsigned NewVReg,
1150 // There is an implicit use. That means one of the other operand is
1151 // being remat'ed and the remat'ed instruction has li.reg as an
1152 // use operand. Make sure we rewrite that as well.
1153 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1154 MachineOperand &MO = MI->getOperand(i);
1157 unsigned Reg = MO.getReg();
// Only virtual registers that were rematerialized are of interest here.
1158 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1160 if (!vrm.isReMaterialized(Reg))
// If the stored remat clone still refers to li.reg, retarget that use to
// the freshly created NewVReg.
1162 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1163 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1165 UseMO->setReg(NewVReg);
1169 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1170 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// Rewrites a single instruction's references to li.reg onto a fresh (or
// MBB-reused) virtual register NewVReg, attempts load/store folding, sets
// up stack-slot / remat bookkeeping in the VirtRegMap, and extends the new
// interval nI to cover this def / use. Returns whether the remaining
// unfolded references could still be folded later (CanFold).
// NOTE(review): the MachineInstr *MI and VirtRegMap &vrm parameters are
// declared on elided signature lines; confirm against the header.
1171 bool LiveIntervals::
1172 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1173 bool TrySplit, SlotIndex index, SlotIndex end,
1175 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1176 unsigned Slot, int LdSlot,
1177 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1179 const TargetRegisterClass* rc,
1180 SmallVector<int, 4> &ReMatIds,
1181 const MachineLoopInfo *loopInfo,
1182 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1183 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1184 std::vector<LiveInterval*> &NewLIs) {
1185 bool CanFold = false;
// Operand scan restarts from 0 (via the RestartInstruction label on an
// elided line) whenever folding replaces MI wholesale.
1187 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1188 MachineOperand& mop = MI->getOperand(i);
1191 unsigned Reg = mop.getReg();
1192 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1197 bool TryFold = !DefIsReMat;
1198 bool FoldSS = true; // Default behavior unless it's a remat.
1199 int FoldSlot = Slot;
1201 // If this is the rematerializable definition MI itself and
1202 // all of its uses are rematerialized, simply delete it.
1203 if (MI == ReMatOrigDefMI && CanDelete) {
1204 DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
1206 RemoveMachineInstrFromMaps(MI);
1207 vrm.RemoveMachineInstrFromMaps(MI);
1208 MI->eraseFromParent();
1212 // If def for this use can't be rematerialized, then try folding.
1213 // If def is rematerializable and it's a load, also try folding.
1214 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1216 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1222 // Scan all of the operands of this instruction rewriting operands
1223 // to use NewVReg instead of li.reg as appropriate. We do this for
1226 // 1. If the instr reads the same spilled vreg multiple times, we
1227 // want to reuse the NewVReg.
1228 // 2. If the instr is a two-addr instruction, we are required to
1229 // keep the src/dst regs pinned.
1231 // Keep track of whether we replace a use and/or def so that we can
1232 // create the spill interval with the appropriate range.
1233 SmallVector<unsigned, 2> Ops;
1234 tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);
1236 // Create a new virtual register for the spill interval.
1237 // Create the new register now so we can map the fold instruction
1238 // to the new register so when it is unfolded we get the correct
1240 bool CreatedNewVReg = false;
1242 NewVReg = mri_->createVirtualRegister(rc);
1244 CreatedNewVReg = true;
1246 // The new virtual register should get the same allocation hints as the
1248 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1249 if (Hint.first || Hint.second)
1250 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1256 // Do not fold load / store here if we are splitting. We'll find an
1257 // optimal point to insert a load / store later.
1259 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1260 Ops, FoldSS, FoldSlot, NewVReg)) {
1261 // Folding the load/store can completely change the instruction in
1262 // unpredictable ways, rescan it from the beginning.
1265 // We need to give the new vreg the same stack slot as the
1266 // spilled interval.
1267 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
// If MI disappeared entirely from the maps, the whole instruction was
// consumed by folding; otherwise restart the operand scan on the new MI.
1273 if (isNotInMIMap(MI))
1275 goto RestartInstruction;
1278 // We'll try to fold it later if it's profitable.
1279 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand — and any implicit-use siblings — to NewVReg.
1283 mop.setReg(NewVReg);
1284 if (mop.isImplicit())
1285 rewriteImplicitOps(li, MI, NewVReg, vrm);
1287 // Reuse NewVReg for other reads.
1288 bool HasEarlyClobber = false;
1289 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1290 MachineOperand &mopj = MI->getOperand(Ops[j]);
1291 mopj.setReg(NewVReg);
1292 if (mopj.isImplicit())
1293 rewriteImplicitOps(li, MI, NewVReg, vrm);
1294 if (mopj.isEarlyClobber())
1295 HasEarlyClobber = true;
// Record how NewVReg is reconstituted: via remat (per-val# remat id) or
// via a stack slot shared with the original interval.
1298 if (CreatedNewVReg) {
1300 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1301 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1302 // Each valnum may have its own remat id.
1303 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1305 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1307 if (!CanDelete || (HasUse && HasDef)) {
1308 // If this is a two-addr instruction then its use operands are
1309 // rematerializable but its def is not. It should be assigned a
1311 vrm.assignVirt2StackSlot(NewVReg, Slot);
1314 vrm.assignVirt2StackSlot(NewVReg, Slot);
1316 } else if (HasUse && HasDef &&
1317 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1318 // If this interval hasn't been assigned a stack slot (because earlier
1319 // def is a deleted remat def), do it now.
1320 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1321 vrm.assignVirt2StackSlot(NewVReg, Slot);
1324 // Re-matting an instruction with virtual register use. Add the
1325 // register as an implicit use on the use MI.
1326 if (DefIsReMat && ImpUse)
1327 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1329 // Create a new register interval for this spill / remat.
1330 LiveInterval &nI = getOrCreateInterval(NewVReg);
1331 if (CreatedNewVReg) {
1332 NewLIs.push_back(&nI);
1333 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1335 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Extend nI to cover the use (load slot to def slot for a fresh vreg, or
// stretch the previous range's end when reusing the MBB-local vreg).
1339 if (CreatedNewVReg) {
1340 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1341 nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
1342 DEBUG(dbgs() << " +" << LR);
1345 // Extend the split live interval to this def / use.
1346 SlotIndex End = index.getDefIndex();
1347 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1348 nI.getValNumInfo(nI.getNumValNums()-1));
1349 DEBUG(dbgs() << " +" << LR);
1354 // An early clobber starts at the use slot, except for an early clobber
1355 // tied to a use operand (yes, that is a thing).
1356 LiveRange LR(HasEarlyClobber && !HasUse ?
1357 index.getUseIndex() : index.getDefIndex(),
1358 index.getStoreIndex(),
1359 nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
1360 DEBUG(dbgs() << " +" << LR);
1365 dbgs() << "\t\t\t\tAdded new interval: ";
1366 nI.print(dbgs(), tri_);
// anyKillInMBBAfterIdx - Return true if li has a kill inside MBB strictly
// after Idx (checked by scanning the slot after Idx up to the block's end).
// NOTE(review): an additional parameter (presumably the VNInfo* that callers
// pass) is declared on an elided line; confirm against the header.
1372 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1374 MachineBasicBlock *MBB,
1375 SlotIndex Idx) const {
1376 return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
1379 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1380 /// during spilling.
1382 struct RewriteInfo {
// Constructor captures the instruction and its slot index; the Index and
// MI field declarations themselves sit on elided lines.
1385 RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
// Strict-weak ordering used with std::sort to process rewrites in
// ascending instruction-index order.
1388 struct RewriteInfoCompare {
1389 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1390 return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Rewrite every def / use of li.reg inside
// the live range I: collects and sorts the affected instructions, rewrites
// each via rewriteInstructionForSpills, and records per-MBB spill points
// (SpillMBBs/SpillIdxes) and restore points (RestoreMBBs/RestoreIdxes)
// for addIntervalsForSpills to materialize later.
1395 void LiveIntervals::
1396 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1397 LiveInterval::Ranges::const_iterator &I,
1398 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1399 unsigned Slot, int LdSlot,
1400 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1402 const TargetRegisterClass* rc,
1403 SmallVector<int, 4> &ReMatIds,
1404 const MachineLoopInfo *loopInfo,
1405 BitVector &SpillMBBs,
1406 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1407 BitVector &RestoreMBBs,
1408 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1409 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1410 std::vector<LiveInterval*> &NewLIs) {
1411 bool AllCanFold = true;
1412 unsigned NewVReg = 0;
// Normalize the range bounds to whole-instruction base indexes.
1413 SlotIndex start = I->start.getBaseIndex();
1414 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1416 // First collect all the def / use in this live range that will be rewritten.
1417 // Make sure they are sorted according to instruction index.
1418 std::vector<RewriteInfo> RewriteMIs;
1419 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1420 re = mri_->reg_end(); ri != re; ) {
1421 MachineInstr *MI = &*ri;
1422 MachineOperand &O = ri.getOperand();
1424 if (MI->isDebugValue()) {
1425 // Modify DBG_VALUE now that the value is in a spill slot.
1426 if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
1427 uint64_t Offset = MI->getOperand(1).getImm();
1428 const MDNode *MDPtr = MI->getOperand(2).getMetadata();
1429 DebugLoc DL = MI->getDebugLoc();
1430 int FI = isLoadSS ? LdSlot : (int)Slot;
1431 if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
1432 Offset, MDPtr, DL)) {
1433 DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
1434 ReplaceMachineInstrInMaps(MI, NewDV);
1435 MachineBasicBlock *MBB = MI->getParent();
1436 MBB->insert(MBB->erase(MI), NewDV);
// If the debug value could not be retargeted to the frame index, drop it
// rather than leave stale register info.
1441 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1442 RemoveMachineInstrFromMaps(MI);
1443 vrm.RemoveMachineInstrFromMaps(MI);
1444 MI->eraseFromParent();
1447 assert(!(O.isImplicit() && O.isUse()) &&
1448 "Spilling register that's used as implicit use?");
1449 SlotIndex index = getInstructionIndex(MI);
1450 if (index < start || index >= end)
1454 // Must be defined by an implicit def. It should not be spilled. Note,
1455 // this is for correctness reason. e.g.
1456 // 8 %reg1024<def> = IMPLICIT_DEF
1457 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1458 // The live range [12, 14) are not part of the r1024 live interval since
1459 // it's defined by an implicit def. It will not conflict with the live
1460 // interval of r1025. Now suppose both registers are spilled, you can
1461 // easily see a situation where both registers are reloaded before
1462 // the INSERT_SUBREG and both target registers that would overlap.
1464 RewriteMIs.push_back(RewriteInfo(index, MI));
1466 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1468 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1469 // Now rewrite the defs and uses.
1470 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1471 RewriteInfo &rwi = RewriteMIs[i];
1473 SlotIndex index = rwi.Index;
1474 MachineInstr *MI = rwi.MI;
1475 // If MI def and/or use the same register multiple times, then there
1476 // are multiple entries.
1477 while (i != e && RewriteMIs[i].MI == MI) {
1478 assert(RewriteMIs[i].Index == index);
1481 MachineBasicBlock *MBB = MI->getParent();
1483 if (ImpUse && MI != ReMatDefMI) {
1484 // Re-matting an instruction with virtual register use. Prevent interval
1485 // from being spilled.
1486 getInterval(ImpUse).markNotSpillable();
// When splitting, reuse at most one new vreg per basic block, tracked in
// MBBVRegsMap; a pure def starts a fresh interval instead.
1489 unsigned MBBId = MBB->getNumber();
1490 unsigned ThisVReg = 0;
1492 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1493 if (NVI != MBBVRegsMap.end()) {
1494 ThisVReg = NVI->second;
1501 // It's better to start a new interval to avoid artifically
1502 // extend the new interval.
1503 if (MI->readsWritesVirtualRegister(li.reg) ==
1504 std::make_pair(false,true)) {
1505 MBBVRegsMap.erase(MBB->getNumber());
1511 bool IsNew = ThisVReg == 0;
1513 // This ends the previous live interval. If all of its def / use
1514 // can be folded, give it a low spill weight.
1515 if (NewVReg && TrySplit && AllCanFold) {
1516 LiveInterval &nI = getOrCreateInterval(NewVReg);
1523 bool HasDef = false;
1524 bool HasUse = false;
1525 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1526 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1527 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1528 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1529 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1530 if (!HasDef && !HasUse)
1533 AllCanFold &= CanFold;
1535 // Update weight of spill interval.
1536 LiveInterval &nI = getOrCreateInterval(NewVReg);
1538 // The spill weight is now infinity as it cannot be spilled again.
1539 nI.markNotSpillable();
1543 // Keep track of the last def and first use in each MBB.
1545 if (MI != ReMatOrigDefMI || !CanDelete) {
1546 bool HasKill = false;
1548 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1550 // If this is a two-address code, then this index starts a new VNInfo.
1551 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1553 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
// Record (or merge) a spill point for this def in SpillIdxes; only the
// last spill of a vreg per MBB is kept live in the back() slot.
1555 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1556 SpillIdxes.find(MBBId);
1558 if (SII == SpillIdxes.end()) {
1559 std::vector<SRInfo> S;
1560 S.push_back(SRInfo(index, NewVReg, true));
1561 SpillIdxes.insert(std::make_pair(MBBId, S));
1562 } else if (SII->second.back().vreg != NewVReg) {
1563 SII->second.push_back(SRInfo(index, NewVReg, true));
1564 } else if (index > SII->second.back().index) {
1565 // If there is an earlier def and this is a two-address
1566 // instruction, then it's not possible to fold the store (which
1567 // would also fold the load).
1568 SRInfo &Info = SII->second.back();
1570 Info.canFold = !HasUse;
1572 SpillMBBs.set(MBBId);
1573 } else if (SII != SpillIdxes.end() &&
1574 SII->second.back().vreg == NewVReg &&
1575 index > SII->second.back().index) {
1576 // There is an earlier def that's not killed (must be two-address).
1577 // The spill is no longer needed.
1578 SII->second.pop_back();
1579 if (SII->second.empty()) {
1580 SpillIdxes.erase(MBBId);
1581 SpillMBBs.reset(MBBId);
// For uses: an earlier spill or restore of the same vreg in this MBB
// invalidates folding; otherwise record a restore point.
1588 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1589 SpillIdxes.find(MBBId);
1590 if (SII != SpillIdxes.end() &&
1591 SII->second.back().vreg == NewVReg &&
1592 index > SII->second.back().index)
1593 // Use(s) following the last def, it's not safe to fold the spill.
1594 SII->second.back().canFold = false;
1595 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1596 RestoreIdxes.find(MBBId);
1597 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1598 // If we are splitting live intervals, only fold if it's the first
1599 // use and there isn't another use later in the MBB.
1600 RII->second.back().canFold = false;
1602 // Only need a reload if there isn't an earlier def / use.
1603 if (RII == RestoreIdxes.end()) {
1604 std::vector<SRInfo> Infos;
1605 Infos.push_back(SRInfo(index, NewVReg, true));
1606 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1608 RII->second.push_back(SRInfo(index, NewVReg, true));
1610 RestoreMBBs.set(MBBId);
1614 // Update spill weight.
1615 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1616 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1619 if (NewVReg && TrySplit && AllCanFold) {
1620 // If all of its def / use can be folded, give it a low spill weight.
1621 LiveInterval &nI = getOrCreateInterval(NewVReg);
// alsoFoldARestore - Return true if the MBB identified by Id contains a
// recorded, still-foldable restore of vreg vr at the given index.
1626 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1627 unsigned vr, BitVector &RestoreMBBs,
1628 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores were recorded for this block at all.
1629 if (!RestoreMBBs[Id])
1631 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1632 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1633 if (Restores[i].index == index &&
1634 Restores[i].vreg == vr &&
1635 Restores[i].canFold)
// eraseRestoreInfo - Invalidate (by clearing the index to the sentinel
// SlotIndex()) the recorded restore at the given index in block Id, so no
// reload is issued for it after the use was folded.
1640 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1641 unsigned vr, BitVector &RestoreMBBs,
1642 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1643 if (!RestoreMBBs[Id])
1645 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1646 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
// NOTE(review): this condition tests only that vreg is nonzero; the
// sibling alsoFoldARestore compares `Restores[i].vreg == vr` — verify
// this comparison wasn't truncated to a bare truthiness test.
1647 if (Restores[i].index == index && Restores[i].vreg)
1648 Restores[i].index = SlotIndex();
1651 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1652 /// spilled and create empty intervals for their uses.
1654 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1655 const TargetRegisterClass* rc,
1656 std::vector<LiveInterval*> &NewLIs) {
// Walk every remaining reference to li.reg (the rewritten ones no longer
// refer to it); iterator advance happens on an elided line, before any
// instruction is erased.
1657 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1658 re = mri_->reg_end(); ri != re; ) {
1659 MachineOperand &O = ri.getOperand();
1660 MachineInstr *MI = &*ri;
1662 if (MI->isDebugValue()) {
1663 // Remove debug info for now.
1665 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
// Any leftover def must be the IMPLICIT_DEF itself; delete it outright.
1669 assert(MI->isImplicitDef() &&
1670 "Register def was not rewritten?");
1671 RemoveMachineInstrFromMaps(MI);
1672 vrm.RemoveMachineInstrFromMaps(MI);
1673 MI->eraseFromParent();
1675 // This must be an use of an implicit_def so it's not part of the live
1676 // interval. Create a new empty live interval for it.
1677 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1678 unsigned NewVReg = mri_->createVirtualRegister(rc);
1680 vrm.setIsImplicitlyDefined(NewVReg);
1681 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Retarget every operand of MI that still names li.reg onto the new vreg.
1682 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1683 MachineOperand &MO = MI->getOperand(i);
1684 if (MO.isReg() && MO.getReg() == li.reg) {
// getSpillWeight - Compute the contribution of one def and/or use at the
// given loop depth to an interval's spill weight. (The return-type line
// sits on an elided line above; the expression below yields a float.)
1694 LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1695 // Limit the loop depth ridiculousness.
1696 if (loopDepth > 200)
1699 // The loop depth is used to roughly estimate the number of times the
1700 // instruction is executed. Something like 10^d is simple, but will quickly
1701 // overflow a float. This expression behaves like 10^d for small d, but is
1702 // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1703 // headroom before overflow.
1704 float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
// isDef + isUse is 0, 1, or 2 occurrences scaled by the depth factor.
1706 return (isDef + isUse) * lc;
// normalizeSpillWeights - Apply normalizeSpillWeight to each newly created
// spill interval so weights are comparable across intervals.
1710 LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1711 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1712 normalizeSpillWeight(*NewLIs[i]);
// addIntervalsForSpills - Spill li, producing the set of new, smaller live
// intervals that replace it. Handles three phases: (1) pre-split intervals
// that may not be split further, (2) rematerialization analysis and
// rewriting of all defs/uses, (3) emission of the recorded spill points and
// restore points, folding stores/loads into instructions where possible.
1715 std::vector<LiveInterval*> LiveIntervals::
1716 addIntervalsForSpills(const LiveInterval &li,
1717 const SmallVectorImpl<LiveInterval*> &SpillIs,
1718 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1719 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1722 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1723 li.print(dbgs(), tri_);
1727 // Each bit specify whether a spill is required in the MBB.
1728 BitVector SpillMBBs(mf_->getNumBlockIDs());
1729 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1730 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1731 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1732 DenseMap<unsigned,unsigned> MBBVRegsMap;
1733 std::vector<LiveInterval*> NewLIs;
1734 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-value-number remat state, indexed by VNInfo::id.
1736 unsigned NumValNums = li.getNumValNums();
1737 SmallVector<MachineInstr*, 4> ReMatDefs;
1738 ReMatDefs.resize(NumValNums, NULL);
1739 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1740 ReMatOrigDefs.resize(NumValNums, NULL);
1741 SmallVector<int, 4> ReMatIds;
1742 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1743 BitVector ReMatDelete(NumValNums);
1744 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1746 // Spilling a split live interval. It cannot be split any further. Also,
1747 // it's also guaranteed to be a single val# / range interval.
1748 if (vrm.getPreSplitReg(li.reg)) {
1749 vrm.setIsSplitFromReg(li.reg, 0);
1750 // Unset the split kill marker on the last use.
1751 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1752 if (KillIdx != SlotIndex()) {
1753 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1754 assert(KillMI && "Last use disappeared?");
1755 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1756 assert(KillOp != -1 && "Last use disappeared?");
1757 KillMI->getOperand(KillOp).setIsKill(false);
1759 vrm.removeKillPoint(li.reg);
1760 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1761 Slot = vrm.getStackSlot(li.reg);
1762 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1763 MachineInstr *ReMatDefMI = DefIsReMat ?
1764 vrm.getReMaterializedMI(li.reg) : NULL;
1766 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1767 bool isLoad = isLoadSS ||
1768 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1769 bool IsFirstRange = true;
1770 for (LiveInterval::Ranges::const_iterator
1771 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1772 // If this is a split live interval with multiple ranges, it means there
1773 // are two-address instructions that re-defined the value. Only the
1774 // first def can be rematerialized!
1776 // Note ReMatOrigDefMI has already been deleted.
1777 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1778 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1779 false, vrm, rc, ReMatIds, loopInfo,
1780 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1781 MBBVRegsMap, NewLIs);
// Subsequent ranges are plain spills: no remat, stack slot only.
1783 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1784 Slot, 0, false, false, false,
1785 false, vrm, rc, ReMatIds, loopInfo,
1786 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1787 MBBVRegsMap, NewLIs);
1789 IsFirstRange = false;
// Early-out path for the pre-split case ends here (return on elided line).
1792 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1793 normalizeSpillWeights(NewLIs);
// Only intervals spanning multiple blocks are worth splitting.
1797 bool TrySplit = !intervalIsInOneMBB(li);
1800 bool NeedStackSlot = false;
1801 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1803 const VNInfo *VNI = *i;
1804 unsigned VN = VNI->id;
1805 if (VNI->isUnused())
1806 continue; // Dead val#.
1807 // Is the def for the val# rematerializable?
1808 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1810 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1811 // Remember how to remat the def of this val#.
1812 ReMatOrigDefs[VN] = ReMatDefMI;
1813 // Original def may be modified so we have to make a copy here.
1814 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1815 CloneMIs.push_back(Clone);
1816 ReMatDefs[VN] = Clone;
1818 bool CanDelete = true;
1819 if (VNI->hasPHIKill()) {
1820 // A kill is a phi node, not all of its uses can be rematerialized.
1821 // It must not be deleted.
1823 // Need a stack slot if there is any live range where uses cannot be
1825 NeedStackSlot = true;
// Fully rematerializable defs can be deleted outright.
1828 ReMatDelete.set(VN);
1830 // Need a stack slot if there is any live range where uses cannot be
1832 NeedStackSlot = true;
1836 // One stack slot per live interval.
1837 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1838 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1839 Slot = vrm.assignVirt2StackSlot(li.reg);
1841 // This case only occurs when the prealloc splitter has already assigned
1842 // a stack slot to this vreg.
1844 Slot = vrm.getStackSlot(li.reg);
1847 // Create new intervals and rewrite defs and uses.
1848 for (LiveInterval::Ranges::const_iterator
1849 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1850 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1851 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1852 bool DefIsReMat = ReMatDefMI != NULL;
1853 bool CanDelete = ReMatDelete[I->valno->id];
1855 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1856 bool isLoad = isLoadSS ||
1857 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1858 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1859 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1860 CanDelete, vrm, rc, ReMatIds, loopInfo,
1861 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1862 MBBVRegsMap, NewLIs);
1865 // Insert spills / restores if we are splitting.
// When not splitting, no explicit spill/restore emission is needed.
1867 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1868 normalizeSpillWeights(NewLIs);
1872 SmallPtrSet<LiveInterval*, 4> AddedKill;
1873 SmallVector<unsigned, 2> Ops;
1874 if (NeedStackSlot) {
// Emit the recorded spill points, folding the store into the defining
// instruction when legal.
1875 int Id = SpillMBBs.find_first();
1877 std::vector<SRInfo> &spills = SpillIdxes[Id];
1878 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1879 SlotIndex index = spills[i].index;
1880 unsigned VReg = spills[i].vreg;
1881 LiveInterval &nI = getOrCreateInterval(VReg);
1882 bool isReMat = vrm.isReMaterialized(VReg);
1883 MachineInstr *MI = getInstructionFromIndex(index);
1884 bool CanFold = false;
1885 bool FoundUse = false;
1887 if (spills[i].canFold) {
1889 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1890 MachineOperand &MO = MI->getOperand(j);
1891 if (!MO.isReg() || MO.getReg() != VReg)
1898 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1899 RestoreMBBs, RestoreIdxes))) {
1900 // MI has two-address uses of the same register. If the use
1901 // isn't the first and only use in the BB, then we can't fold
1902 // it. FIXME: Move this to rewriteInstructionsForSpills.
1909 // Fold the store into the def if possible.
1910 bool Folded = false;
1911 if (CanFold && !Ops.empty()) {
1912 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1915 // Also folded uses, do not issue a load.
1916 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1917 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1919 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1923 // Otherwise tell the spiller to issue a spill.
1925 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1926 bool isKill = LR->end == index.getStoreIndex();
1927 if (!MI->registerDefIsDead(nI.reg))
1928 // No need to spill a dead def.
1929 vrm.addSpillPoint(VReg, isKill, MI);
1931 AddedKill.insert(&nI);
1934 Id = SpillMBBs.find_next(Id);
// Emit the recorded restore points, folding the load into the use when
// possible (including folding a rematerialized load).
1938 int Id = RestoreMBBs.find_first();
1940 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1941 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1942 SlotIndex index = restores[i].index;
// A sentinel SlotIndex() means this restore was erased after a fold.
1943 if (index == SlotIndex())
1945 unsigned VReg = restores[i].vreg;
1946 LiveInterval &nI = getOrCreateInterval(VReg);
1947 bool isReMat = vrm.isReMaterialized(VReg);
1948 MachineInstr *MI = getInstructionFromIndex(index);
1949 bool CanFold = false;
1951 if (restores[i].canFold) {
1953 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1954 MachineOperand &MO = MI->getOperand(j);
1955 if (!MO.isReg() || MO.getReg() != VReg)
1959 // If this restore were to be folded, it would have been folded
// already, so give up when the operand is a def.
1968 // Fold the load into the use if possible.
1969 bool Folded = false;
1970 if (CanFold && !Ops.empty()) {
1972 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1974 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1976 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1977 // If the rematerializable def is a load, also try to fold it.
1978 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1979 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1980 Ops, isLoadSS, LdSlot, VReg);
1982 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1984 // Re-matting an instruction with virtual register use. Add the
1985 // register as an implicit use on the use MI and mark the register
1986 // interval as unspillable.
1987 LiveInterval &ImpLi = getInterval(ImpUse);
1988 ImpLi.markNotSpillable();
1989 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1994 // If folding is not possible / failed, then tell the spiller to issue a
1995 // load / rematerialization for us.
1997 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1999 vrm.addRestorePoint(VReg, MI);
2001 Id = RestoreMBBs.find_next(Id);
2004 // Finalize intervals: add kills, finalize spill weights, and filter out
// dead/empty intervals.
2006 std::vector<LiveInterval*> RetNewLIs;
2007 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2008 LiveInterval *LI = NewLIs[i];
2010 if (!AddedKill.count(LI)) {
// Mark the last use of each surviving interval as a kill so the spiller
// knows where the value dies.
2011 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2012 SlotIndex LastUseIdx = LR->end.getBaseIndex();
2013 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2014 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2015 assert(UseIdx != -1);
2016 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2017 LastUse->getOperand(UseIdx).setIsKill();
2018 vrm.addKillPoint(LI->reg, LastUseIdx);
2021 RetNewLIs.push_back(LI);
2025 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2026 normalizeSpillWeights(RetNewLIs);
2030 /// hasAllocatableSuperReg - Return true if the specified physical register has
2031 /// any super register that's allocatable.
2032 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2033 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2034 if (allocatableRegs_[*AS] && hasInterval(*AS))
2039 /// getRepresentativeReg - Find the largest super register of the specified
2040 /// physical register.
2041 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2042 // Find the largest super-register that is allocatable.
2043 unsigned BestReg = Reg;
2044 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2045 unsigned SuperReg = *AS;
2046 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2054 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2055 /// specified interval that conflicts with the specified physical register.
2056 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2057 unsigned PhysReg) const {
2058 unsigned NumConflicts = 0;
2059 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2060 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2061 E = mri_->reg_end(); I != E; ++I) {
2062 MachineOperand &O = I.getOperand();
2063 MachineInstr *MI = O.getParent();
2064 if (MI->isDebugValue())
2066 SlotIndex Index = getInstructionIndex(MI);
2067 if (pli.liveAt(Index))
2070 return NumConflicts;
2073 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2074 /// around all defs and uses of the specified interval. Return true if it
2075 /// was able to cut its interval.
2076 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2077 unsigned PhysReg, VirtRegMap &vrm) {
2078 unsigned SpillReg = getRepresentativeReg(PhysReg);
2080 DEBUG(dbgs() << "spillPhysRegAroundRegDefsUses " << tri_->getName(PhysReg)
2081 << " represented by " << tri_->getName(SpillReg) << '\n');
2083 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2084 // If there are registers which alias PhysReg, but which are not a
2085 // sub-register of the chosen representative super register. Assert
2086 // since we can't handle it yet.
2087 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2088 tri_->isSuperRegister(*AS, SpillReg));
2091 SmallVector<unsigned, 4> PRegs;
2092 if (hasInterval(SpillReg))
2093 PRegs.push_back(SpillReg);
2094 for (const unsigned *SR = tri_->getSubRegisters(SpillReg); *SR; ++SR)
2095 if (hasInterval(*SR))
2096 PRegs.push_back(*SR);
2099 dbgs() << "Trying to spill:";
2100 for (unsigned i = 0, e = PRegs.size(); i != e; ++i)
2101 dbgs() << ' ' << tri_->getName(PRegs[i]);
2105 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2106 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2107 E = mri_->reg_end(); I != E; ++I) {
2108 MachineOperand &O = I.getOperand();
2109 MachineInstr *MI = O.getParent();
2110 if (MI->isDebugValue() || SeenMIs.count(MI))
2113 SlotIndex Index = getInstructionIndex(MI);
2114 bool LiveReg = false;
2115 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2116 unsigned PReg = PRegs[i];
2117 LiveInterval &pli = getInterval(PReg);
2118 if (!pli.liveAt(Index))
2121 SlotIndex StartIdx = Index.getLoadIndex();
2122 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2123 if (!pli.isInOneLiveRange(StartIdx, EndIdx)) {
2125 raw_string_ostream Msg(msg);
2126 Msg << "Ran out of registers during register allocation!";
2127 if (MI->isInlineAsm()) {
2128 Msg << "\nPlease check your inline asm statement for invalid "
2129 << "constraints:\n";
2130 MI->print(Msg, tm_);
2132 report_fatal_error(Msg.str());
2134 pli.removeRange(StartIdx, EndIdx);
2139 DEBUG(dbgs() << "Emergency spill around " << Index << '\t' << *MI);
2140 vrm.addEmergencySpill(SpillReg, MI);
2146 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2147 MachineInstr* startInst) {
2148 LiveInterval& Interval = getOrCreateInterval(reg);
2149 VNInfo* VN = Interval.getNextValue(
2150 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2151 startInst, getVNInfoAllocator());
2152 VN->setHasPHIKill(true);
2154 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2155 getMBBEndIdx(startInst->getParent()), VN);
2156 Interval.addRange(LR);