1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/CalcSpillWeights.h"
24 #include "llvm/CodeGen/LiveVariables.h"
25 #include "llvm/CodeGen/MachineFrameInfo.h"
26 #include "llvm/CodeGen/MachineInstr.h"
27 #include "llvm/CodeGen/MachineInstrBuilder.h"
28 #include "llvm/CodeGen/MachineLoopInfo.h"
29 #include "llvm/CodeGen/MachineMemOperand.h"
30 #include "llvm/CodeGen/MachineRegisterInfo.h"
31 #include "llvm/CodeGen/Passes.h"
32 #include "llvm/CodeGen/ProcessImplicitDefs.h"
33 #include "llvm/Target/TargetRegisterInfo.h"
34 #include "llvm/Target/TargetInstrInfo.h"
35 #include "llvm/Target/TargetMachine.h"
36 #include "llvm/Target/TargetOptions.h"
37 #include "llvm/Support/CommandLine.h"
38 #include "llvm/Support/Debug.h"
39 #include "llvm/Support/ErrorHandling.h"
40 #include "llvm/Support/raw_ostream.h"
41 #include "llvm/ADT/DepthFirstIterator.h"
42 #include "llvm/ADT/SmallSet.h"
43 #include "llvm/ADT/Statistic.h"
44 #include "llvm/ADT/STLExtras.h"
50 // Hidden options for help debugging.
// Hidden command-line flag: when set, disables rematerialization entirely.
// cl::Hidden keeps it out of the normal -help listing (debugging aid only).
51 static cl::opt<bool> DisableReMat("disable-rematerialization",
52 cl::init(false), cl::Hidden);
// Pass statistics, reported with -stats.
54 STATISTIC(numIntervals , "Number of original intervals");
55 STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
56 STATISTIC(numSplits    , "Number of intervals split");
// Static pass ID and registration with the pass registry, declaring the
// analyses this pass depends on (LiveVariables, MachineLoopInfo,
// PHIElimination, TwoAddressInstructionPass, ProcessImplicitDefs,
// SlotIndexes, AliasAnalysis).
58 char LiveIntervals::ID = 0;
59 INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
60 "Live Interval Analysis", false, false)
61 INITIALIZE_PASS_DEPENDENCY(LiveVariables)
62 INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
63 INITIALIZE_PASS_DEPENDENCY(PHIElimination)
64 INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
65 INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
66 INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
67 INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
68 INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
69 "Live Interval Analysis", false, false)
// getAnalysisUsage - Record which analyses this pass requires and which it
// preserves, then delegate to the base class.
// NOTE(review): the embedded numbering skips lines (72, 80-81, 84-85, 92-93),
// so this body is an incomplete excerpt — verify against upstream.
71 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
73 AU.addRequired<AliasAnalysis>();
74 AU.addPreserved<AliasAnalysis>();
75 AU.addRequired<LiveVariables>();
76 AU.addPreserved<LiveVariables>();
77 AU.addRequired<MachineLoopInfo>();
78 AU.addPreserved<MachineLoopInfo>();
79 AU.addPreservedID(MachineDominatorsID);
82 AU.addPreservedID(PHIEliminationID);
83 AU.addRequiredID(PHIEliminationID);
86 AU.addRequiredID(TwoAddressInstructionPassID);
87 AU.addPreserved<ProcessImplicitDefs>();
88 AU.addRequired<ProcessImplicitDefs>();
89 AU.addPreserved<SlotIndexes>();
// SlotIndexes must stay alive as long as this analysis is used, hence the
// transitive requirement.
90 AU.addRequiredTransitive<SlotIndexes>();
91 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Free all per-function state: the LiveInterval objects,
// the VNInfo allocator's memory, and any cloned instructions.
// NOTE(review): lines 98-101 are elided in this listing (presumably the
// deletion of each interval and clearing of r2iMap_) — confirm upstream.
94 void LiveIntervals::releaseMemory() {
95 // Free the live intervals themselves.
96 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
97 E = r2iMap_.end(); I != E; ++I)
102 // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
103 VNInfoAllocator.Reset();
104 while (!CloneMIs.empty()) {
105 MachineInstr *MI = CloneMIs.back();
// CloneMIs holds instructions this pass created; return them to the function.
107 mf_->DeleteMachineInstr(MI);
111 /// runOnMachineFunction - Register allocate the whole function
///
/// Caches pointers to the target/register/instruction info and the required
/// analyses, then (in elided lines) computes the live intervals.
/// NOTE(review): lines 114, 123-131 are elided (likely the mf_ assignment,
/// the computeIntervals() call, and the return) — verify against upstream.
113 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
115 mri_ = &mf_->getRegInfo();
116 tm_ = &fn.getTarget();
117 tri_ = tm_->getRegisterInfo();
118 tii_ = tm_->getInstrInfo();
119 aa_ = &getAnalysis<AliasAnalysis>();
120 lv_ = &getAnalysis<LiveVariables>();
121 indexes_ = &getAnalysis<SlotIndexes>();
122 allocatableRegs_ = tri_->getAllocatableSet(fn);
// Accumulate the interval count into the -stats counter.
126 numIntervals += getNumIntervals();
132 /// print - Implement the dump method.
///
/// Prints every interval in the map, then (in elided lines, presumably)
/// the machine instructions via printInstrs.
133 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
134 OS << "********** INTERVALS **********\n";
135 for (const_iterator I = begin(), E = end(); I != E; ++I) {
136 I->second->print(OS, tri_);
// printInstrs - Print the function's machine instructions annotated with
// slot indexes.
143 void LiveIntervals::printInstrs(raw_ostream &OS) const {
144 OS << "********** MACHINEINSTRS **********\n";
145 mf_->print(OS, indexes_);
148 void LiveIntervals::dumpInstrs() const {
// conflictsWithPhysReg - Conservatively determine whether assigning physreg
// 'reg' to interval 'li' would conflict with another operand in the
// instructions spanned by li's single range. Only single-range (single-block)
// intervals are handled.
// NOTE(review): several lines are elided (156, 166-169, 176-179, 183-184,
// 186, 189, 191, 194-195, 199-200, 203, 206, 208, 210-213, 215-216) —
// early returns and operand filters are missing from this excerpt.
152 bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
153 VirtRegMap &vrm, unsigned reg) {
154 // We don't handle fancy stuff crossing basic block boundaries
155 if (li.ranges.size() != 1)
157 const LiveRange &range = li.ranges.front();
158 SlotIndex idx = range.start.getBaseIndex();
159 SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
161 // Skip deleted instructions
162 MachineInstr *firstMI = getInstructionFromIndex(idx);
163 while (!firstMI && idx != end) {
164 idx = idx.getNextIndex();
165 firstMI = getInstructionFromIndex(idx);
170 // Find last instruction in range
171 SlotIndex lastIdx = end.getPrevIndex();
172 MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
173 while (!lastMI && lastIdx != idx) {
174 lastIdx = lastIdx.getPrevIndex();
175 lastMI = getInstructionFromIndex(lastIdx);
180 // Range cannot cross basic block boundaries or terminators
181 MachineBasicBlock *MBB = firstMI->getParent();
182 if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
185 MachineBasicBlock::const_iterator E = lastMI;
187 for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
188 const MachineInstr &MI = *I;
190 // Allow copies to and from li.reg
192 if (MI.getOperand(0).getReg() == li.reg ||
193 MI.getOperand(1).getReg() == li.reg)
196 // Check for operands using reg
197 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
198 const MachineOperand& mop = MI.getOperand(i);
201 unsigned PhysReg = mop.getReg();
202 if (PhysReg == 0 || PhysReg == li.reg)
// Map a virtual operand through the VirtRegMap to its assigned physreg,
// if it has one.
204 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
205 if (!vrm.hasPhys(PhysReg))
207 PhysReg = vrm.getPhys(PhysReg);
// Aliasing physregs count as conflicts, not just exact matches.
209 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
214 // No conflicts found.
// conflictsWithAliasRef - Walk every instruction index covered by li's
// ranges and report whether any operand references a physical register
// aliasing 'Reg'. Instructions in JoinedCopies are ignored (they are
// scheduled for deletion by the coalescer).
// NOTE(review): lines 224, 227, 229, 231, 234-235, 239, 241-249 are elided —
// the loop condition, operand filter, and returns are missing here.
218 bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
219 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
220 for (LiveInterval::Ranges::const_iterator
221 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
222 for (SlotIndex index = I->start.getBaseIndex(),
223 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
225 index = index.getNextIndex()) {
226 MachineInstr *MI = getInstructionFromIndex(index);
228 continue; // skip deleted instructions
230 if (JoinedCopies.count(MI))
232 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
233 MachineOperand& MO = MI->getOperand(i);
236 unsigned PhysReg = MO.getReg();
// Only physical registers other than Reg itself can alias-conflict.
237 if (PhysReg == 0 || PhysReg == Reg ||
238 TargetRegisterInfo::isVirtualRegister(PhysReg))
240 if (tri_->regsOverlap(Reg, PhysReg))
// MultipleDefsBySameMI - Return true if the instruction defines the register
// at operand MOIdx more than once (e.g. two subreg defs of the same vreg by
// one instruction, as produced by REG_SEQUENCE elimination).
// NOTE(review): lines 254-255 and 260-265 (operand filter, returns, closing
// braces) are elided from this listing.
250 bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
251 unsigned Reg = MI.getOperand(MOIdx).getReg();
252 for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
253 const MachineOperand &MO = MI.getOperand(i);
256 if (MO.getReg() == Reg && MO.isDef()) {
// A second full def of the same reg would be malformed: both defs must be
// subreg defs (or the later one implicit).
257 assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
258 MI.getOperand(MOIdx).getSubReg() &&
259 (MO.getSubReg() || MO.isImplicit()));
266 /// isPartialRedef - Return true if the specified def at the specific index is
267 /// partially re-defining the specified live interval. A common case of this is
268 /// a definition of the sub-register.
///
/// Only subreg, non-early-clobber defs qualify; the previous defining
/// instruction must itself define interval.reg for this to be a partial redef.
/// NOTE(review): lines 272-273 and 278 are elided (likely an early return
/// and a null-check on OldLR/DefMI).
269 bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
270 LiveInterval &interval) {
271 if (!MO.getSubReg() || MO.isEarlyClobber())
274 SlotIndex RedefIndex = MIIdx.getDefIndex();
275 const LiveRange *OldLR =
276 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
277 MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
279 return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
// handleVirtualRegisterDef - Build or extend the live interval for a virtual
// register definition. The first def of a vreg creates value #0 and its
// ranges (intra-block kill, or live-out plus alive-blocks plus kill-blocks);
// subsequent defs are either redundant same-MI defs, two-address/partial
// redefs (which split the existing value), or phi-join defs.
// NOTE(review): this listing elides many lines throughout (e.g. 286-288,
// 301, 304, 307, 309, 312-314, 317, 324, 327, 329, 331, 338-341, 349,
// 351-352, 358, 361, 368-370, 377, 380, 385, 389-391, 397-398, 403, 413,
// 417, 422, 426, 428, 430, 434, 438, 443, 446, 448-450, 453, 458, 462-463,
// 466, 468, 474, 476-478) — parameters, returns, and closing braces are
// incomplete; verify control flow against upstream before editing.
284 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
285 MachineBasicBlock::iterator mi,
289 LiveInterval &interval) {
290 DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));
292 // Virtual registers may be defined multiple times (due to phi
293 // elimination and 2-addr elimination). Much of what we do only has to be
294 // done once for the vreg. We use an empty interval to detect the first
295 // time we see a vreg.
296 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
297 if (interval.empty()) {
298 // Get the Idx of the defining instructions.
299 SlotIndex defIndex = MIIdx.getDefIndex();
300 // Earlyclobbers move back one, so that they overlap the live range
302 if (MO.isEarlyClobber())
303 defIndex = MIIdx.getUseIndex();
305 // Make sure the first definition is not a partial redefinition. Add an
306 // <imp-def> of the full register.
308 mi->addRegisterDefined(interval.reg);
310 MachineInstr *CopyMI = NULL;
311 if (mi->isCopyLike()) {
315 VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
316 assert(ValNo->id == 0 && "First value in interval is not 0?");
318 // Loop over all of the blocks that the vreg is defined in. There are
319 // two cases we have to handle here. The most common case is a vreg
320 // whose lifetime is contained within a basic block. In this case there
321 // will be a single kill, in MBB, which comes after the definition.
322 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
323 // FIXME: what about dead vars?
325 if (vi.Kills[0] != mi)
326 killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
328 killIdx = defIndex.getStoreIndex();
330 // If the kill happens after the definition, we have an intra-block
332 if (killIdx > defIndex) {
333 assert(vi.AliveBlocks.empty() &&
334 "Shouldn't be alive across any blocks!");
335 LiveRange LR(defIndex, killIdx, ValNo);
336 interval.addRange(LR);
337 DEBUG(dbgs() << " +" << LR << "\n");
342 // The other case we handle is when a virtual register lives to the end
343 // of the defining block, potentially live across some blocks, then is
344 // live into some number of blocks, but gets killed. Start by adding a
345 // range that goes from this definition to the end of the defining block.
346 LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
347 DEBUG(dbgs() << " +" << NewLR);
348 interval.addRange(NewLR);
350 bool PHIJoin = lv_->isPHIJoin(interval.reg);
353 // A phi join register is killed at the end of the MBB and revived as a new
354 // valno in the killing blocks.
355 assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
356 DEBUG(dbgs() << " phi-join");
357 ValNo->setHasPHIKill(true);
359 // Iterate over all of the blocks that the variable is completely
360 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
362 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
363 E = vi.AliveBlocks.end(); I != E; ++I) {
364 MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
365 LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
366 interval.addRange(LR);
367 DEBUG(dbgs() << " +" << LR);
371 // Finally, this virtual register is live from the start of any killing
372 // block to the 'use' slot of the killing instruction.
373 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
374 MachineInstr *Kill = vi.Kills[i];
375 SlotIndex Start = getMBBStartIdx(Kill->getParent());
376 SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();
378 // Create interval with one of a NEW value number. Note that this value
379 // number isn't actually defined by an instruction, weird huh? :)
381 assert(getInstructionFromIndex(Start) == 0 &&
382 "PHI def index points at actual instruction.");
383 ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
384 ValNo->setIsPHIDef(true);
386 LiveRange LR(Start, killIdx, ValNo);
387 interval.addRange(LR);
388 DEBUG(dbgs() << " +" << LR);
392 if (MultipleDefsBySameMI(*mi, MOIdx))
393 // Multiple defs of the same virtual register by the same instruction.
394 // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
395 // This is likely due to elimination of REG_SEQUENCE instructions. Return
396 // here since there is nothing to do.
399 // If this is the second time we see a virtual register definition, it
400 // must be due to phi elimination or two addr elimination. If this is
401 // the result of two address elimination, then the vreg is one of the
402 // def-and-use register operand.
404 // It may also be partial redef like this:
405 // 80 %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
406 // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
407 bool PartReDef = isPartialRedef(MIIdx, MO, interval);
408 if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
409 // If this is a two-address definition, then we have already processed
410 // the live range. The only problem is that we didn't realize there
411 // are actually two values in the live interval. Because of this we
412 // need to take the LiveRegion that defines this register and split it
414 SlotIndex RedefIndex = MIIdx.getDefIndex();
415 if (MO.isEarlyClobber())
416 RedefIndex = MIIdx.getUseIndex();
418 const LiveRange *OldLR =
419 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
420 VNInfo *OldValNo = OldLR->valno;
421 SlotIndex DefIndex = OldValNo->def.getDefIndex();
423 // Delete the previous value, which should be short and continuous,
424 // because the 2-addr copy must be in the same MBB as the redef.
425 interval.removeRange(DefIndex, RedefIndex);
427 // The new value number (#1) is defined by the instruction we claimed
429 VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);
431 // Value#0 is now defined by the 2-addr instruction.
432 OldValNo->def = RedefIndex;
433 OldValNo->setCopy(0);
435 // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
436 if (PartReDef && mi->isCopyLike())
437 OldValNo->setCopy(&*mi);
439 // Add the new live interval which replaces the range for the input copy.
440 LiveRange LR(DefIndex, RedefIndex, ValNo);
441 DEBUG(dbgs() << " replace range with " << LR);
442 interval.addRange(LR);
444 // If this redefinition is dead, we need to add a dummy unit live
445 // range covering the def slot.
447 interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
451 dbgs() << " RESULT: ";
452 interval.print(dbgs(), tri_);
454 } else if (lv_->isPHIJoin(interval.reg)) {
455 // In the case of PHI elimination, each variable definition is only
456 // live until the end of the block. We've already taken care of the
457 // rest of the live range.
459 SlotIndex defIndex = MIIdx.getDefIndex();
460 if (MO.isEarlyClobber())
461 defIndex = MIIdx.getUseIndex();
464 MachineInstr *CopyMI = NULL;
465 if (mi->isCopyLike())
467 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
469 SlotIndex killIndex = getMBBEndIdx(mbb);
470 LiveRange LR(defIndex, killIndex, ValNo);
471 interval.addRange(LR);
472 ValNo->setHasPHIKill(true);
473 DEBUG(dbgs() << " phi-join +" << LR);
475 llvm_unreachable("Multiply defined register");
479 DEBUG(dbgs() << '\n');
// handlePhysicalRegisterDef - Build the live range for a physical register
// def. Physregs never live across blocks here, so the range is clipped to
// the defining block: [def, kill-use+1), [def, redef), or a one-slot dead
// range. If a value already exists at 'start', the old interval is extended.
// NOTE(review): elided lines (484-485, 491, 498, 504, 507-509, 515, 517,
// 520, 524-525, 527, 531, 538-542, 544-545, 551-552, 554, 558, 565-566)
// omit parameters, branch bodies and closing braces; verify upstream.
482 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
483 MachineBasicBlock::iterator mi,
486 LiveInterval &interval,
487 MachineInstr *CopyMI) {
488 // A physical register cannot be live across basic block, so its
489 // lifetime must end somewhere in its defining basic block.
490 DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));
492 SlotIndex baseIndex = MIIdx;
493 SlotIndex start = baseIndex.getDefIndex();
494 // Earlyclobbers move back one.
495 if (MO.isEarlyClobber())
496 start = MIIdx.getUseIndex();
497 SlotIndex end = start;
499 // If it is not used after definition, it is considered dead at
500 // the instruction defining it. Hence its interval is:
501 // [defSlot(def), defSlot(def)+1)
502 // For earlyclobbers, the defSlot was pushed back one; the extra
503 // advance below compensates.
505 DEBUG(dbgs() << " dead");
506 end = start.getStoreIndex();
510 // If it is not dead on definition, it must be killed by a
511 // subsequent instruction. Hence its interval is:
512 // [defSlot(def), useSlot(kill)+1)
513 baseIndex = baseIndex.getNextIndex();
514 while (++mi != MBB->end()) {
516 if (mi->isDebugValue())
518 if (getInstructionFromIndex(baseIndex) == 0)
519 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
521 if (mi->killsRegister(interval.reg, tri_)) {
522 DEBUG(dbgs() << " killed");
523 end = baseIndex.getDefIndex();
526 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
528 if (mi->isRegTiedToUseOperand(DefIdx)) {
529 // Two-address instruction.
530 end = baseIndex.getDefIndex();
532 // Another instruction redefines the register before it is ever read.
533 // Then the register is essentially dead at the instruction that
534 // defines it. Hence its interval is:
535 // [defSlot(def), defSlot(def)+1)
536 DEBUG(dbgs() << " dead");
537 end = start.getStoreIndex();
543 baseIndex = baseIndex.getNextIndex();
546 // The only case we should have a dead physreg here without a killing or
547 // instruction where we know it's dead is if it is live-in to the function
548 // and never used. Another possible case is the implicit use of the
549 // physical register has been deleted by two-address pass.
550 end = start.getStoreIndex();
553 assert(start < end && "did not find end of interval?");
555 // Already exists? Extend old live interval.
556 VNInfo *ValNo = interval.getVNInfoAt(start);
557 bool Extend = ValNo != 0;
559 ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
560 if (Extend && MO.isEarlyClobber())
561 ValNo->setHasRedefByEC(true);
562 LiveRange LR(start, end, ValNo);
563 interval.addRange(LR);
564 DEBUG(dbgs() << " +" << LR << '\n');
// handleRegisterDef - Dispatch a register def to the virtual- or
// physical-register handler, creating the interval on first use.
// NOTE(review): lines 569-571, 575, 578, 581-583 are elided (remaining
// parameters, the else branch structure, and closing braces).
567 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
568 MachineBasicBlock::iterator MI,
572 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
573 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
574 getOrCreateInterval(MO.getReg()));
576 MachineInstr *CopyMI = NULL;
577 if (MI->isCopyLike())
579 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
580 getOrCreateInterval(MO.getReg()), CopyMI);
// handleLiveInRegister - Build the live range for a register that is live-in
// to MBB: scan forward from the block start for a kill or redef, handle the
// never-used (dead) and live-through cases, and mark the value as a PHI def
// at the block start.
// NOTE(review): elided lines (585, 588, 596-597, 599-601, 606, 609-610,
// 614-615, 623-626, 629-630, 632-633, 635-636, 639, 642-644, 648, 652,
// 655-656) omit the loop header, branch endings, and closing braces.
584 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
586 LiveInterval &interval, bool isAlias) {
587 DEBUG(dbgs() << "\t\tlivein register: " << PrintReg(interval.reg, tri_));
589 // Look for kills, if it reaches a def before it's killed, then it shouldn't
590 // be considered a livein.
591 MachineBasicBlock::iterator mi = MBB->begin();
592 MachineBasicBlock::iterator E = MBB->end();
593 // Skip over DBG_VALUE at the start of the MBB.
594 if (mi != E && mi->isDebugValue()) {
595 while (++mi != E && mi->isDebugValue())
598 // MBB is empty except for DBG_VALUE's.
602 SlotIndex baseIndex = MIIdx;
603 SlotIndex start = baseIndex;
604 if (getInstructionFromIndex(baseIndex) == 0)
605 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
607 SlotIndex end = baseIndex;
608 bool SeenDefUse = false;
611 if (mi->killsRegister(interval.reg, tri_)) {
612 DEBUG(dbgs() << " killed");
613 end = baseIndex.getDefIndex();
616 } else if (mi->definesRegister(interval.reg, tri_)) {
617 // Another instruction redefines the register before it is ever read.
618 // Then the register is essentially dead at the instruction that defines
619 // it. Hence its interval is:
620 // [defSlot(def), defSlot(def)+1)
621 DEBUG(dbgs() << " dead");
622 end = start.getStoreIndex();
627 while (++mi != E && mi->isDebugValue())
628 // Skip over DBG_VALUE.
631 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
634 // Live-in register might not be used at all.
637 DEBUG(dbgs() << " dead");
638 end = MIIdx.getStoreIndex();
640 DEBUG(dbgs() << " live through");
641 end = getMBBEndIdx(MBB);
// Live-in values are represented as PHI defs at the block start index,
// which never corresponds to a real instruction.
645 SlotIndex defIdx = getMBBStartIdx(MBB);
646 assert(getInstructionFromIndex(defIdx) == 0 &&
647 "PHI def index points at actual instruction.");
649 interval.getNextValue(defIdx, 0, VNInfoAllocator);
650 vni->setIsPHIDef(true);
651 LiveRange LR(start, end, vni);
653 interval.addRange(LR);
654 DEBUG(dbgs() << " +" << LR << '\n');
657 /// computeIntervals - computes the live intervals for virtual
658 /// registers. for some ordering of the machine instructions [1,N] a
659 /// live interval is an interval [i, j) where 1 <= i <= j < N for
660 /// which a variable is live
///
/// Walks every block: first creates intervals for the block's live-in
/// registers (and their unseen sub-registers), then visits each instruction's
/// register operands, building intervals for defs and collecting <undef>
/// uses to get empty intervals afterwards.
/// NOTE(review): elided lines (665, 668, 670-672, 677, 686-688, 692, 694,
/// 697-699, 703-704, 706, 710-711, 714-716, 719, 723-725) omit loop
/// headers, operand predicates, and closing braces.
661 void LiveIntervals::computeIntervals() {
662 DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
663 << "********** Function: "
664 << ((Value*)mf_->getFunction())->getName() << '\n');
666 SmallVector<unsigned, 8> UndefUses;
667 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
669 MachineBasicBlock *MBB = MBBI;
673 // Track the index of the current machine instr.
674 SlotIndex MIIndex = getMBBStartIdx(MBB);
675 DEBUG(dbgs() << "BB#" << MBB->getNumber()
676 << ":\t\t# derived from " << MBB->getName() << "\n");
678 // Create intervals for live-ins to this BB first.
679 for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
680 LE = MBB->livein_end(); LI != LE; ++LI) {
681 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
682 // Multiple live-ins can alias the same register.
683 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
684 if (!hasInterval(*AS))
685 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
689 // Skip over empty initial indices.
690 if (getInstructionFromIndex(MIIndex) == 0)
691 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
693 for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
695 DEBUG(dbgs() << MIIndex << "\t" << *MI);
696 if (MI->isDebugValue())
// Operands are visited in reverse so that implicit operands (which come
// last) are processed before explicit ones.
700 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
701 MachineOperand &MO = MI->getOperand(i);
702 if (!MO.isReg() || !MO.getReg())
705 // handle register defs - build intervals
707 handleRegisterDef(MBB, MI, MIIndex, MO, i);
708 else if (MO.isUndef())
709 UndefUses.push_back(MO.getReg());
712 // Move to the next instr slot.
713 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
717 // Create empty intervals for registers defined by implicit_def's (except
718 // for those implicit_def that define values which are liveout of their
720 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
721 unsigned UndefReg = UndefUses[i];
722 (void)getOrCreateInterval(UndefReg);
// createInterval - Allocate a new LiveInterval for 'reg'. Physical registers
// get an effectively-infinite spill weight (HUGE_VALF) so they are never
// chosen for spilling; virtual registers start at weight 0.
726 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
727 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
728 return new LiveInterval(reg, Weight);
731 /// dupInterval - Duplicate a live interval. The caller is responsible for
732 /// managing the allocated memory.
///
/// Copies li's ranges and value numbers into a freshly created interval
/// (the return statement is elided in this listing; presumably NewLI).
733 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
734 LiveInterval *NewLI = createInterval(li->reg);
735 NewLI->Copy(*li, mri_, getVNInfoAllocator());
739 /// shrinkToUses - After removing some uses of a register, shrink its live
740 /// range to just the remaining uses. This method does not compute reaching
741 /// defs for new uses, and it doesn't remove dead defs.
///
/// Algorithm: collect (use-index, value) pairs for every remaining reader,
/// seed a fresh interval with one-slot ranges at each def, then extend
/// ranges backwards through blocks/predecessors until every use is covered.
/// Dead values (ranges never extended past the def slot) get dead flags on
/// their defining instruction; dead PHI values are marked unused and may
/// make the interval separable (the return value, elided here).
/// NOTE(review): elided lines (749, 752, 757, 760, 763, 766-768, 775,
/// 777-778, 782-785, 787, 790, 794-795, 798, 803, 806, 809, 813, 818,
/// 823-826, 830, 835, 839-841, 845-848, 852, 858-859, 866-870, 874-877)
/// omit loop conditions, continues, and closing braces.
742 bool LiveIntervals::shrinkToUses(LiveInterval *li,
743 SmallVectorImpl<MachineInstr*> *dead) {
744 DEBUG(dbgs() << "Shrink: " << *li << '\n');
745 assert(TargetRegisterInfo::isVirtualRegister(li->reg)
746 && "Can't only shrink physical registers");
747 // Find all the values used, including PHI kills.
748 SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;
750 // Blocks that have already been added to WorkList as live-out.
751 SmallPtrSet<MachineBasicBlock*, 16> LiveOut;
753 // Visit all instructions reading li->reg.
754 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li->reg);
755 MachineInstr *UseMI = I.skipInstruction();) {
756 if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
758 SlotIndex Idx = getInstructionIndex(UseMI).getUseIndex();
759 VNInfo *VNI = li->getVNInfoAt(Idx);
761 // This shouldn't happen: readsVirtualRegister returns true, but there is
762 // no live value. It is likely caused by a target getting <undef> flags
764 DEBUG(dbgs() << Idx << '\t' << *UseMI
765 << "Warning: Instr claims to read non-existent value in "
769 if (VNI->def == Idx) {
770 // Special case: An early-clobber tied operand reads and writes the
771 // register one slot early.
772 Idx = Idx.getPrevSlot();
773 VNI = li->getVNInfoAt(Idx);
774 assert(VNI && "Early-clobber tied value not available");
776 WorkList.push_back(std::make_pair(Idx, VNI));
779 // Create a new live interval with only minimal live segments per def.
780 LiveInterval NewLI(li->reg, 0);
781 for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
786 NewLI.addRange(LiveRange(VNI->def, VNI->def.getNextSlot(), VNI));
788 // A use tied to an early-clobber def ends at the load slot and isn't caught
789 // above. Catch it here instead. This probably only ever happens for inline
791 if (VNI->def.isUse())
792 if (VNInfo *UVNI = li->getVNInfoAt(VNI->def.getLoadIndex()))
793 WorkList.push_back(std::make_pair(VNI->def.getLoadIndex(), UVNI));
796 // Keep track of the PHIs that are in use.
797 SmallPtrSet<VNInfo*, 8> UsedPHIs;
799 // Extend intervals to reach all uses in WorkList.
800 while (!WorkList.empty()) {
801 SlotIndex Idx = WorkList.back().first;
802 VNInfo *VNI = WorkList.back().second;
804 const MachineBasicBlock *MBB = getMBBFromIndex(Idx);
805 SlotIndex BlockStart = getMBBStartIdx(MBB);
807 // Extend the live range for VNI to be live at Idx.
808 if (VNInfo *ExtVNI = NewLI.extendInBlock(BlockStart, Idx.getNextSlot())) {
810 assert(ExtVNI == VNI && "Unexpected existing value number");
811 // Is this a PHIDef we haven't seen before?
812 if (!VNI->isPHIDef() || VNI->def != BlockStart || !UsedPHIs.insert(VNI))
814 // The PHI is live, make sure the predecessors are live-out.
815 for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
816 PE = MBB->pred_end(); PI != PE; ++PI) {
817 if (!LiveOut.insert(*PI))
819 SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
820 // A predecessor is not required to have a live-out value for a PHI.
821 if (VNInfo *PVNI = li->getVNInfoAt(Stop))
822 WorkList.push_back(std::make_pair(Stop, PVNI));
827 // VNI is live-in to MBB.
828 DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
829 NewLI.addRange(LiveRange(BlockStart, Idx.getNextSlot(), VNI));
831 // Make sure VNI is live-out from the predecessors.
832 for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
833 PE = MBB->pred_end(); PI != PE; ++PI) {
834 if (!LiveOut.insert(*PI))
836 SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
837 assert(li->getVNInfoAt(Stop) == VNI && "Wrong value out of predecessor");
838 WorkList.push_back(std::make_pair(Stop, VNI));
842 // Handle dead values.
843 bool CanSeparate = false;
844 for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
849 LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def);
850 assert(LII != NewLI.end() && "Missing live range for PHI");
851 if (LII->end != VNI->def.getNextSlot())
853 if (VNI->isPHIDef()) {
854 // This is a dead PHI. Remove it.
855 VNI->setIsUnused(true);
856 NewLI.removeRange(*LII);
857 DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
860 // This is a dead def. Make sure the instruction knows.
861 MachineInstr *MI = getInstructionFromIndex(VNI->def);
862 assert(MI && "No instruction defining live value");
863 MI->addRegisterDead(li->reg, tri_);
864 if (dead && MI->allDefsAreDead()) {
865 DEBUG(dbgs() << "All defs dead: " << VNI->def << '\t' << *MI);
871 // Move the trimmed ranges back.
872 li->ranges.swap(NewLI.ranges);
873 DEBUG(dbgs() << "Shrunk: " << *li << '\n');
878 //===----------------------------------------------------------------------===//
879 // Register allocator hooks.
// getLastSplitPoint - Return the last safe point in mbb at which spill code
// for li may be inserted. Normally this is the first terminator; when the
// block has a landing-pad successor into which li is live, spill code must
// instead go before the (possibly throwing) call — found by scanning
// backwards (scan body partially elided: lines 895-896, 898-899).
882 MachineBasicBlock::iterator
883 LiveIntervals::getLastSplitPoint(const LiveInterval &li,
884 MachineBasicBlock *mbb) const {
885 const MachineBasicBlock *lpad = mbb->getLandingPadSuccessor();
887 // If li is not live into a landing pad, we can insert spill code before the
889 if (!lpad || !isLiveInToMBB(li, lpad))
890 return mbb->getFirstTerminator();
892 // When there is a landing pad, spill code must go before the call instruction
894 MachineBasicBlock::iterator I = mbb->end(), B = mbb->begin();
897 if (I->getDesc().isCall())
900 // The block contains no calls that can throw, so use the first terminator.
901 return mbb->getFirstTerminator();
// addKillFlags - Re-add kill flags to instructions: each live-range end
// point that is not a block boundary (LOAD slot) corresponds to a killing
// instruction. Physical registers and debug-only registers are skipped.
// NOTE(review): lines 908, 910, 912, 915, 918, 920-921, 923-926 are elided
// (continues, loop increment, closing braces).
904 void LiveIntervals::addKillFlags() {
905 for (iterator I = begin(), E = end(); I != E; ++I) {
906 unsigned Reg = I->first;
907 if (TargetRegisterInfo::isPhysicalRegister(Reg))
909 if (mri_->reg_nodbg_empty(Reg))
911 LiveInterval *LI = I->second;
913 // Every instruction that kills Reg corresponds to a live range end point.
914 for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE;
916 // A LOAD index indicates an MBB edge.
917 if (RI->end.isLoad())
919 MachineInstr *MI = getInstructionFromIndex(RI->end);
922 MI->addRegisterKilled(Reg, NULL);
927 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
928 /// allow one) virtual register operand, then its uses are implicitly using
929 /// the register. Returns the virtual register.
///
/// Skips non-register operands, uses of li.reg itself, and unallocatable
/// physical registers; asserts if more than one candidate remains.
/// NOTE(review): lines 932, 936, 939-940, 943, 945, 947-954 are elided
/// (the RegOp accumulator, continues, and return).
930 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
931 MachineInstr *MI) const {
933 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
934 MachineOperand &MO = MI->getOperand(i);
935 if (!MO.isReg() || !MO.isUse())
937 unsigned Reg = MO.getReg();
938 if (Reg == 0 || Reg == li.reg)
941 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
942 !allocatableRegs_[Reg])
944 // FIXME: For now, only remat MI with at most one register operand.
946 "Can't rematerialize instruction with multiple register operand!");
955 /// isValNoAvailableAt - Return true if the val# of the specified interval
956 /// which reaches the given instruction also reaches the specified use index.
///
/// Both indices must resolve to the same (non-null) value number in li.
957 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
958 SlotIndex UseIdx) const {
959 VNInfo *UValNo = li.getVNInfoAt(UseIdx);
960 return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
963 /// isReMaterializable - Returns true if the definition MI of the specified
964 /// val# of the specified interval is re-materializable.
///
/// Requires the target to consider MI trivially rematerializable; if MI has
/// an implicit virtual-register use, that register's value must be available
/// at every use of this val#, and must not itself be about to be spilled.
/// NOTE(review): lines 965, 969-972, 974-975, 980, 984, 988, 990-992, 995,
/// 998-1002 are elided (remaining parameters, returns, loop increments).
966 LiveIntervals::isReMaterializable(const LiveInterval &li,
967 const VNInfo *ValNo, MachineInstr *MI,
968 const SmallVectorImpl<LiveInterval*> *SpillIs,
973 if (!tii_->isTriviallyReMaterializable(MI, aa_))
976 // Target-specific code can mark an instruction as being rematerializable
977 // if it has one virtual reg use, though it had better be something like
978 // a PIC base register which is likely to be live everywhere.
979 unsigned ImpUse = getReMatImplicitUse(li, MI);
981 const LiveInterval &ImpLi = getInterval(ImpUse);
982 for (MachineRegisterInfo::use_nodbg_iterator
983 ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
985 MachineInstr *UseMI = &*ri;
986 SlotIndex UseIdx = getInstructionIndex(UseMI);
987 if (li.getVNInfoAt(UseIdx) != ValNo)
989 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
993 // If a register operand of the re-materialized instruction is going to
994 // be spilled next, then it's not legal to re-materialize this instruction.
996 for (unsigned i = 0, e = SpillIs->size(); i != e; ++i)
997 if (ImpUse == (*SpillIs)[i]->reg)
1003 /// isReMaterializable - Returns true if the definition MI of the specified
1004 /// val# of the specified interval is re-materializable.
///
/// Convenience overload: forwards to the full version with no spill list
/// and a throwaway isLoad flag (Dummy2 declaration elided at line 1007).
1005 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1006 const VNInfo *ValNo, MachineInstr *MI) {
1008 return isReMaterializable(li, ValNo, MI, 0, Dummy2);
1011 /// isReMaterializable - Returns true if every definition of MI of every
1012 /// val# of the specified interval is re-materializable.
// NOTE(review): loop header continuation, early returns, and closing braces
// are elided in this excerpt.
1014 LiveIntervals::isReMaterializable(const LiveInterval &li,
1015 const SmallVectorImpl<LiveInterval*> *SpillIs,
// Walk every value number of the interval; all live defs must pass the
// per-val# rematerializability check for the interval as a whole to qualify.
1018 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1020 const VNInfo *VNI = *i;
1021 if (VNI->isUnused())
1022 continue; // Dead val#.
1023 // Is the def for the val# rematerializable?
1024 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1027 bool DefIsLoad = false;
1029 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
// Accumulate whether any rematerializable def is a load (reported to the
// caller through an out-parameter on an elided signature line).
1031 isLoad |= DefIsLoad;
1036 /// FilterFoldedOps - Filter out two-address use operands. Return
1037 /// true if it finds any issue with the operands that ought to prevent
// folding. On success, FoldOps receives the operand indices that are safe
// to fold and MRInfo accumulates VirtRegMap::isMod / isRef / isModRef flags
// describing how the instruction touches the register.
// NOTE(review): the MRInfo parameter declaration and some branch lines are
// elided in this excerpt.
1039 static bool FilterFoldedOps(MachineInstr *MI,
1040 SmallVector<unsigned, 2> &Ops,
1042 SmallVector<unsigned, 2> &FoldOps) {
1044 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1045 unsigned OpIdx = Ops[i];
1046 MachineOperand &MO = MI->getOperand(OpIdx);
1047 // FIXME: fold subreg use.
// Record that the instruction writes the register.
1051 MRInfo |= (unsigned)VirtRegMap::isMod;
1053 // Filter out two-address use operand(s).
// A use tied to a def means the register is both read and written; mark it
// modref and do not add this operand to FoldOps.
1054 if (MI->isRegTiedToDefOperand(OpIdx)) {
1055 MRInfo = VirtRegMap::isModRef;
// Plain use: record the read and keep the operand as a folding candidate.
1058 MRInfo |= (unsigned)VirtRegMap::isRef;
1060 FoldOps.push_back(OpIdx);
1066 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1067 /// slot / to reg or any rematerialized load into ith operand of specified
1068 /// MI. If it is successul, MI is updated with the newly created MI and
// returns true. NOTE(review): several return statements and the failure
// paths are elided in this excerpt.
1070 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1071 VirtRegMap &vrm, MachineInstr *DefMI,
1073 SmallVector<unsigned, 2> &Ops,
1074 bool isSS, int Slot, unsigned Reg) {
1075 // If it is an implicit def instruction, just delete it.
1076 if (MI->isImplicitDef()) {
// Keep the slot-index and VirtRegMap bookkeeping consistent before erasing.
1077 RemoveMachineInstrFromMaps(MI);
1078 vrm.RemoveMachineInstrFromMaps(MI);
1079 MI->eraseFromParent();
1084 // Filter the list of operand indexes that are to be folded. Abort if
1085 // any operand will prevent folding.
1086 unsigned MRInfo = 0;
1087 SmallVector<unsigned, 2> FoldOps;
1088 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1091 // The only time it's safe to fold into a two address instruction is when
1092 // it's folding reload and spill from / into a spill stack slot.
1093 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Delegate to the target: fold either a stack-slot reference (isSS) or the
// rematerialized defining instruction into MI's operands.
1096 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
1097 : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
1099 // Remember this instruction uses the spill slot.
1100 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1102 // Attempt to fold the memory reference into the instruction. If
1103 // we can do this, we don't need to insert spill code.
1104 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1105 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Transfer all spill/restore/emergency-spill bookkeeping from the old
// instruction to the folded replacement, then swap it into the maps.
1106 vrm.transferSpillPts(MI, fmi);
1107 vrm.transferRestorePts(MI, fmi);
1108 vrm.transferEmergencySpills(MI, fmi);
1109 ReplaceMachineInstrInMaps(MI, fmi);
1110 MI->eraseFromParent();
1118 /// canFoldMemoryOperand - Returns true if the specified load / store
1119 /// folding is possible.
// Dry-run counterpart of tryFoldMemoryOperand: performs the same operand
// filtering and legality checks but asks the target instead of mutating MI.
// NOTE(review): the ReMat parameter declaration line is elided here.
1120 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1121 SmallVector<unsigned, 2> &Ops,
1123 // Filter the list of operand indexes that are to be folded. Abort if
1124 // any operand will prevent folding.
1125 unsigned MRInfo = 0;
1126 SmallVector<unsigned, 2> FoldOps;
1127 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1130 // It's only legal to remat for a use, not a def.
1131 if (ReMat && (MRInfo & VirtRegMap::isMod))
1134 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return true if every range of the interval lies
// within a single machine basic block (used to decide whether splitting is
// worthwhile). NOTE(review): the comparison/return lines are elided here.
1137 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1138 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
// Block covering the first range; all remaining ranges must map to the same
// block for the interval to be considered single-MBB.
1140 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
1145 for (++itr; itr != li.ranges.end(); ++itr) {
1146 MachineBasicBlock *mbb2 =
1147 indexes_->getMBBCoveringRange(itr->start, itr->end);
1156 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1157 /// interval on to-be re-materialized operands of MI) with new register.
1158 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1159 MachineInstr *MI, unsigned NewVReg,
1161 // There is an implicit use. That means one of the other operand is
1162 // being remat'ed and the remat'ed instruction has li.reg as an
1163 // use operand. Make sure we rewrite that as well.
1164 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1165 MachineOperand &MO = MI->getOperand(i);
// Only virtual registers that have a rematerialization record in the
// VirtRegMap are of interest; physical regs and ordinary vregs are skipped.
1168 unsigned Reg = MO.getReg();
1169 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1171 if (!vrm.isReMaterialized(Reg))
// Patch the cached remat instruction so its use of li.reg now refers to the
// freshly created NewVReg.
1173 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1174 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1176 UseMO->setReg(NewVReg);
1180 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1181 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// rewriteInstructionForSpills rewrites a single instruction that touches
// li.reg: it may delete a fully-rematerialized def, fold a load/store into
// the instruction, or rewrite the operands to a fresh vreg (NewVReg) and
// extend/create that vreg's live interval. Returns whether the rewritten
// instruction's operands can all still be folded later (CanFold).
// NOTE(review): this excerpt elides many interior lines (the MI parameter,
// else-branches, the RestartInstruction label, returns, closing braces);
// comments below describe only what is visible.
1182 bool LiveIntervals::
1183 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1184 bool TrySplit, SlotIndex index, SlotIndex end,
1186 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1187 unsigned Slot, int LdSlot,
1188 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1190 const TargetRegisterClass* rc,
1191 SmallVector<int, 4> &ReMatIds,
1192 const MachineLoopInfo *loopInfo,
1193 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1194 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1195 std::vector<LiveInterval*> &NewLIs) {
1196 bool CanFold = false;
// Scan every operand; non-virtual and unrelated registers are filtered out
// on (partially elided) guard lines.
1198 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1199 MachineOperand& mop = MI->getOperand(i);
1202 unsigned Reg = mop.getReg();
1203 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1208 bool TryFold = !DefIsReMat;
1209 bool FoldSS = true; // Default behavior unless it's a remat.
1210 int FoldSlot = Slot;
1212 // If this is the rematerializable definition MI itself and
1213 // all of its uses are rematerialized, simply delete it.
1214 if (MI == ReMatOrigDefMI && CanDelete) {
1215 DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
1217 RemoveMachineInstrFromMaps(MI);
1218 vrm.RemoveMachineInstrFromMaps(MI);
1219 MI->eraseFromParent();
1223 // If def for this use can't be rematerialized, then try folding.
1224 // If def is rematerializable and it's a load, also try folding.
1225 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1227 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1233 // Scan all of the operands of this instruction rewriting operands
1234 // to use NewVReg instead of li.reg as appropriate. We do this for
1237 // 1. If the instr reads the same spilled vreg multiple times, we
1238 // want to reuse the NewVReg.
1239 // 2. If the instr is a two-addr instruction, we are required to
1240 // keep the src/dst regs pinned.
1242 // Keep track of whether we replace a use and/or def so that we can
1243 // create the spill interval with the appropriate range.
1244 SmallVector<unsigned, 2> Ops;
1245 tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);
1247 // Create a new virtual register for the spill interval.
1248 // Create the new register now so we can map the fold instruction
1249 // to the new register so when it is unfolded we get the correct
1251 bool CreatedNewVReg = false;
1253 NewVReg = mri_->createVirtualRegister(rc);
1255 CreatedNewVReg = true;
1257 // The new virtual register should get the same allocation hints as the
1259 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1260 if (Hint.first || Hint.second)
1261 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1267 // Do not fold load / store here if we are splitting. We'll find an
1268 // optimal point to insert a load / store later.
1270 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1271 Ops, FoldSS, FoldSlot, NewVReg)) {
1272 // Folding the load/store can completely change the instruction in
1273 // unpredictable ways, rescan it from the beginning.
1276 // We need to give the new vreg the same stack slot as the
1277 // spilled interval.
1278 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
// Folding may have deleted MI entirely; if it's gone from the maps, there is
// nothing further to rewrite. Otherwise restart the operand scan on the
// (possibly replaced) instruction.
1284 if (isNotInMIMap(MI))
1286 goto RestartInstruction;
1289 // We'll try to fold it later if it's profitable.
1290 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand (and all other operands of the same register, via the
// Ops list below) to the new vreg; implicit operands also require patching
// the cached remat instructions.
1294 mop.setReg(NewVReg);
1295 if (mop.isImplicit())
1296 rewriteImplicitOps(li, MI, NewVReg, vrm);
1298 // Reuse NewVReg for other reads.
1299 bool HasEarlyClobber = false;
1300 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1301 MachineOperand &mopj = MI->getOperand(Ops[j]);
1302 mopj.setReg(NewVReg);
1303 if (mopj.isImplicit())
1304 rewriteImplicitOps(li, MI, NewVReg, vrm);
1305 if (mopj.isEarlyClobber())
1306 HasEarlyClobber = true;
1309 if (CreatedNewVReg) {
// Propagate remat info to the new vreg; remat ids are assigned lazily, one
// per value number, and shared by all vregs created for the same val#.
1311 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1312 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1313 // Each valnum may have its own remat id.
1314 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1316 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1318 if (!CanDelete || (HasUse && HasDef)) {
1319 // If this is a two-addr instruction then its use operands are
1320 // rematerializable but its def is not. It should be assigned a
1322 vrm.assignVirt2StackSlot(NewVReg, Slot);
1325 vrm.assignVirt2StackSlot(NewVReg, Slot);
1327 } else if (HasUse && HasDef &&
1328 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1329 // If this interval hasn't been assigned a stack slot (because earlier
1330 // def is a deleted remat def), do it now.
1331 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1332 vrm.assignVirt2StackSlot(NewVReg, Slot);
1335 // Re-matting an instruction with virtual register use. Add the
1336 // register as an implicit use on the use MI.
1337 if (DefIsReMat && ImpUse)
1338 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1340 // Create a new register interval for this spill / remat.
1341 LiveInterval &nI = getOrCreateInterval(NewVReg);
1342 if (CreatedNewVReg) {
1343 NewLIs.push_back(&nI);
1344 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
// When splitting (presumably the TrySplit case — the guard line is elided),
// remember which register this new vreg was split from.
1346 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Grow nI to cover this def and/or use. A use on a fresh vreg gets a range
// from the load slot to the def slot; a reused split vreg is extended from
// the end of its last range instead.
1350 if (CreatedNewVReg) {
1351 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1352 nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
1353 DEBUG(dbgs() << " +" << LR);
1356 // Extend the split live interval to this def / use.
1357 SlotIndex End = index.getDefIndex();
1358 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1359 nI.getValNumInfo(nI.getNumValNums()-1));
1360 DEBUG(dbgs() << " +" << LR);
1365 // An early clobber starts at the use slot, except for an early clobber
1366 // tied to a use operand (yes, that is a thing).
1367 LiveRange LR(HasEarlyClobber && !HasUse ?
1368 index.getUseIndex() : index.getDefIndex(),
1369 index.getStoreIndex(),
1370 nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
1371 DEBUG(dbgs() << " +" << LR);
1376 dbgs() << "\t\t\t\tAdded new interval: ";
1377 nI.print(dbgs(), tri_);
// anyKillInMBBAfterIdx - Return true if li has a kill in MBB strictly after
// Idx (searches from the slot after Idx up to the block's end index).
// NOTE(review): a parameter line (likely the VNInfo*) is elided between the
// signature lines shown here.
1383 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1385 MachineBasicBlock *MBB,
1386 SlotIndex Idx) const {
1387 return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
1390 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1391 /// during spilling.
// Pairs an instruction with its slot index so rewrite candidates can be
// collected and then processed in instruction order. (Member declarations
// are elided in this excerpt; only the constructor is visible.)
1393 struct RewriteInfo {
1396 RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
// Strict-weak ordering by slot index, for std::sort over RewriteInfo.
1399 struct RewriteInfoCompare {
1400 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1401 return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Drive rewriteInstructionForSpills over every
// instruction that touches li.reg inside the live range [I->start, I->end):
// collect candidates sorted by slot index, rewrite them, and record per-MBB
// spill points (SpillIdxes/SpillMBBs) and restore points
// (RestoreIdxes/RestoreMBBs) for the caller to materialize later.
// NOTE(review): this excerpt elides many interior lines (the vrm parameter,
// continues, else-branches, closing braces); comments describe only the
// visible logic.
1406 void LiveIntervals::
1407 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1408 LiveInterval::Ranges::const_iterator &I,
1409 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1410 unsigned Slot, int LdSlot,
1411 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1413 const TargetRegisterClass* rc,
1414 SmallVector<int, 4> &ReMatIds,
1415 const MachineLoopInfo *loopInfo,
1416 BitVector &SpillMBBs,
1417 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1418 BitVector &RestoreMBBs,
1419 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1420 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1421 std::vector<LiveInterval*> &NewLIs) {
1422 bool AllCanFold = true;
1423 unsigned NewVReg = 0;
// Round the range endpoints to whole-instruction (base index) boundaries.
1424 SlotIndex start = I->start.getBaseIndex();
1425 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1427 // First collect all the def / use in this live range that will be rewritten.
1428 // Make sure they are sorted according to instruction index.
1429 std::vector<RewriteInfo> RewriteMIs;
1430 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1431 re = mri_->reg_end(); ri != re; ) {
1432 MachineInstr *MI = &*ri;
1433 MachineOperand &O = ri.getOperand();
1435 if (MI->isDebugValue()) {
1436 // Modify DBG_VALUE now that the value is in a spill slot.
1437 if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
1438 uint64_t Offset = MI->getOperand(1).getImm();
1439 const MDNode *MDPtr = MI->getOperand(2).getMetadata();
1440 DebugLoc DL = MI->getDebugLoc();
1441 int FI = isLoadSS ? LdSlot : (int)Slot;
// If the target can express a frame-index DBG_VALUE, swap it in in place;
// otherwise (fall-through to line 1452 below) the debug info is dropped.
1442 if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
1443 Offset, MDPtr, DL)) {
1444 DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
1445 ReplaceMachineInstrInMaps(MI, NewDV);
1446 MachineBasicBlock *MBB = MI->getParent();
1447 MBB->insert(MBB->erase(MI), NewDV);
1452 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1453 RemoveMachineInstrFromMaps(MI);
1454 vrm.RemoveMachineInstrFromMaps(MI);
1455 MI->eraseFromParent();
1458 assert(!(O.isImplicit() && O.isUse()) &&
1459 "Spilling register that's used as implicit use?");
1460 SlotIndex index = getInstructionIndex(MI);
// Ignore instructions outside this live range.
1461 if (index < start || index >= end)
1465 // Must be defined by an implicit def. It should not be spilled. Note,
1466 // this is for correctness reason. e.g.
1467 // 8 %reg1024<def> = IMPLICIT_DEF
1468 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1469 // The live range [12, 14) are not part of the r1024 live interval since
1470 // it's defined by an implicit def. It will not conflicts with live
1471 // interval of r1025. Now suppose both registers are spilled, you can
1472 // easily see a situation where both registers are reloaded before
1473 // the INSERT_SUBREG and both target registers that would overlap.
1475 RewriteMIs.push_back(RewriteInfo(index, MI));
1477 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
// If the def is rematerializable with an implicit vreg use, remember that
// register so rewrites can attach/protect it.
1479 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1480 // Now rewrite the defs and uses.
1481 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1482 RewriteInfo &rwi = RewriteMIs[i];
1484 SlotIndex index = rwi.Index;
1485 MachineInstr *MI = rwi.MI;
1486 // If MI def and/or use the same register multiple times, then there
1487 // are multiple entries.
1488 while (i != e && RewriteMIs[i].MI == MI) {
1489 assert(RewriteMIs[i].Index == index);
1492 MachineBasicBlock *MBB = MI->getParent();
1494 if (ImpUse && MI != ReMatDefMI) {
1495 // Re-matting an instruction with virtual register use. Prevent interval
1496 // from being spilled.
1497 getInterval(ImpUse).markNotSpillable();
// Reuse the vreg already created for this MBB, if any, so multiple rewrites
// in one block share a single spill interval.
1500 unsigned MBBId = MBB->getNumber();
1501 unsigned ThisVReg = 0;
1503 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1504 if (NVI != MBBVRegsMap.end()) {
1505 ThisVReg = NVI->second;
1512 // It's better to start a new interval to avoid artificially
1513 // extend the new interval.
// A pure def (writes but does not read li.reg) starts a fresh interval.
1514 if (MI->readsWritesVirtualRegister(li.reg) ==
1515 std::make_pair(false,true)) {
1516 MBBVRegsMap.erase(MBB->getNumber());
1522 bool IsNew = ThisVReg == 0;
1524 // This ends the previous live interval. If all of its def / use
1525 // can be folded, give it a low spill weight.
1526 if (NewVReg && TrySplit && AllCanFold) {
1527 LiveInterval &nI = getOrCreateInterval(NewVReg);
1534 bool HasDef = false;
1535 bool HasUse = false;
1536 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1537 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1538 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1539 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1540 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs)
1541 if (!HasDef && !HasUse)
1544 AllCanFold &= CanFold;
1546 // Update weight of spill interval.
1547 LiveInterval &nI = getOrCreateInterval(NewVReg);
1549 // The spill weight is now infinity as it cannot be spilled again.
1550 nI.markNotSpillable();
1554 // Keep track of the last def and first use in each MBB.
1556 if (MI != ReMatOrigDefMI || !CanDelete) {
1557 bool HasKill = false;
1559 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1561 // If this is a two-address code, then this index starts a new VNInfo.
1562 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1564 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1566 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1567 SpillIdxes.find(MBBId);
// Record/merge a spill point for this MBB; the entries per block are kept in
// increasing index order, last entry being the latest def of NewVReg.
1569 if (SII == SpillIdxes.end()) {
1570 std::vector<SRInfo> S;
1571 S.push_back(SRInfo(index, NewVReg, true));
1572 SpillIdxes.insert(std::make_pair(MBBId, S));
1573 } else if (SII->second.back().vreg != NewVReg) {
1574 SII->second.push_back(SRInfo(index, NewVReg, true));
1575 } else if (index > SII->second.back().index) {
1576 // If there is an earlier def and this is a two-address
1577 // instruction, then it's not possible to fold the store (which
1578 // would also fold the load).
1579 SRInfo &Info = SII->second.back();
1581 Info.canFold = !HasUse;
1583 SpillMBBs.set(MBBId);
1584 } else if (SII != SpillIdxes.end() &&
1585 SII->second.back().vreg == NewVReg &&
1586 index > SII->second.back().index) {
1587 // There is an earlier def that's not killed (must be two-address).
1588 // The spill is no longer needed.
1589 SII->second.pop_back();
1590 if (SII->second.empty()) {
1591 SpillIdxes.erase(MBBId);
1592 SpillMBBs.reset(MBBId);
// Use handling (the guard line is elided): a use after the last recorded def
// of the same vreg makes the pending spill unfoldable.
1599 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1600 SpillIdxes.find(MBBId);
1601 if (SII != SpillIdxes.end() &&
1602 SII->second.back().vreg == NewVReg &&
1603 index > SII->second.back().index)
1604 // Use(s) following the last def, it's not safe to fold the spill.
1605 SII->second.back().canFold = false;
1606 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1607 RestoreIdxes.find(MBBId);
1608 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1609 // If we are splitting live intervals, only fold if it's the first
1610 // use and there isn't another use later in the MBB.
1611 RII->second.back().canFold = false;
1613 // Only need a reload if there isn't an earlier def / use.
1614 if (RII == RestoreIdxes.end()) {
1615 std::vector<SRInfo> Infos;
1616 Infos.push_back(SRInfo(index, NewVReg, true));
1617 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1619 RII->second.push_back(SRInfo(index, NewVReg, true));
1621 RestoreMBBs.set(MBBId);
1625 // Update spill weight.
1626 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1627 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1630 if (NewVReg && TrySplit && AllCanFold) {
1631 // If all of its def / use can be folded, give it a low spill weight.
1632 LiveInterval &nI = getOrCreateInterval(NewVReg);
// alsoFoldARestore - Return true if the restore recorded for (Id, index, vr)
// exists and is marked foldable; used when folding a spill so the matching
// reload can be folded away too. NOTE(review): the return lines are elided
// in this excerpt.
1637 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1638 unsigned vr, BitVector &RestoreMBBs,
1639 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores recorded for this MBB at all.
1640 if (!RestoreMBBs[Id])
1642 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1643 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1644 if (Restores[i].index == index &&
1645 Restores[i].vreg == vr &&
1646 Restores[i].canFold)
// eraseRestoreInfo - Invalidate the restore recorded for (Id, index, vr) by
// clearing its index to the default SlotIndex; the restore-emission loop in
// addIntervalsForSpills skips entries with a default index.
1651 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1652 unsigned vr, BitVector &RestoreMBBs,
1653 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1654 if (!RestoreMBBs[Id])
1656 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1657 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
// NOTE(review): the vreg test reads "Restores[i].vreg" with no comparison to
// vr here — possibly a truncated "== vr" on an elided continuation; confirm
// against the full source.
1658 if (Restores[i].index == index && Restores[i].vreg)
1659 Restores[i].index = SlotIndex();
1662 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1663 /// spilled and create empty intervals for their uses.
// NOTE(review): the return type, branch guards, and closing braces are
// partially elided in this excerpt.
1665 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1666 const TargetRegisterClass* rc,
1667 std::vector<LiveInterval*> &NewLIs) {
1668 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1669 re = mri_->reg_end(); ri != re; ) {
1670 MachineOperand &O = ri.getOperand();
1671 MachineInstr *MI = &*ri;
1673 if (MI->isDebugValue()) {
1674 // Remove debug info for now.
1676 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
// Defs reaching this point must be IMPLICIT_DEFs (everything else was
// rewritten earlier); delete them and drop their bookkeeping.
1680 assert(MI->isImplicitDef() &&
1681 "Register def was not rewritten?");
1682 RemoveMachineInstrFromMaps(MI);
1683 vrm.RemoveMachineInstrFromMaps(MI);
1684 MI->eraseFromParent();
1686 // This must be an use of an implicit_def so it's not part of the live
1687 // interval. Create a new empty live interval for it.
1688 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1689 unsigned NewVReg = mri_->createVirtualRegister(rc);
1691 vrm.setIsImplicitlyDefined(NewVReg);
1692 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Rewrite every operand of MI that still names li.reg to the new vreg (the
// setReg call is on an elided line).
1693 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1694 MachineOperand &MO = MI->getOperand(i);
1695 if (MO.isReg() && MO.getReg() == li.reg) {
// getSpillWeight - Estimate the cost of spilling around one instruction:
// (isDef + isUse) scaled by a tempered exponential of the loop depth.
// NOTE(review): the return type line and the depth-clamp return are elided
// in this excerpt.
1705 LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1706 // Limit the loop depth ridiculousness.
1707 if (loopDepth > 200)
1710 // The loop depth is used to roughly estimate the number of times the
1711 // instruction is executed. Something like 10^d is simple, but will quickly
1712 // overflow a float. This expression behaves like 10^d for small d, but is
1713 // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1714 // headroom before overflow.
1715 // By the way, powf() might be unavailable here. For consistency,
1716 // We may take pow(double,double).
1717 float lc = std::pow(1 + (100.0 / (loopDepth + 10)), (double)loopDepth);
// bools promote to 0/1, so this is 0, lc, or 2*lc.
1719 return (isDef + isUse) * lc;
// normalizeSpillWeights - Normalize the weight of each newly created interval
// by its size (a guard line between the loop header and the call appears to
// be elided in this excerpt).
1722 static void normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1723 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1725 normalizeSpillWeight(NewLIs[i]->weight, NewLIs[i]->getSize());
// addIntervalsForSpills - Top-level spiller entry: given a spillable interval
// li, rewrite all of its defs/uses into new intervals (spilling and/or
// rematerializing), record and emit spill/restore points, and return the
// resulting new intervals. Handles the already-split case, the remat
// analysis per val#, stack-slot assignment, the per-range rewrite, and final
// kill-flag/weight cleanup.
// NOTE(review): this excerpt elides many interior lines (returns, else
// branches, loop guards, closing braces); comments describe only what is
// visible.
1728 std::vector<LiveInterval*> LiveIntervals::
1729 addIntervalsForSpills(const LiveInterval &li,
1730 const SmallVectorImpl<LiveInterval*> *SpillIs,
1731 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1732 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1735 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1736 li.print(dbgs(), tri_);
1740 // Each bit specify whether a spill is required in the MBB.
1741 BitVector SpillMBBs(mf_->getNumBlockIDs());
1742 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1743 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1744 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1745 DenseMap<unsigned,unsigned> MBBVRegsMap;
1746 std::vector<LiveInterval*> NewLIs;
1747 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-val# rematerialization state, indexed by VNInfo id.
1749 unsigned NumValNums = li.getNumValNums();
1750 SmallVector<MachineInstr*, 4> ReMatDefs;
1751 ReMatDefs.resize(NumValNums, NULL);
1752 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1753 ReMatOrigDefs.resize(NumValNums, NULL);
1754 SmallVector<int, 4> ReMatIds;
1755 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1756 BitVector ReMatDelete(NumValNums);
1757 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1759 // Spilling a split live interval. It cannot be split any further. Also,
1760 // it's also guaranteed to be a single val# / range interval.
1761 if (vrm.getPreSplitReg(li.reg)) {
1762 vrm.setIsSplitFromReg(li.reg, 0);
1763 // Unset the split kill marker on the last use.
1764 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1765 if (KillIdx != SlotIndex()) {
1766 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1767 assert(KillMI && "Last use disappeared?");
1768 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1769 assert(KillOp != -1 && "Last use disappeared?");
1770 KillMI->getOperand(KillOp).setIsKill(false);
1772 vrm.removeKillPoint(li.reg);
1773 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1774 Slot = vrm.getStackSlot(li.reg);
1775 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1776 MachineInstr *ReMatDefMI = DefIsReMat ?
1777 vrm.getReMaterializedMI(li.reg) : NULL;
1779 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1780 bool isLoad = isLoadSS ||
1781 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1782 bool IsFirstRange = true;
1783 for (LiveInterval::Ranges::const_iterator
1784 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1785 // If this is a split live interval with multiple ranges, it means there
1786 // are two-address instructions that re-defined the value. Only the
1787 // first def can be rematerialized!
1789 // Note ReMatOrigDefMI has already been deleted.
1790 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1791 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1792 false, vrm, rc, ReMatIds, loopInfo,
1793 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1794 MBBVRegsMap, NewLIs);
// Later ranges (re-defs) are never rematerialized: rewrite with no remat
// info at all.
1796 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1797 Slot, 0, false, false, false,
1798 false, vrm, rc, ReMatIds, loopInfo,
1799 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1800 MBBVRegsMap, NewLIs);
1802 IsFirstRange = false;
// Early-out path for the pre-split case: fix up implicit defs and weights,
// then (on elided lines) return.
1805 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1806 normalizeSpillWeights(NewLIs);
// Non-split interval: decide whether to try splitting (only worthwhile when
// the interval spans multiple blocks).
1810 bool TrySplit = !intervalIsInOneMBB(li);
1813 bool NeedStackSlot = false;
1814 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1816 const VNInfo *VNI = *i;
1817 unsigned VN = VNI->id;
1818 if (VNI->isUnused())
1819 continue; // Dead val#.
1820 // Is the def for the val# rematerializable?
1821 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1823 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1824 // Remember how to remat the def of this val#.
1825 ReMatOrigDefs[VN] = ReMatDefMI;
1826 // Original def may be modified so we have to make a copy here.
1827 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1828 CloneMIs.push_back(Clone);
1829 ReMatDefs[VN] = Clone;
1831 bool CanDelete = true;
1832 if (VNI->hasPHIKill()) {
1833 // A kill is a phi node, not all of its uses can be rematerialized.
1834 // It must not be deleted.
1836 // Need a stack slot if there is any live range where uses cannot be
1838 NeedStackSlot = true;
// Val#s whose def can be deleted after full remat are recorded here.
1841 ReMatDelete.set(VN);
1843 // Need a stack slot if there is any live range where uses cannot be
1845 NeedStackSlot = true;
1849 // One stack slot per live interval.
1850 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1851 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1852 Slot = vrm.assignVirt2StackSlot(li.reg);
1854 // This case only occurs when the prealloc splitter has already assigned
1855 // a stack slot to this vreg.
1857 Slot = vrm.getStackSlot(li.reg);
1860 // Create new intervals and rewrite defs and uses.
1861 for (LiveInterval::Ranges::const_iterator
1862 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1863 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1864 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1865 bool DefIsReMat = ReMatDefMI != NULL;
1866 bool CanDelete = ReMatDelete[I->valno->id];
1868 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1869 bool isLoad = isLoadSS ||
1870 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1871 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1872 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1873 CanDelete, vrm, rc, ReMatIds, loopInfo,
1874 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1875 MBBVRegsMap, NewLIs);
1878 // Insert spills / restores if we are splitting.
// (The !TrySplit early-out guard is elided before these two lines.)
1880 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1881 normalizeSpillWeights(NewLIs);
1885 SmallPtrSet<LiveInterval*, 4> AddedKill;
1886 SmallVector<unsigned, 2> Ops;
1887 if (NeedStackSlot) {
// Emit spill points recorded per MBB, attempting to fold each store into
// the defining instruction where legal.
1888 int Id = SpillMBBs.find_first();
1890 std::vector<SRInfo> &spills = SpillIdxes[Id];
1891 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1892 SlotIndex index = spills[i].index;
1893 unsigned VReg = spills[i].vreg;
1894 LiveInterval &nI = getOrCreateInterval(VReg);
1895 bool isReMat = vrm.isReMaterialized(VReg);
1896 MachineInstr *MI = getInstructionFromIndex(index);
1897 bool CanFold = false;
1898 bool FoundUse = false;
1900 if (spills[i].canFold) {
1902 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1903 MachineOperand &MO = MI->getOperand(j);
1904 if (!MO.isReg() || MO.getReg() != VReg)
1911 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1912 RestoreMBBs, RestoreIdxes))) {
1913 // MI has two-address uses of the same register. If the use
1914 // isn't the first and only use in the BB, then we can't fold
1915 // it. FIXME: Move this to rewriteInstructionsForSpills.
1922 // Fold the store into the def if possible.
1923 bool Folded = false;
1924 if (CanFold && !Ops.empty()) {
1925 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1928 // Also folded uses, do not issue a load.
1929 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1930 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1932 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1936 // Otherwise tell the spiller to issue a spill.
1938 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1939 bool isKill = LR->end == index.getStoreIndex();
1940 if (!MI->registerDefIsDead(nI.reg))
1941 // No need to spill a dead def.
1942 vrm.addSpillPoint(VReg, isKill, MI);
// Remember intervals whose last use kill flag is already accounted for.
1944 AddedKill.insert(&nI);
1947 Id = SpillMBBs.find_next(Id);
// Emit restore points per MBB, attempting to fold each reload (or the remat
// def itself when it is a load) into the using instruction.
1951 int Id = RestoreMBBs.find_first();
1953 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1954 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1955 SlotIndex index = restores[i].index;
// Entries invalidated by eraseRestoreInfo carry a default SlotIndex.
1956 if (index == SlotIndex())
1958 unsigned VReg = restores[i].vreg;
1959 LiveInterval &nI = getOrCreateInterval(VReg);
1960 bool isReMat = vrm.isReMaterialized(VReg);
1961 MachineInstr *MI = getInstructionFromIndex(index);
1962 bool CanFold = false;
1964 if (restores[i].canFold) {
1966 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1967 MachineOperand &MO = MI->getOperand(j);
1968 if (!MO.isReg() || MO.getReg() != VReg)
1972 // If this restore were to be folded, it would have been folded
1981 // Fold the load into the use if possible.
1982 bool Folded = false;
1983 if (CanFold && !Ops.empty()) {
1985 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1987 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1989 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1990 // If the rematerializable def is a load, also try to fold it.
1991 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1992 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1993 Ops, isLoadSS, LdSlot, VReg);
1995 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1997 // Re-matting an instruction with virtual register use. Add the
1998 // register as an implicit use on the use MI and mark the register
1999 // interval as unspillable.
2000 LiveInterval &ImpLi = getInterval(ImpUse);
2001 ImpLi.markNotSpillable();
2002 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2007 // If folding is not possible / failed, then tell the spiller to issue a
2008 // load / rematerialization for us.
2010 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
2012 vrm.addRestorePoint(VReg, MI);
2014 Id = RestoreMBBs.find_next(Id);
2017 // Finalize intervals: add kills, finalize spill weights, and filter out
2019 std::vector<LiveInterval*> RetNewLIs;
2020 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2021 LiveInterval *LI = NewLIs[i];
// Intervals whose spill already accounted for the kill are in AddedKill;
// the rest get an explicit kill flag on their last use.
2023 if (!AddedKill.count(LI)) {
2024 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2025 SlotIndex LastUseIdx = LR->end.getBaseIndex();
2026 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2027 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2028 assert(UseIdx != -1);
2029 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2030 LastUse->getOperand(UseIdx).setIsKill();
2031 vrm.addKillPoint(LI->reg, LastUseIdx);
2034 RetNewLIs.push_back(LI);
2038 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2039 normalizeSpillWeights(RetNewLIs);
2043 /// hasAllocatableSuperReg - Return true if the specified physical register has
2044 /// any super register that's allocatable.
2045 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// Walk the target's null-terminated super-register list for Reg. A
// super-register only qualifies if it is both allocatable on this target and
// currently tracked with a live interval.
2046 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2047 if (allocatableRegs_[*AS] && hasInterval(*AS))
// NOTE(review): the tail of this function (the `return true;` taken here and
// the final `return false;`) is elided from this excerpt — confirm against the
// full source before editing.
2052 /// getRepresentativeReg - Find the largest super register of the specified
2053 /// physical register.
2054 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2055 // Find the largest super-register that is allocatable.
// Default to Reg itself; only replaced if a maximal super-register is found.
2056 unsigned BestReg = Reg;
2057 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2058 unsigned SuperReg = *AS;
// A super-register is "maximal" when it has no allocatable super-register of
// its own (per hasAllocatableSuperReg) yet is itself tracked with an interval.
2059 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
// NOTE(review): the loop body tail (presumably `BestReg = SuperReg;` plus a
// break/closing braces) and the final `return BestReg;` are elided from this
// excerpt — verify against the full source.
2067 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2068 /// specified interval that conflicts with the specified physical register.
2069 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2070 unsigned PhysReg) const {
2071 unsigned NumConflicts = 0;
// Conflicts are measured against the interval of PhysReg's representative
// (largest) super-register, not PhysReg itself.
2072 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
// Visit every operand (use or def) of li's virtual register.
2073 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2074 E = mri_->reg_end(); I != E; ++I) {
2075 MachineOperand &O = I.getOperand();
2076 MachineInstr *MI = O.getParent();
// DBG_VALUE instructions don't constrain allocation; they are skipped
// (the `continue;` is elided from this excerpt).
2077 if (MI->isDebugValue())
// If the physical interval is live at this instruction's slot, it conflicts
// with this use/def (the increment of NumConflicts is elided here).
2079 SlotIndex Index = getInstructionIndex(MI);
2080 if (pli.liveAt(Index))
2083 return NumConflicts;
2086 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2087 /// around all defs and uses of the specified interval. Return true if it
2088 /// was able to cut its interval.
2089 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2090 unsigned PhysReg, VirtRegMap &vrm) {
// Spill decisions are made at the granularity of the representative (largest)
// super-register so all aliases are handled together.
2091 unsigned SpillReg = getRepresentativeReg(PhysReg);
2093 DEBUG(dbgs() << "spillPhysRegAroundRegDefsUses " << tri_->getName(PhysReg)
2094 << " represented by " << tri_->getName(SpillReg) << '\n');
2096 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2097 // If there are registers which alias PhysReg, but which are not a
2098 // sub-register of the chosen representative super register. Assert
2099 // since we can't handle it yet.
2100 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2101 tri_->isSuperRegister(*AS, SpillReg));
// Collect SpillReg and every sub-register of it that has a live interval;
// each of these may need its range cut around li's defs/uses.
2104 SmallVector<unsigned, 4> PRegs;
2105 if (hasInterval(SpillReg))
2106 PRegs.push_back(SpillReg);
2107 for (const unsigned *SR = tri_->getSubRegisters(SpillReg); *SR; ++SR)
2108 if (hasInterval(*SR))
2109 PRegs.push_back(*SR);
// Debug-only dump of the candidate registers (the DEBUG({ ... }) wrapper
// lines around this are elided from this excerpt).
2112 dbgs() << "Trying to spill:";
2113 for (unsigned i = 0, e = PRegs.size(); i != e; ++i)
2114 dbgs() << ' ' << tri_->getName(PRegs[i]);
// Track visited instructions so an instruction with several operands on
// li.reg is processed only once.
2118 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2119 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2120 E = mri_->reg_end(); I != E; ++I) {
2121 MachineOperand &O = I.getOperand();
2122 MachineInstr *MI = O.getParent();
// Skip DBG_VALUEs and already-seen instructions (the `continue;` and the
// SeenMIs.insert call are elided from this excerpt).
2123 if (MI->isDebugValue() || SeenMIs.count(MI))
2126 SlotIndex Index = getInstructionIndex(MI);
2127 bool LiveReg = false;
2128 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2129 unsigned PReg = PRegs[i];
2130 LiveInterval &pli = getInterval(PReg);
2131 if (!pli.liveAt(Index))
// Cut the physical interval from the load slot of this instruction up to the
// base slot of the next one, freeing PReg around this def/use.
2134 SlotIndex StartIdx = Index.getLoadIndex();
2135 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2136 if (!pli.isInOneLiveRange(StartIdx, EndIdx)) {
// The cut would straddle distinct live ranges: we cannot free the register
// here, so allocation has genuinely failed. Inline asm gets a more specific
// diagnostic since bad constraints are the common cause.
2138 raw_string_ostream Msg(msg);
2139 Msg << "Ran out of registers during register allocation!";
2140 if (MI->isInlineAsm()) {
2141 Msg << "\nPlease check your inline asm statement for invalid "
2142 << "constraints:\n";
2143 MI->print(Msg, tm_);
2145 report_fatal_error(Msg.str());
2147 pli.removeRange(StartIdx, EndIdx);
// Record an emergency spill of the representative register around this
// instruction; the spiller will insert the actual store/reload. (The
// `Cut = true;` bookkeeping and final `return Cut;` are elided from this
// excerpt — confirm against the full source.)
2152 DEBUG(dbgs() << "Emergency spill around " << Index << '\t' << *MI);
2153 vrm.addEmergencySpill(SpillReg, MI);
2159 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2160 MachineInstr* startInst) {
2161 LiveInterval& Interval = getOrCreateInterval(reg);
2162 VNInfo* VN = Interval.getNextValue(
2163 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2164 startInst, getVNInfoAllocator());
2165 VN->setHasPHIKill(true);
2167 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2168 getMBBEndIdx(startInst->getParent()), VN);
2169 Interval.addRange(LR);