1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/CalcSpillWeights.h"
24 #include "llvm/CodeGen/LiveVariables.h"
25 #include "llvm/CodeGen/MachineFrameInfo.h"
26 #include "llvm/CodeGen/MachineInstr.h"
27 #include "llvm/CodeGen/MachineInstrBuilder.h"
28 #include "llvm/CodeGen/MachineLoopInfo.h"
29 #include "llvm/CodeGen/MachineMemOperand.h"
30 #include "llvm/CodeGen/MachineRegisterInfo.h"
31 #include "llvm/CodeGen/Passes.h"
32 #include "llvm/CodeGen/ProcessImplicitDefs.h"
33 #include "llvm/Target/TargetRegisterInfo.h"
34 #include "llvm/Target/TargetInstrInfo.h"
35 #include "llvm/Target/TargetMachine.h"
36 #include "llvm/Target/TargetOptions.h"
37 #include "llvm/Support/CommandLine.h"
38 #include "llvm/Support/Debug.h"
39 #include "llvm/Support/ErrorHandling.h"
40 #include "llvm/Support/raw_ostream.h"
41 #include "llvm/ADT/DepthFirstIterator.h"
42 #include "llvm/ADT/SmallSet.h"
43 #include "llvm/ADT/Statistic.h"
44 #include "llvm/ADT/STLExtras.h"
50 // Hidden options for help debugging.
// -disable-rematerialization: hidden debug knob, off by default. Presumably
// consulted by the rematerialization paths of this pass (uses not visible in
// this excerpt — confirm against the full file).
51 static cl::opt<bool> DisableReMat("disable-rematerialization",
52 cl::init(false), cl::Hidden);
// Pass statistics (reported with -stats). numIntervals is bumped in
// runOnMachineFunction below; numFolds/numSplits are presumably updated by
// the fold/split paths not visible in this excerpt.
54 STATISTIC(numIntervals , "Number of original intervals");
55 STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
56 STATISTIC(numSplits   , "Number of intervals split");
// Pass identification and registration. The INITIALIZE_PASS_DEPENDENCY list
// mirrors the requirements declared in getAnalysisUsage() below; keep the two
// in sync when adding/removing a dependency.
58 char LiveIntervals::ID = 0;
59 INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
60 "Live Interval Analysis", false, false)
61 INITIALIZE_PASS_DEPENDENCY(LiveVariables)
62 INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
63 INITIALIZE_PASS_DEPENDENCY(PHIElimination)
64 INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
65 INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
66 INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
67 INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
68 INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
69 "Live Interval Analysis", false, false)
// Declare the analyses this pass requires and preserves. Note SlotIndexes is
// addRequiredTransitive: users of LiveIntervals implicitly depend on it too.
71 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
73 AU.addRequired<AliasAnalysis>();
74 AU.addPreserved<AliasAnalysis>();
75 AU.addRequired<LiveVariables>();
76 AU.addPreserved<LiveVariables>();
77 AU.addRequired<MachineLoopInfo>();
78 AU.addPreserved<MachineLoopInfo>();
79 AU.addPreservedID(MachineDominatorsID);
// NOTE(review): for PHIElimination and TwoAddressInstructionPass the
// addPreservedID call deliberately precedes addRequiredID — presumably to
// avoid rerunning them; confirm the rationale in the elided comments
// (original lines 80-85 are not visible in this excerpt).
82 AU.addPreservedID(PHIEliminationID);
83 AU.addRequiredID(PHIEliminationID);
86 AU.addRequiredID(TwoAddressInstructionPassID);
87 AU.addPreserved<ProcessImplicitDefs>();
88 AU.addRequired<ProcessImplicitDefs>();
89 AU.addPreserved<SlotIndexes>();
90 AU.addRequiredTransitive<SlotIndexes>();
91 MachineFunctionPass::getAnalysisUsage(AU);
// Drop all per-function state: the reg -> LiveInterval map, the VNInfo
// bump-pointer allocator, and any cloned instructions we still own.
94 void LiveIntervals::releaseMemory() {
95 // Free the live intervals themselves.
// NOTE(review): the loop body (presumably 'delete I->second;' plus a
// r2iMap_.clear()) is elided in this excerpt — confirm against the full file.
96 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
97 E = r2iMap_.end(); I != E; ++I)
102 // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
103 VNInfoAllocator.Reset();
// Return cloned MachineInstrs to the function's instruction allocator.
104 while (!CloneMIs.empty()) {
105 MachineInstr *MI = CloneMIs.back();
107 mf_->DeleteMachineInstr(MI);
111 /// runOnMachineFunction - Register allocate the whole function
// Entry point: caches pointers to the target/analysis objects, then
// (in elided code) computes the live intervals and updates statistics.
// NOTE(review): the 'mf_ = &fn;' initialization (original line 114) and the
// computeIntervals()/return tail are elided in this excerpt.
113 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
115 mri_ = &mf_->getRegInfo();
116 tm_ = &fn.getTarget();
117 tri_ = tm_->getRegisterInfo();
118 tii_ = tm_->getInstrInfo();
119 aa_ = &getAnalysis<AliasAnalysis>();
120 lv_ = &getAnalysis<LiveVariables>();
121 indexes_ = &getAnalysis<SlotIndexes>();
122 allocatableRegs_ = tri_->getAllocatableSet(fn);
// Count every interval we just built toward the -stats report.
126 numIntervals += getNumIntervals();
132 /// print - Implement the dump method.
// Prints every interval in the r2iMap_ (iteration order of the map), one per
// line, using the cached TargetRegisterInfo for register names.
133 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
134 OS << "********** INTERVALS **********\n";
135 for (const_iterator I = begin(), E = end(); I != E; ++I) {
136 I->second->print(OS, tri_);
// Dump the machine function with slot indexes annotated (indexes_ is passed
// so MachineFunction::print can prefix each instruction with its SlotIndex).
143 void LiveIntervals::printInstrs(raw_ostream &OS) const {
144 OS << "********** MACHINEINSTRS **********\n";
145 mf_->print(OS, indexes_);
// Debug helper; body elided in this excerpt (presumably printInstrs(dbgs())).
148 void LiveIntervals::dumpInstrs() const {
// conflictsWithPhysReg - Conservatively decide whether assigning physreg
// 'reg' to 'li' would conflict with other operands in li's (single) live
// range. Bails out (elided early returns) for anything spanning multiple
// ranges or basic blocks.
152 bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
153 VirtRegMap &vrm, unsigned reg) {
154 // We don't handle fancy stuff crossing basic block boundaries
155 if (li.ranges.size() != 1)
157 const LiveRange &range = li.ranges.front();
// Round to whole-instruction indexes: 'end' is the base index one past the
// last instruction the range touches.
158 SlotIndex idx = range.start.getBaseIndex();
159 SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
161 // Skip deleted instructions
162 MachineInstr *firstMI = getInstructionFromIndex(idx);
163 while (!firstMI && idx != end) {
164 idx = idx.getNextIndex();
165 firstMI = getInstructionFromIndex(idx);
170 // Find last instruction in range
171 SlotIndex lastIdx = end.getPrevIndex();
172 MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
173 while (!lastMI && lastIdx != idx) {
174 lastIdx = lastIdx.getPrevIndex();
175 lastMI = getInstructionFromIndex(lastIdx);
180 // Range cannot cross basic block boundaries or terminators
181 MachineBasicBlock *MBB = firstMI->getParent();
182 if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
// Walk [firstMI, lastMI] inclusive: E is lastMI, and the elided code
// presumably advances E past lastMI before the loop — confirm.
185 MachineBasicBlock::const_iterator E = lastMI;
187 for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
188 const MachineInstr &MI = *I;
190 // Allow copies to and from li.reg
// NOTE(review): the guard making this safe (presumably MI.isCopyLike(),
// original line 191) is elided; operands 0/1 are only meaningful for copies.
192 if (MI.getOperand(0).getReg() == li.reg ||
193 MI.getOperand(1).getReg() == li.reg)
196 // Check for operands using reg
197 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
198 const MachineOperand& mop = MI.getOperand(i);
201 unsigned PhysReg = mop.getReg();
202 if (PhysReg == 0 || PhysReg == li.reg)
// Virtual operands conflict through their current phys assignment, if any.
204 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
205 if (!vrm.hasPhys(PhysReg))
207 PhysReg = vrm.getPhys(PhysReg);
// Any overlap (including sub/super registers) counts as a conflict.
209 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
214 // No conflicts found.
// conflictsWithAliasRef - Return whether any instruction inside li's ranges
// (excluding coalesced copies in JoinedCopies) references a physical register
// that aliases 'Reg'. Used to rule out joining decisions.
218 bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
219 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
220 for (LiveInterval::Ranges::const_iterator
221 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// Iterate whole-instruction indexes covered by this range.
222 for (SlotIndex index = I->start.getBaseIndex(),
223 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
225 index = index.getNextIndex()) {
226 MachineInstr *MI = getInstructionFromIndex(index);
228 continue; // skip deleted instructions
// Copies already folded away by coalescing don't count as references.
230 if (JoinedCopies.count(MI))
232 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
233 MachineOperand& MO = MI->getOperand(i);
236 unsigned PhysReg = MO.getReg();
237 if (PhysReg == 0 || PhysReg == Reg ||
238 TargetRegisterInfo::isVirtualRegister(PhysReg))
240 if (tri_->regsOverlap(Reg, PhysReg))
// MultipleDefsBySameMI - Return whether MI defines the register of operand
// MOIdx again in a later operand (e.g. two sub-register defs of the same
// vreg by one instruction, as produced by REG_SEQUENCE elimination).
// The returns are elided in this excerpt; the assert documents the only
// legal shape: distinct subregs, the first one non-zero.
250 bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
251 unsigned Reg = MI.getOperand(MOIdx).getReg();
252 for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
253 const MachineOperand &MO = MI.getOperand(i);
256 if (MO.getReg() == Reg && MO.isDef()) {
257 assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
258 MI.getOperand(MOIdx).getSubReg() &&
259 (MO.getSubReg() || MO.isImplicit()));
266 /// isPartialRedef - Return true if the specified def at the specific index is
267 /// partially re-defining the specified live interval. A common case of this is
268 /// a definition of the sub-register.
269 bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
270 LiveInterval &interval) {
// Only subreg defs can be partial redefs; early-clobbers are handled
// elsewhere (the early return itself is elided in this excerpt).
271 if (!MO.getSubReg() || MO.isEarlyClobber())
// Look up the value live just before this def; it is a partial redef iff
// that value's defining instruction also defined interval.reg.
274 SlotIndex RedefIndex = MIIdx.getDefIndex();
275 const LiveRange *OldLR =
276 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
// NOTE(review): OldLR is dereferenced without a null check — presumably
// guaranteed non-null by the caller's context; confirm.
277 MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
279 return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
// handleVirtualRegisterDef - Update 'interval' for a def of a virtual
// register by instruction 'mi' (operand MOIdx, at slot MIIdx). First def of a
// vreg builds the interval from LiveVariables kill info; later defs are
// either two-address/partial redefs (split the existing value) or PHI-join
// re-defs (live only to block end).
284 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
285 MachineBasicBlock::iterator mi,
289 LiveInterval &interval) {
290 DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));
292 // Virtual registers may be defined multiple times (due to phi
293 // elimination and 2-addr elimination). Much of what we do only has to be
294 // done once for the vreg. We use an empty interval to detect the first
295 // time we see a vreg.
296 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
297 if (interval.empty()) {
298 // Get the Idx of the defining instructions.
299 SlotIndex defIndex = MIIdx.getDefIndex();
300 // Earlyclobbers move back one, so that they overlap the live range
302 if (MO.isEarlyClobber())
303 defIndex = MIIdx.getUseIndex();
305 // Make sure the first definition is not a partial redefinition. Add an
306 // <imp-def> of the full register.
// NOTE(review): the 'if (MO.getSubReg())' guard (original line 307) is
// elided; addRegisterDefined presumably runs only for subreg first-defs.
308 mi->addRegisterDefined(interval.reg);
310 MachineInstr *CopyMI = NULL;
// Remember the defining copy so coalescing can use it later (assignment to
// CopyMI inside this if is elided in this excerpt).
311 if (mi->isCopyLike()) {
315 VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
316 assert(ValNo->id == 0 && "First value in interval is not 0?");
318 // Loop over all of the blocks that the vreg is defined in. There are
319 // two cases we have to handle here. The most common case is a vreg
320 // whose lifetime is contained within a basic block. In this case there
321 // will be a single kill, in MBB, which comes after the definition.
322 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
323 // FIXME: what about dead vars?
// If the def itself is the kill, the value is dead: range is just the
// def slot (store index = defSlot+1).
325 if (vi.Kills[0] != mi)
326 killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
328 killIdx = defIndex.getStoreIndex();
330 // If the kill happens after the definition, we have an intra-block
332 if (killIdx > defIndex) {
333 assert(vi.AliveBlocks.empty() &&
334 "Shouldn't be alive across any blocks!");
335 LiveRange LR(defIndex, killIdx, ValNo);
336 interval.addRange(LR);
337 DEBUG(dbgs() << " +" << LR << "\n");
342 // The other case we handle is when a virtual register lives to the end
343 // of the defining block, potentially live across some blocks, then is
344 // live into some number of blocks, but gets killed. Start by adding a
345 // range that goes from this definition to the end of the defining block.
346 LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
347 DEBUG(dbgs() << " +" << NewLR);
348 interval.addRange(NewLR);
350 bool PHIJoin = lv_->isPHIJoin(interval.reg);
// (The 'if (PHIJoin)' branch header, original lines 351-352, is elided.)
353 // A phi join register is killed at the end of the MBB and revived as a new
354 // valno in the killing blocks.
355 assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
356 DEBUG(dbgs() << " phi-join");
357 ValNo->setHasPHIKill(true);
359 // Iterate over all of the blocks that the variable is completely
360 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
362 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
363 E = vi.AliveBlocks.end(); I != E; ++I) {
364 MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
365 LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
366 interval.addRange(LR);
367 DEBUG(dbgs() << " +" << LR);
371 // Finally, this virtual register is live from the start of any killing
372 // block to the 'use' slot of the killing instruction.
373 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
374 MachineInstr *Kill = vi.Kills[i];
375 SlotIndex Start = getMBBStartIdx(Kill->getParent());
376 SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();
378 // Create interval with one of a NEW value number. Note that this value
379 // number isn't actually defined by an instruction, weird huh? :)
// PHI-join case: each killing block gets its own PHI-def value number
// starting at the block boundary (no real defining instruction).
381 assert(getInstructionFromIndex(Start) == 0 &&
382 "PHI def index points at actual instruction.");
383 ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
384 ValNo->setIsPHIDef(true);
386 LiveRange LR(Start, killIdx, ValNo);
387 interval.addRange(LR);
388 DEBUG(dbgs() << " +" << LR);
// ---- Non-first def of this vreg from here on. ----
392 if (MultipleDefsBySameMI(*mi, MOIdx))
393 // Multiple defs of the same virtual register by the same instruction.
394 // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
395 // This is likely due to elimination of REG_SEQUENCE instructions. Return
396 // here since there is nothing to do.
399 // If this is the second time we see a virtual register definition, it
400 // must be due to phi elimination or two addr elimination. If this is
401 // the result of two address elimination, then the vreg is one of the
402 // def-and-use register operand.
404 // It may also be partial redef like this:
405 // 80 %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
406 // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
407 bool PartReDef = isPartialRedef(MIIdx, MO, interval);
408 if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
409 // If this is a two-address definition, then we have already processed
410 // the live range. The only problem is that we didn't realize there
411 // are actually two values in the live interval. Because of this we
412 // need to take the LiveRegion that defines this register and split it
414 SlotIndex RedefIndex = MIIdx.getDefIndex();
415 if (MO.isEarlyClobber())
416 RedefIndex = MIIdx.getUseIndex();
418 const LiveRange *OldLR =
419 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
420 VNInfo *OldValNo = OldLR->valno;
421 SlotIndex DefIndex = OldValNo->def.getDefIndex();
423 // Delete the previous value, which should be short and continuous,
424 // because the 2-addr copy must be in the same MBB as the redef.
425 interval.removeRange(DefIndex, RedefIndex);
427 // The new value number (#1) is defined by the instruction we claimed
429 VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);
431 // Value#0 is now defined by the 2-addr instruction.
432 OldValNo->def = RedefIndex;
433 OldValNo->setCopy(0);
435 // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
436 if (PartReDef && mi->isCopyLike())
437 OldValNo->setCopy(&*mi);
439 // Add the new live interval which replaces the range for the input copy.
440 LiveRange LR(DefIndex, RedefIndex, ValNo);
441 DEBUG(dbgs() << " replace range with " << LR);
442 interval.addRange(LR);
444 // If this redefinition is dead, we need to add a dummy unit live
445 // range covering the def slot.
// NOTE(review): the 'if (MO.isDead())' guard (original line 446) is elided.
447 interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
451 dbgs() << " RESULT: ";
452 interval.print(dbgs(), tri_);
454 } else if (lv_->isPHIJoin(interval.reg)) {
455 // In the case of PHI elimination, each variable definition is only
456 // live until the end of the block. We've already taken care of the
457 // rest of the live range.
459 SlotIndex defIndex = MIIdx.getDefIndex();
460 if (MO.isEarlyClobber())
461 defIndex = MIIdx.getUseIndex();
464 MachineInstr *CopyMI = NULL;
465 if (mi->isCopyLike())
// (CopyMI assignment elided; ValNo declaration line also elided.)
467 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
469 SlotIndex killIndex = getMBBEndIdx(mbb);
470 LiveRange LR(defIndex, killIndex, ValNo);
471 interval.addRange(LR);
472 ValNo->setHasPHIKill(true);
473 DEBUG(dbgs() << " phi-join +" << LR);
// Any other multiple-def situation is a verifier-level invariant violation.
475 llvm_unreachable("Multiply defined register");
479 DEBUG(dbgs() << '\n');
// handlePhysicalRegisterDef - Build/extend the interval for a physreg def.
// Physreg lifetimes never cross a block, so we scan forward within MBB for
// the kill or redef that ends the range.
482 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
483 MachineBasicBlock::iterator mi,
486 LiveInterval &interval,
487 MachineInstr *CopyMI) {
488 // A physical register cannot be live across basic block, so its
489 // lifetime must end somewhere in its defining basic block.
490 DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));
492 SlotIndex baseIndex = MIIdx;
493 SlotIndex start = baseIndex.getDefIndex();
494 // Earlyclobbers move back one.
495 if (MO.isEarlyClobber())
496 start = MIIdx.getUseIndex();
497 SlotIndex end = start;
499 // If it is not used after definition, it is considered dead at
500 // the instruction defining it. Hence its interval is:
501 // [defSlot(def), defSlot(def)+1)
502 // For earlyclobbers, the defSlot was pushed back one; the extra
503 // advance below compensates.
// NOTE(review): the 'if (MO.isDead())' guard (original line 504) and the
// jump to the exit (goto/branch) are elided in this excerpt.
505 DEBUG(dbgs() << " dead");
506 end = start.getStoreIndex();
510 // If it is not dead on definition, it must be killed by a
511 // subsequent instruction. Hence its interval is:
512 // [defSlot(def), useSlot(kill)+1)
513 baseIndex = baseIndex.getNextIndex();
514 while (++mi != MBB->end()) {
// DBG_VALUEs occupy no slot; skip them without advancing baseIndex.
516 if (mi->isDebugValue())
518 if (getInstructionFromIndex(baseIndex) == 0)
519 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
521 if (mi->killsRegister(interval.reg, tri_)) {
522 DEBUG(dbgs() << " killed");
523 end = baseIndex.getDefIndex();
// A redef before any read: either a two-address tie (range runs to the
// redef) or the original def was effectively dead.
526 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
528 if (mi->isRegTiedToUseOperand(DefIdx)) {
529 // Two-address instruction.
530 end = baseIndex.getDefIndex();
532 // Another instruction redefines the register before it is ever read.
533 // Then the register is essentially dead at the instruction that
534 // defines it. Hence its interval is:
535 // [defSlot(def), defSlot(def)+1)
536 DEBUG(dbgs() << " dead");
537 end = start.getStoreIndex();
543 baseIndex = baseIndex.getNextIndex();
546 // The only case we should have a dead physreg here without a killing or
547 // instruction where we know it's dead is if it is live-in to the function
548 // and never used. Another possible case is the implicit use of the
549 // physical register has been deleted by two-address pass.
550 end = start.getStoreIndex();
// ---- exit: commit [start, end) to the interval ----
553 assert(start < end && "did not find end of interval?");
555 // Already exists? Extend old live interval.
556 VNInfo *ValNo = interval.getVNInfoAt(start);
557 bool Extend = ValNo != 0;
// (The '!Extend' guard for creating a fresh value, line 558, is elided.)
559 ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
560 if (Extend && MO.isEarlyClobber())
561 ValNo->setHasRedefByEC(true);
562 LiveRange LR(start, end, ValNo);
563 interval.addRange(LR);
564 DEBUG(dbgs() << " +" << LR << '\n');
// handleRegisterDef - Dispatch a register def to the virtual or physical
// handler; for allocatable physregs, also process every sub-register.
567 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
568 MachineBasicBlock::iterator MI,
572 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
573 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
574 getOrCreateInterval(MO.getReg()));
575 else if (allocatableRegs_[MO.getReg()]) {
576 MachineInstr *CopyMI = NULL;
// (CopyMI assignment for copy-like instructions, line 578, is elided.)
577 if (MI->isCopyLike())
579 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
580 getOrCreateInterval(MO.getReg()), CopyMI);
581 // Def of a register also defines its sub-registers.
582 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
583 // If MI also modifies the sub-register explicitly, avoid processing it
584 // more than once. Do not pass in TRI here so it checks for exact match.
585 if (!MI->definesRegister(*AS))
586 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
587 getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Build the range for a register live into MBB
// (typically a function argument in a physreg). Scans forward for the first
// kill or redef; if none, treats the value as dead or live-through
// depending on whether isAlias is set (that branch is partly elided).
591 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
593 LiveInterval &interval, bool isAlias) {
594 DEBUG(dbgs() << "\t\tlivein register: " << PrintReg(interval.reg, tri_));
596 // Look for kills, if it reaches a def before it's killed, then it shouldn't
597 // be considered a livein.
598 MachineBasicBlock::iterator mi = MBB->begin();
599 MachineBasicBlock::iterator E = MBB->end();
600 // Skip over DBG_VALUE at the start of the MBB.
601 if (mi != E && mi->isDebugValue()) {
602 while (++mi != E && mi->isDebugValue())
// (empty loop body; advance happens in the condition)
605 // MBB is empty except for DBG_VALUE's.
// (the early return for that case, lines 606-607, is elided)
609 SlotIndex baseIndex = MIIdx;
610 SlotIndex start = baseIndex;
611 if (getInstructionFromIndex(baseIndex) == 0)
612 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
614 SlotIndex end = baseIndex;
615 bool SeenDefUse = false;
// (the 'while (mi != E) {' loop header, line 616-617, is elided)
618 if (mi->killsRegister(interval.reg, tri_)) {
619 DEBUG(dbgs() << " killed");
620 end = baseIndex.getDefIndex();
// (SeenDefUse = true; break; elided)
623 } else if (mi->definesRegister(interval.reg, tri_)) {
624 // Another instruction redefines the register before it is ever read.
625 // Then the register is essentially dead at the instruction that defines
626 // it. Hence its interval is:
627 // [defSlot(def), defSlot(def)+1)
628 DEBUG(dbgs() << " dead");
629 end = start.getStoreIndex();
634 while (++mi != E && mi->isDebugValue())
635 // Skip over DBG_VALUE.
// (advance baseIndex only when the next real instruction exists)
638 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
641 // Live-in register might not be used at all.
// (the '!SeenDefUse' / 'isAlias' branching, lines 642-643 and 646, is elided)
644 DEBUG(dbgs() << " dead");
645 end = MIIdx.getStoreIndex();
647 DEBUG(dbgs() << " live through");
// Live-ins are modeled as PHI-def values at the block start: no real
// defining instruction may sit at that index.
652 SlotIndex defIdx = getMBBStartIdx(MBB);
653 assert(getInstructionFromIndex(defIdx) == 0 &&
654 "PHI def index points at actual instruction.");
656 interval.getNextValue(defIdx, 0, VNInfoAllocator);
657 vni->setIsPHIDef(true);
658 LiveRange LR(start, end, vni);
660 interval.addRange(LR);
661 DEBUG(dbgs() << " +" << LR << '\n');
664 /// computeIntervals - computes the live intervals for virtual
665 /// registers. for some ordering of the machine instructions [1,N] a
666 /// live interval is an interval [i, j) where 1 <= i <= j < N for
667 /// which a variable is live
668 void LiveIntervals::computeIntervals() {
669 DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
670 << "********** Function: "
671 << ((Value*)mf_->getFunction())->getName() << '\n');
673 SmallVector<unsigned, 8> UndefUses;
674 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
676 MachineBasicBlock *MBB = MBBI;
// (an empty-block skip, lines 677-679, is elided in this excerpt)
680 // Track the index of the current machine instr.
681 SlotIndex MIIndex = getMBBStartIdx(MBB);
682 DEBUG(dbgs() << "BB#" << MBB->getNumber()
683 << ":\t\t# derived from " << MBB->getName() << "\n");
685 // Create intervals for live-ins to this BB first.
686 for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
687 LE = MBB->livein_end(); LI != LE; ++LI) {
688 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
689 // Multiple live-ins can alias the same register.
// Sub-registers of a live-in are live-in as aliases (final 'true' arg,
// on the elided continuation line, presumably sets isAlias).
690 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
691 if (!hasInterval(*AS))
692 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
696 // Skip over empty initial indices.
697 if (getInstructionFromIndex(MIIndex) == 0)
698 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
700 for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
702 DEBUG(dbgs() << MIIndex << "\t" << *MI);
703 if (MI->isDebugValue())
// Walk operands in reverse so defs appended by handleRegisterDef (e.g.
// the <imp-def> added for partial first defs) are not revisited.
707 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
708 MachineOperand &MO = MI->getOperand(i);
709 if (!MO.isReg() || !MO.getReg())
712 // handle register defs - build intervals
// (the 'if (MO.isDef())' guard, line 713, is elided)
714 handleRegisterDef(MBB, MI, MIIndex, MO, i);
715 else if (MO.isUndef())
716 UndefUses.push_back(MO.getReg());
719 // Move to the next instr slot.
720 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
724 // Create empty intervals for registers defined by implicit_def's (except
725 // for those implicit_def that define values which are liveout of their
727 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
728 unsigned UndefReg = UndefUses[i];
729 (void)getOrCreateInterval(UndefReg);
// createInterval - Allocate a fresh LiveInterval for 'reg'. Physregs get an
// infinite spill weight (HUGE_VALF) so the allocator never spills them.
733 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
734 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
735 return new LiveInterval(reg, Weight);
738 /// dupInterval - Duplicate a live interval. The caller is responsible for
739 /// managing the allocated memory.
740 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
741 LiveInterval *NewLI = createInterval(li->reg);
// Deep-copies ranges and value numbers using this pass's VNInfo allocator.
742 NewLI->Copy(*li, mri_, getVNInfoAllocator());
746 /// shrinkToUses - After removing some uses of a register, shrink its live
747 /// range to just the remaining uses. This method does not compute reaching
748 /// defs for new uses, and it doesn't remove dead defs.
// Returns whether the shrunk interval may now be separable into multiple
// connected components (CanSeparate); 'dead' (if non-null) collects
// instructions whose defs all became dead.
749 bool LiveIntervals::shrinkToUses(LiveInterval *li,
750 SmallVectorImpl<MachineInstr*> *dead) {
751 DEBUG(dbgs() << "Shrink: " << *li << '\n');
752 assert(TargetRegisterInfo::isVirtualRegister(li->reg)
753 && "Can't only shrink physical registers");
754 // Find all the values used, including PHI kills.
755 SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;
757 // Visit all instructions reading li->reg.
758 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li->reg);
759 MachineInstr *UseMI = I.skipInstruction();) {
760 if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
762 SlotIndex Idx = getInstructionIndex(UseMI).getUseIndex();
763 VNInfo *VNI = li->getVNInfoAt(Idx);
764 assert(VNI && "Live interval not live into reading instruction");
765 if (VNI->def == Idx) {
766 // Special case: An early-clobber tied operand reads and writes the
767 // register one slot early.
768 Idx = Idx.getPrevSlot();
769 VNI = li->getVNInfoAt(Idx);
770 assert(VNI && "Early-clobber tied value not available");
772 WorkList.push_back(std::make_pair(Idx, VNI));
775 // Create a new live interval with only minimal live segments per def.
776 LiveInterval NewLI(li->reg, 0);
777 for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
// Seed each live value with a one-slot range at its def; extended below.
782 NewLI.addRange(LiveRange(VNI->def, VNI->def.getNextSlot(), VNI));
784 // A use tied to an early-clobber def ends at the load slot and isn't caught
785 // above. Catch it here instead. This probably only ever happens for inline
787 if (VNI->def.isUse())
788 if (VNInfo *UVNI = li->getVNInfoAt(VNI->def.getLoadIndex()))
789 WorkList.push_back(std::make_pair(VNI->def.getLoadIndex(), UVNI));
792 // Keep track of the PHIs that are in use.
793 SmallPtrSet<VNInfo*, 8> UsedPHIs;
795 // Extend intervals to reach all uses in WorkList.
796 while (!WorkList.empty()) {
797 SlotIndex Idx = WorkList.back().first;
798 VNInfo *VNI = WorkList.back().second;
// (WorkList.pop_back(), line 799, is elided in this excerpt)
800 const MachineBasicBlock *MBB = getMBBFromIndex(Idx);
801 SlotIndex BlockStart = getMBBStartIdx(MBB);
803 // Extend the live range for VNI to be live at Idx.
804 if (VNInfo *ExtVNI = NewLI.extendInBlock(BlockStart, Idx)) {
806 assert(ExtVNI == VNI && "Unexpected existing value number");
807 // Is this a PHIDef we haven't seen before?
// UsedPHIs.insert returns false if already present, so each PHI's
// predecessors are enqueued exactly once.
808 if (!VNI->isPHIDef() || VNI->def != BlockStart || !UsedPHIs.insert(VNI))
810 // The PHI is live, make sure the predecessors are live-out.
811 for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
812 PE = MBB->pred_end(); PI != PE; ++PI) {
813 SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
814 VNInfo *PVNI = li->getVNInfoAt(Stop);
815 // A predecessor is not required to have a live-out value for a PHI.
// (the 'if (PVNI)' guard, line 816, is elided)
817 assert(PVNI->hasPHIKill() && "Missing hasPHIKill flag");
818 WorkList.push_back(std::make_pair(Stop, PVNI));
824 // VNI is live-in to MBB.
825 DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
826 NewLI.addRange(LiveRange(BlockStart, Idx.getNextSlot(), VNI));
828 // Make sure VNI is live-out from the predecessors.
829 for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
830 PE = MBB->pred_end(); PI != PE; ++PI) {
831 SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
832 assert(li->getVNInfoAt(Stop) == VNI && "Wrong value out of predecessor");
833 WorkList.push_back(std::make_pair(Stop, VNI));
837 // Handle dead values.
838 bool CanSeparate = false;
839 for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
844 LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def);
845 assert(LII != NewLI.end() && "Missing live range for PHI");
// A value whose range was never extended past its def slot is dead.
846 if (LII->end != VNI->def.getNextSlot())
848 if (VNI->isPHIDef()) {
849 // This is a dead PHI. Remove it.
850 VNI->setIsUnused(true);
851 NewLI.removeRange(*LII);
852 DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
// (CanSeparate = true; presumably set here — the line is elided)
855 // This is a dead def. Make sure the instruction knows.
856 MachineInstr *MI = getInstructionFromIndex(VNI->def);
857 assert(MI && "No instruction defining live value");
858 MI->addRegisterDead(li->reg, tri_);
859 if (dead && MI->allDefsAreDead()) {
860 DEBUG(dbgs() << "All defs dead: " << VNI->def << '\t' << *MI);
// (dead->push_back(MI), line 861, is elided)
866 // Move the trimmed ranges back.
867 li->ranges.swap(NewLI.ranges);
868 DEBUG(dbgs() << "Shrunk: " << *li << '\n');
873 //===----------------------------------------------------------------------===//
874 // Register allocator hooks.
// getLastSplitPoint - Return the latest point in 'mbb' where spill code for
// 'li' may be inserted: normally the first terminator, but if li is live
// into a landing-pad successor, spill code must precede the (last) call
// that can throw.
877 MachineBasicBlock::iterator
878 LiveIntervals::getLastSplitPoint(const LiveInterval &li,
879 MachineBasicBlock *mbb) const {
880 const MachineBasicBlock *lpad = mbb->getLandingPadSuccessor();
882 // If li is not live into a landing pad, we can insert spill code before the
884 if (!lpad || !isLiveInToMBB(li, lpad))
885 return mbb->getFirstTerminator();
887 // When there is a landing pad, spill code must go before the call instruction
// Scan backward from the end looking for the call (loop body partly elided).
889 MachineBasicBlock::iterator I = mbb->end(), B = mbb->begin();
892 if (I->getDesc().isCall())
895 // The block contains no calls that can throw, so use the first terminator.
896 return mbb->getFirstTerminator();
// addKillFlags - Recompute <kill> flags on instructions from the computed
// live ranges: every range end that is not an MBB edge corresponds to a
// killing instruction. Virtual registers only.
899 void LiveIntervals::addKillFlags() {
900 for (iterator I = begin(), E = end(); I != E; ++I) {
901 unsigned Reg = I->first;
// Skip physregs and regs with no non-debug uses (continues are elided).
902 if (TargetRegisterInfo::isPhysicalRegister(Reg))
904 if (mri_->reg_nodbg_empty(Reg))
906 LiveInterval *LI = I->second;
908 // Every instruction that kills Reg corresponds to a live range end point.
909 for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE;
911 // A LOAD index indicates an MBB edge.
912 if (RI->end.isLoad())
914 MachineInstr *MI = getInstructionFromIndex(RI->end);
// (a null-MI guard, line 915-916, is elided)
917 MI->addRegisterKilled(Reg, NULL);
922 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
923 /// allow one) virtual register operand, then its uses are implicitly using
924 /// the register. Returns the virtual register.
925 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
926 MachineInstr *MI) const {
// (the 'unsigned RegOp = 0;' accumulator, line 927, is elided)
928 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
929 MachineOperand &MO = MI->getOperand(i);
930 if (!MO.isReg() || !MO.isUse())
932 unsigned Reg = MO.getReg();
933 if (Reg == 0 || Reg == li.reg)
// Unallocatable physreg uses (e.g. reserved regs) don't block remat.
936 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
937 !allocatableRegs_[Reg])
939 // FIXME: For now, only remat MI with at most one register operand.
// (the assert condition 'RegOp == 0', line 940, is elided)
941 "Can't rematerialize instruction with multiple register operand!");
950 /// isValNoAvailableAt - Return true if the val# of the specified interval
951 /// which reaches the given instruction also reaches the specified use index.
952 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
953 SlotIndex UseIdx) const {
// Both points must be covered by the SAME value number for remat safety.
954 VNInfo *UValNo = li.getVNInfoAt(UseIdx);
955 return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
958 /// isReMaterializable - Returns true if the definition MI of the specified
959 /// val# of the specified interval is re-materializable.
// Also checks the one allowed implicit vreg use: its value must reach every
// use of li, and it must not itself be about to be spilled (SpillIs).
961 LiveIntervals::isReMaterializable(const LiveInterval &li,
962 const VNInfo *ValNo, MachineInstr *MI,
963 const SmallVectorImpl<LiveInterval*> *SpillIs,
// Target hook decides basic remat legality (alias analysis for loads).
968 if (!tii_->isTriviallyReMaterializable(MI, aa_))
971 // Target-specific code can mark an instruction as being rematerializable
972 // if it has one virtual reg use, though it had better be something like
973 // a PIC base register which is likely to be live everywhere.
974 unsigned ImpUse = getReMatImplicitUse(li, MI);
// (the 'if (ImpUse) {' guard, line 975, is elided)
976 const LiveInterval &ImpLi = getInterval(ImpUse);
977 for (MachineRegisterInfo::use_nodbg_iterator
978 ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
980 MachineInstr *UseMI = &*ri;
981 SlotIndex UseIdx = getInstructionIndex(UseMI);
// Only uses reached by this val# matter; others are handled separately.
982 if (li.getVNInfoAt(UseIdx) != ValNo)
984 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
988 // If a register operand of the re-materialized instruction is going to
989 // be spilled next, then it's not legal to re-materialize this instruction.
// (the 'if (SpillIs)' guard, line 990, is elided)
991 for (unsigned i = 0, e = SpillIs->size(); i != e; ++i)
992 if (ImpUse == (*SpillIs)[i]->reg)
998 /// isReMaterializable - Returns true if the definition MI of the specified
999 /// val# of the specified interval is re-materializable.
// Convenience overload: no SpillIs set; the isLoad out-param is a dummy.
1000 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1001 const VNInfo *ValNo, MachineInstr *MI) {
1003 return isReMaterializable(li, ValNo, MI, 0, Dummy2);
1006 /// isReMaterializable - Returns true if every definition of MI of every
1007 /// val# of the specified interval is re-materializable.
// Whole-interval query: succeeds only when each live (non-unused) val# has a
// rematerializable def. The isLoad out-flag accumulates whether any remat'ed
// def is a load.
1009 LiveIntervals::isReMaterializable(const LiveInterval &li,
1010 const SmallVectorImpl<LiveInterval*> *SpillIs,
1013 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1015 const VNInfo *VNI = *i;
1016 if (VNI->isUnused())
1017 continue; // Dead val#.
1018 // Is the def for the val# rematerializable?
1019 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1022 bool DefIsLoad = false;
1024 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
// Remember if any def is a load so callers can try load folding later.
1026 isLoad |= DefIsLoad;
1031 /// FilterFoldedOps - Filter out two-address use operands. Return
1032 /// true if it finds any issue with the operands that ought to prevent
// folding. On success, FoldOps receives the operand indexes that may be
// folded and MRInfo accumulates VirtRegMap mod/ref flags for the register.
1034 static bool FilterFoldedOps(MachineInstr *MI,
1035 SmallVector<unsigned, 2> &Ops,
1037 SmallVector<unsigned, 2> &FoldOps) {
1039 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1040 unsigned OpIdx = Ops[i];
1041 MachineOperand &MO = MI->getOperand(OpIdx);
1042 // FIXME: fold subreg use.
// Defs mark the access as a modification...
1046 MRInfo |= (unsigned)VirtRegMap::isMod;
1048 // Filter out two-address use operand(s).
// ...and a use tied to a def is both modified and referenced; such tied
// operands are not added to FoldOps.
1049 if (MI->isRegTiedToDefOperand(OpIdx)) {
1050 MRInfo = VirtRegMap::isModRef;
1053 MRInfo |= (unsigned)VirtRegMap::isRef;
1055 FoldOps.push_back(OpIdx);
1061 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1062 /// slot / to reg or any rematerialized load into ith operand of specified
1063 /// MI. If it is successul, MI is updated with the newly created MI and
// returns true (per the elided tail). isSS selects stack-slot folding (Slot)
// versus folding the rematerialized DefMI.
1065 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1066 VirtRegMap &vrm, MachineInstr *DefMI,
1068 SmallVector<unsigned, 2> &Ops,
1069 bool isSS, int Slot, unsigned Reg) {
1070 // If it is an implicit def instruction, just delete it.
1071 if (MI->isImplicitDef()) {
1072 RemoveMachineInstrFromMaps(MI);
1073 vrm.RemoveMachineInstrFromMaps(MI);
1074 MI->eraseFromParent();
1079 // Filter the list of operand indexes that are to be folded. Abort if
1080 // any operand will prevent folding.
1081 unsigned MRInfo = 0;
1082 SmallVector<unsigned, 2> FoldOps;
1083 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1086 // The only time it's safe to fold into a two address instruction is when
1087 // it's folding reload and spill from / into a spill stack slot.
1088 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Ask the target to produce the folded instruction (stack-slot or remat form).
1091 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
1092 : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
1094 // Remember this instruction uses the spill slot.
1095 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1097 // Attempt to fold the memory reference into the instruction. If
1098 // we can do this, we don't need to insert spill code.
1099 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1100 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Transfer all VirtRegMap bookkeeping from the old MI to the folded one,
// then replace MI in the slot-index maps and erase it.
1101 vrm.transferSpillPts(MI, fmi);
1102 vrm.transferRestorePts(MI, fmi);
1103 vrm.transferEmergencySpills(MI, fmi);
1104 ReplaceMachineInstrInMaps(MI, fmi);
1105 MI->eraseFromParent();
1113 /// canFoldMemoryOperand - Returns true if the specified load / store
1114 /// folding is possible.
// Dry-run counterpart of tryFoldMemoryOperand: same operand filtering, but
// only queries the target without mutating anything.
1115 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1116 SmallVector<unsigned, 2> &Ops,
1118 // Filter the list of operand indexes that are to be folded. Abort if
1119 // any operand will prevent folding.
1120 unsigned MRInfo = 0;
1121 SmallVector<unsigned, 2> FoldOps;
1122 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1125 // It's only legal to remat for a use, not a def.
1126 if (ReMat && (MRInfo & VirtRegMap::isMod))
1129 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return true when every range of the interval lies in
// the same machine basic block (used to decide whether splitting can help).
1132 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1133 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
// The block covering the first range is the candidate; all remaining ranges
// must map to the same block.
1135 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
1140 for (++itr; itr != li.ranges.end(); ++itr) {
1141 MachineBasicBlock *mbb2 =
1142 indexes_->getMBBCoveringRange(itr->start, itr->end);
1151 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1152 /// interval on to-be re-materialized operands of MI) with new register.
1153 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1154 MachineInstr *MI, unsigned NewVReg,
1156 // There is an implicit use. That means one of the other operand is
1157 // being remat'ed and the remat'ed instruction has li.reg as an
1158 // use operand. Make sure we rewrite that as well.
1159 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1160 MachineOperand &MO = MI->getOperand(i);
1162 unsigned Reg = MO.getReg();
// Only virtual registers that were rematerialized can embed li.reg.
1164 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1166 if (!vrm.isReMaterialized(Reg))
// Patch the saved remat instruction so its use of li.reg now names NewVReg.
1168 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1169 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1171 UseMO->setReg(NewVReg);
1175 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1176 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// Rewrites one instruction's operands from li.reg to a fresh NewVReg, trying
// load/store/remat folding along the way. Outputs: NewVReg (created or
// reused), HasDef/HasUse (what was rewritten), returns whether folding is
// still possible later (CanFold). NOTE(review): this listing is elided —
// several statements/braces between the numbered lines are not visible.
1177 bool LiveIntervals::
1178 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1179 bool TrySplit, SlotIndex index, SlotIndex end,
1181 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1182 unsigned Slot, int LdSlot,
1183 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1185 const TargetRegisterClass* rc,
1186 SmallVector<int, 4> &ReMatIds,
1187 const MachineLoopInfo *loopInfo,
1188 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1189 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1190 std::vector<LiveInterval*> &NewLIs) {
1191 bool CanFold = false;
// Scan every operand; the loop restarts (goto RestartInstruction below) when
// a successful fold replaces MI wholesale.
1193 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1194 MachineOperand& mop = MI->getOperand(i);
1197 unsigned Reg = mop.getReg();
1198 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1203 bool TryFold = !DefIsReMat;
1204 bool FoldSS = true; // Default behavior unless it's a remat.
1205 int FoldSlot = Slot;
1207 // If this is the rematerializable definition MI itself and
1208 // all of its uses are rematerialized, simply delete it.
1209 if (MI == ReMatOrigDefMI && CanDelete) {
1210 DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
1212 RemoveMachineInstrFromMaps(MI);
1213 vrm.RemoveMachineInstrFromMaps(MI);
1214 MI->eraseFromParent();
1218 // If def for this use can't be rematerialized, then try folding.
1219 // If def is rematerializable and it's a load, also try folding.
1220 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1222 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1228 // Scan all of the operands of this instruction rewriting operands
1229 // to use NewVReg instead of li.reg as appropriate. We do this for
1232 // 1. If the instr reads the same spilled vreg multiple times, we
1233 // want to reuse the NewVReg.
1234 // 2. If the instr is a two-addr instruction, we are required to
1235 // keep the src/dst regs pinned.
1237 // Keep track of whether we replace a use and/or def so that we can
1238 // create the spill interval with the appropriate range.
1239 SmallVector<unsigned, 2> Ops;
1240 tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);
1242 // Create a new virtual register for the spill interval.
1243 // Create the new register now so we can map the fold instruction
1244 // to the new register so when it is unfolded we get the correct
1246 bool CreatedNewVReg = false;
1248 NewVReg = mri_->createVirtualRegister(rc);
1250 CreatedNewVReg = true;
1252 // The new virtual register should get the same allocation hints as the
1254 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1255 if (Hint.first || Hint.second)
1256 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1262 // Do not fold load / store here if we are splitting. We'll find an
1263 // optimal point to insert a load / store later.
1265 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1266 Ops, FoldSS, FoldSlot, NewVReg)) {
1267 // Folding the load/store can completely change the instruction in
1268 // unpredictable ways, rescan it from the beginning.
1271 // We need to give the new vreg the same stack slot as the
1272 // spilled interval.
1273 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1279 if (isNotInMIMap(MI))
1281 goto RestartInstruction;
1284 // We'll try to fold it later if it's profitable.
1285 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand, plus every other operand index in Ops, to NewVReg.
1289 mop.setReg(NewVReg);
1290 if (mop.isImplicit())
1291 rewriteImplicitOps(li, MI, NewVReg, vrm);
1293 // Reuse NewVReg for other reads.
1294 bool HasEarlyClobber = false;
1295 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1296 MachineOperand &mopj = MI->getOperand(Ops[j]);
1297 mopj.setReg(NewVReg);
1298 if (mopj.isImplicit())
1299 rewriteImplicitOps(li, MI, NewVReg, vrm);
1300 if (mopj.isEarlyClobber())
1301 HasEarlyClobber = true;
// Bookkeeping for a freshly created vreg: record remat id and/or stack slot.
1304 if (CreatedNewVReg) {
1306 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1307 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1308 // Each valnum may have its own remat id.
1309 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1311 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1313 if (!CanDelete || (HasUse && HasDef)) {
1314 // If this is a two-addr instruction then its use operands are
1315 // rematerializable but its def is not. It should be assigned a
1317 vrm.assignVirt2StackSlot(NewVReg, Slot);
1320 vrm.assignVirt2StackSlot(NewVReg, Slot);
1322 } else if (HasUse && HasDef &&
1323 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1324 // If this interval hasn't been assigned a stack slot (because earlier
1325 // def is a deleted remat def), do it now.
1326 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1327 vrm.assignVirt2StackSlot(NewVReg, Slot);
1330 // Re-matting an instruction with virtual register use. Add the
1331 // register as an implicit use on the use MI.
1332 if (DefIsReMat && ImpUse)
1333 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1335 // Create a new register interval for this spill / remat.
1336 LiveInterval &nI = getOrCreateInterval(NewVReg);
1337 if (CreatedNewVReg) {
1338 NewLIs.push_back(&nI);
1339 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1341 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Build the live ranges for the new interval around this instruction's slot.
1345 if (CreatedNewVReg) {
1346 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1347 nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
1348 DEBUG(dbgs() << " +" << LR);
1351 // Extend the split live interval to this def / use.
1352 SlotIndex End = index.getDefIndex();
1353 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1354 nI.getValNumInfo(nI.getNumValNums()-1));
1355 DEBUG(dbgs() << " +" << LR);
1360 // An early clobber starts at the use slot, except for an early clobber
1361 // tied to a use operand (yes, that is a thing).
1362 LiveRange LR(HasEarlyClobber && !HasUse ?
1363 index.getUseIndex() : index.getDefIndex(),
1364 index.getStoreIndex(),
1365 nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
1366 DEBUG(dbgs() << " +" << LR);
1371 dbgs() << "\t\t\t\tAdded new interval: ";
1372 nI.print(dbgs(), tri_);
// anyKillInMBBAfterIdx - Return true if li.reg is killed anywhere in MBB
// strictly after Idx (i.e. in the range (Idx, end-of-MBB)).
1378 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1380 MachineBasicBlock *MBB,
1381 SlotIndex Idx) const {
1382 return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
1385 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1386 /// during spilling.
1388 struct RewriteInfo {
1391 RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
// Comparator used with std::sort to order RewriteInfo records by slot index.
1394 struct RewriteInfoCompare {
1395 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1396 return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Rewrite all defs/uses of li.reg inside the
// live range [I->start, I->end): collects the affected instructions sorted by
// slot index, rewrites each via rewriteInstructionForSpills, and records per-
// MBB spill points (SpillIdxes/SpillMBBs) and restore points (RestoreIdxes/
// RestoreMBBs) for the spiller. DBG_VALUEs are retargeted to the stack slot
// or deleted. NOTE(review): this listing is elided — statements/braces
// between the numbered lines are not visible.
1401 void LiveIntervals::
1402 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1403 LiveInterval::Ranges::const_iterator &I,
1404 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1405 unsigned Slot, int LdSlot,
1406 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1408 const TargetRegisterClass* rc,
1409 SmallVector<int, 4> &ReMatIds,
1410 const MachineLoopInfo *loopInfo,
1411 BitVector &SpillMBBs,
1412 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1413 BitVector &RestoreMBBs,
1414 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1415 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1416 std::vector<LiveInterval*> &NewLIs) {
1417 bool AllCanFold = true;
1418 unsigned NewVReg = 0;
1419 SlotIndex start = I->start.getBaseIndex();
1420 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1422 // First collect all the def / use in this live range that will be rewritten.
1423 // Make sure they are sorted according to instruction index.
1424 std::vector<RewriteInfo> RewriteMIs;
1425 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1426 re = mri_->reg_end(); ri != re; ) {
1427 MachineInstr *MI = &*ri;
1428 MachineOperand &O = ri.getOperand();
1430 if (MI->isDebugValue()) {
1431 // Modify DBG_VALUE now that the value is in a spill slot.
1432 if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
1433 uint64_t Offset = MI->getOperand(1).getImm();
1434 const MDNode *MDPtr = MI->getOperand(2).getMetadata();
1435 DebugLoc DL = MI->getDebugLoc();
1436 int FI = isLoadSS ? LdSlot : (int)Slot;
1437 if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
1438 Offset, MDPtr, DL)) {
1439 DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
1440 ReplaceMachineInstrInMaps(MI, NewDV);
1441 MachineBasicBlock *MBB = MI->getParent();
1442 MBB->insert(MBB->erase(MI), NewDV);
// If no frame-index DBG_VALUE could be produced, drop the debug info.
1447 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1448 RemoveMachineInstrFromMaps(MI);
1449 vrm.RemoveMachineInstrFromMaps(MI);
1450 MI->eraseFromParent();
1453 assert(!(O.isImplicit() && O.isUse()) &&
1454 "Spilling register that's used as implicit use?");
1455 SlotIndex index = getInstructionIndex(MI);
// Skip instructions outside the live range being rewritten.
1456 if (index < start || index >= end)
1460 // Must be defined by an implicit def. It should not be spilled. Note,
1461 // this is for correctness reason. e.g.
1462 // 8 %reg1024<def> = IMPLICIT_DEF
1463 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1464 // The live range [12, 14) are not part of the r1024 live interval since
1465 // it's defined by an implicit def. It will not conflicts with live
1466 // interval of r1025. Now suppose both registers are spilled, you can
1467 // easily see a situation where both registers are reloaded before
1468 // the INSERT_SUBREG and both target registers that would overlap.
1470 RewriteMIs.push_back(RewriteInfo(index, MI));
1472 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1474 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1475 // Now rewrite the defs and uses.
1476 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1477 RewriteInfo &rwi = RewriteMIs[i];
1479 SlotIndex index = rwi.Index;
1480 MachineInstr *MI = rwi.MI;
1481 // If MI def and/or use the same register multiple times, then there
1482 // are multiple entries.
1483 while (i != e && RewriteMIs[i].MI == MI) {
1484 assert(RewriteMIs[i].Index == index);
1487 MachineBasicBlock *MBB = MI->getParent();
1489 if (ImpUse && MI != ReMatDefMI) {
1490 // Re-matting an instruction with virtual register use. Prevent interval
1491 // from being spilled.
1492 getInterval(ImpUse).markNotSpillable();
// When splitting, reuse the vreg already chosen for this MBB if any.
1495 unsigned MBBId = MBB->getNumber();
1496 unsigned ThisVReg = 0;
1498 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1499 if (NVI != MBBVRegsMap.end()) {
1500 ThisVReg = NVI->second;
1507 // It's better to start a new interval to avoid artifically
1508 // extend the new interval.
1509 if (MI->readsWritesVirtualRegister(li.reg) ==
1510 std::make_pair(false,true)) {
1511 MBBVRegsMap.erase(MBB->getNumber());
1517 bool IsNew = ThisVReg == 0;
1519 // This ends the previous live interval. If all of its def / use
1520 // can be folded, give it a low spill weight.
1521 if (NewVReg && TrySplit && AllCanFold) {
1522 LiveInterval &nI = getOrCreateInterval(NewVReg);
1529 bool HasDef = false;
1530 bool HasUse = false;
1531 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1532 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1533 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1534 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1535 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1536 if (!HasDef && !HasUse)
1539 AllCanFold &= CanFold;
1541 // Update weight of spill interval.
1542 LiveInterval &nI = getOrCreateInterval(NewVReg);
1544 // The spill weight is now infinity as it cannot be spilled again.
1545 nI.markNotSpillable();
1549 // Keep track of the last def and first use in each MBB.
1551 if (MI != ReMatOrigDefMI || !CanDelete) {
1552 bool HasKill = false;
1554 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1556 // If this is a two-address code, then this index starts a new VNInfo.
1557 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1559 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
// Record/merge a spill point for this MBB (last def wins; two-address defs
// limit foldability).
1561 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1562 SpillIdxes.find(MBBId);
1564 if (SII == SpillIdxes.end()) {
1565 std::vector<SRInfo> S;
1566 S.push_back(SRInfo(index, NewVReg, true));
1567 SpillIdxes.insert(std::make_pair(MBBId, S));
1568 } else if (SII->second.back().vreg != NewVReg) {
1569 SII->second.push_back(SRInfo(index, NewVReg, true));
1570 } else if (index > SII->second.back().index) {
1571 // If there is an earlier def and this is a two-address
1572 // instruction, then it's not possible to fold the store (which
1573 // would also fold the load).
1574 SRInfo &Info = SII->second.back();
1576 Info.canFold = !HasUse;
1578 SpillMBBs.set(MBBId);
1579 } else if (SII != SpillIdxes.end() &&
1580 SII->second.back().vreg == NewVReg &&
1581 index > SII->second.back().index) {
1582 // There is an earlier def that's not killed (must be two-address).
1583 // The spill is no longer needed.
1584 SII->second.pop_back();
1585 if (SII->second.empty()) {
1586 SpillIdxes.erase(MBBId);
1587 SpillMBBs.reset(MBBId);
// Record/adjust restore points for uses in this MBB.
1594 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1595 SpillIdxes.find(MBBId);
1596 if (SII != SpillIdxes.end() &&
1597 SII->second.back().vreg == NewVReg &&
1598 index > SII->second.back().index)
1599 // Use(s) following the last def, it's not safe to fold the spill.
1600 SII->second.back().canFold = false;
1601 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1602 RestoreIdxes.find(MBBId);
1603 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1604 // If we are splitting live intervals, only fold if it's the first
1605 // use and there isn't another use later in the MBB.
1606 RII->second.back().canFold = false;
1608 // Only need a reload if there isn't an earlier def / use.
1609 if (RII == RestoreIdxes.end()) {
1610 std::vector<SRInfo> Infos;
1611 Infos.push_back(SRInfo(index, NewVReg, true));
1612 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1614 RII->second.push_back(SRInfo(index, NewVReg, true));
1616 RestoreMBBs.set(MBBId);
1620 // Update spill weight.
1621 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1622 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1625 if (NewVReg && TrySplit && AllCanFold) {
1626 // If all of its def / use can be folded, give it a low spill weight.
1627 LiveInterval &nI = getOrCreateInterval(NewVReg);
// alsoFoldARestore - Return true if MBB Id contains a foldable restore record
// for vreg vr at exactly the given slot index.
1632 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1633 unsigned vr, BitVector &RestoreMBBs,
1634 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1635 if (!RestoreMBBs[Id])
1637 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
// Linear scan: a match requires same index, same vreg, and canFold set.
1638 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1639 if (Restores[i].index == index &&
1640 Restores[i].vreg == vr &&
1641 Restores[i].canFold)
// eraseRestoreInfo - Invalidate (by clearing the index) the restore record in
// MBB Id that matches the given slot index, so no reload is issued for it.
1646 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1647 unsigned vr, BitVector &RestoreMBBs,
1648 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1649 if (!RestoreMBBs[Id])
1651 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1652 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
// NOTE(review): this condition tests vreg for nonzero instead of comparing it
// with the 'vr' parameter (which is otherwise unused here). Likely intended
// as 'Restores[i].vreg == vr' — verify against the pristine source; the text
// may also have been damaged by the elision in this listing.
1653 if (Restores[i].index == index && Restores[i].vreg)
1654 Restores[i].index = SlotIndex();
1657 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1658 /// spilled and create empty intervals for their uses.
1660 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1661 const TargetRegisterClass* rc,
1662 std::vector<LiveInterval*> &NewLIs) {
// Walk every remaining reference to li.reg (defs and uses).
1663 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1664 re = mri_->reg_end(); ri != re; ) {
1665 MachineOperand &O = ri.getOperand();
1666 MachineInstr *MI = &*ri;
1668 if (MI->isDebugValue()) {
1669 // Remove debug info for now.
1671 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
// Any leftover def must be an IMPLICIT_DEF — erase it entirely.
1675 assert(MI->isImplicitDef() &&
1676 "Register def was not rewritten?");
1677 RemoveMachineInstrFromMaps(MI);
1678 vrm.RemoveMachineInstrFromMaps(MI);
1679 MI->eraseFromParent();
1681 // This must be an use of an implicit_def so it's not part of the live
1682 // interval. Create a new empty live interval for it.
1683 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1684 unsigned NewVReg = mri_->createVirtualRegister(rc);
1686 vrm.setIsImplicitlyDefined(NewVReg);
1687 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Rewrite every operand of this user that still names li.reg.
1688 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1689 MachineOperand &MO = MI->getOperand(i);
1690 if (MO.isReg() && MO.getReg() == li.reg) {
// getSpillWeight - Compute the weight contribution of one def and/or use at
// the given loop depth; deeper loops weigh (roughly exponentially) more.
1700 LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1701 // Limit the loop depth ridiculousness.
1702 if (loopDepth > 200)
1705 // The loop depth is used to roughly estimate the number of times the
1706 // instruction is executed. Something like 10^d is simple, but will quickly
1707 // overflow a float. This expression behaves like 10^d for small d, but is
1708 // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1709 // headroom before overflow.
1710 float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
// isDef/isUse are 0/1, so a slot that both defines and uses counts double.
1712 return (isDef + isUse) * lc;
// normalizeSpillWeights - Scale each new interval's accumulated weight by its
// size so short and long intervals are comparable.
1715 static void normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1716 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1718 normalizeSpillWeight(NewLIs[i]->weight, NewLIs[i]->getSize());
// addIntervalsForSpills - Top-level spiller driver: decides per-val# whether
// to rematerialize or spill to a stack slot, rewrites all defs/uses via
// rewriteInstructionsForSpills, then places/folds spill and restore points
// and returns the freshly created intervals. NOTE(review): this listing is
// elided — statements/braces between the numbered lines are not visible.
1721 std::vector<LiveInterval*> LiveIntervals::
1722 addIntervalsForSpills(const LiveInterval &li,
1723 const SmallVectorImpl<LiveInterval*> *SpillIs,
1724 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1725 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1728 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1729 li.print(dbgs(), tri_);
1733 // Each bit specify whether a spill is required in the MBB.
1734 BitVector SpillMBBs(mf_->getNumBlockIDs());
1735 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1736 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1737 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1738 DenseMap<unsigned,unsigned> MBBVRegsMap;
1739 std::vector<LiveInterval*> NewLIs;
1740 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-val# rematerialization state, indexed by VNInfo id.
1742 unsigned NumValNums = li.getNumValNums();
1743 SmallVector<MachineInstr*, 4> ReMatDefs;
1744 ReMatDefs.resize(NumValNums, NULL);
1745 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1746 ReMatOrigDefs.resize(NumValNums, NULL);
1747 SmallVector<int, 4> ReMatIds;
1748 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1749 BitVector ReMatDelete(NumValNums);
1750 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1752 // Spilling a split live interval. It cannot be split any further. Also,
1753 // it's also guaranteed to be a single val# / range interval.
1754 if (vrm.getPreSplitReg(li.reg)) {
1755 vrm.setIsSplitFromReg(li.reg, 0);
1756 // Unset the split kill marker on the last use.
1757 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1758 if (KillIdx != SlotIndex()) {
1759 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1760 assert(KillMI && "Last use disappeared?");
1761 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1762 assert(KillOp != -1 && "Last use disappeared?");
1763 KillMI->getOperand(KillOp).setIsKill(false);
1765 vrm.removeKillPoint(li.reg);
1766 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1767 Slot = vrm.getStackSlot(li.reg);
1768 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1769 MachineInstr *ReMatDefMI = DefIsReMat ?
1770 vrm.getReMaterializedMI(li.reg) : NULL;
1772 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1773 bool isLoad = isLoadSS ||
1774 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1775 bool IsFirstRange = true;
1776 for (LiveInterval::Ranges::const_iterator
1777 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1778 // If this is a split live interval with multiple ranges, it means there
1779 // are two-address instructions that re-defined the value. Only the
1780 // first def can be rematerialized!
1782 // Note ReMatOrigDefMI has already been deleted.
1783 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1784 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1785 false, vrm, rc, ReMatIds, loopInfo,
1786 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1787 MBBVRegsMap, NewLIs);
1789 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1790 Slot, 0, false, false, false,
1791 false, vrm, rc, ReMatIds, loopInfo,
1792 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1793 MBBVRegsMap, NewLIs);
1795 IsFirstRange = false;
// Pre-split path finishes here (early return on an elided line).
1798 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1799 normalizeSpillWeights(NewLIs);
// Normal path: only consider splitting when li spans multiple MBBs.
1803 bool TrySplit = !intervalIsInOneMBB(li);
1806 bool NeedStackSlot = false;
// Pass 1: classify each val# — rematerializable (clone its def) or needing
// a stack slot.
1807 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1809 const VNInfo *VNI = *i;
1810 unsigned VN = VNI->id;
1811 if (VNI->isUnused())
1812 continue; // Dead val#.
1813 // Is the def for the val# rematerializable?
1814 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1816 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1817 // Remember how to remat the def of this val#.
1818 ReMatOrigDefs[VN] = ReMatDefMI;
1819 // Original def may be modified so we have to make a copy here.
1820 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1821 CloneMIs.push_back(Clone);
1822 ReMatDefs[VN] = Clone;
1824 bool CanDelete = true;
1825 if (VNI->hasPHIKill()) {
1826 // A kill is a phi node, not all of its uses can be rematerialized.
1827 // It must not be deleted.
1829 // Need a stack slot if there is any live range where uses cannot be
1831 NeedStackSlot = true;
1834 ReMatDelete.set(VN);
1836 // Need a stack slot if there is any live range where uses cannot be
1838 NeedStackSlot = true;
1842 // One stack slot per live interval.
1843 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1844 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1845 Slot = vrm.assignVirt2StackSlot(li.reg);
1847 // This case only occurs when the prealloc splitter has already assigned
1848 // a stack slot to this vreg.
1850 Slot = vrm.getStackSlot(li.reg);
1853 // Create new intervals and rewrite defs and uses.
// Pass 2: rewrite every range using the per-val# remat decisions.
1854 for (LiveInterval::Ranges::const_iterator
1855 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1856 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1857 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1858 bool DefIsReMat = ReMatDefMI != NULL;
1859 bool CanDelete = ReMatDelete[I->valno->id];
1861 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1862 bool isLoad = isLoadSS ||
1863 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1864 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1865 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1866 CanDelete, vrm, rc, ReMatIds, loopInfo,
1867 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1868 MBBVRegsMap, NewLIs);
1871 // Insert spills / restores if we are splitting.
1873 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1874 normalizeSpillWeights(NewLIs);
// Pass 3a: place spills — fold the store into the def where legal, otherwise
// tell the spiller (via VirtRegMap) to emit one.
1878 SmallPtrSet<LiveInterval*, 4> AddedKill;
1879 SmallVector<unsigned, 2> Ops;
1880 if (NeedStackSlot) {
1881 int Id = SpillMBBs.find_first();
1883 std::vector<SRInfo> &spills = SpillIdxes[Id];
1884 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1885 SlotIndex index = spills[i].index;
1886 unsigned VReg = spills[i].vreg;
1887 LiveInterval &nI = getOrCreateInterval(VReg);
1888 bool isReMat = vrm.isReMaterialized(VReg);
1889 MachineInstr *MI = getInstructionFromIndex(index);
1890 bool CanFold = false;
1891 bool FoundUse = false;
1893 if (spills[i].canFold) {
1895 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1896 MachineOperand &MO = MI->getOperand(j);
1897 if (!MO.isReg() || MO.getReg() != VReg)
1904 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1905 RestoreMBBs, RestoreIdxes))) {
1906 // MI has two-address uses of the same register. If the use
1907 // isn't the first and only use in the BB, then we can't fold
1908 // it. FIXME: Move this to rewriteInstructionsForSpills.
1915 // Fold the store into the def if possible.
1916 bool Folded = false;
1917 if (CanFold && !Ops.empty()) {
1918 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1921 // Also folded uses, do not issue a load.
1922 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1923 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1925 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1929 // Otherwise tell the spiller to issue a spill.
1931 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1932 bool isKill = LR->end == index.getStoreIndex();
1933 if (!MI->registerDefIsDead(nI.reg))
1934 // No need to spill a dead def.
1935 vrm.addSpillPoint(VReg, isKill, MI);
1937 AddedKill.insert(&nI);
1940 Id = SpillMBBs.find_next(Id);
// Pass 3b: place restores — fold the reload (or the remat'ed load) into the
// use where legal, otherwise record a restore point.
1944 int Id = RestoreMBBs.find_first();
1946 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1947 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1948 SlotIndex index = restores[i].index;
1949 if (index == SlotIndex())
1951 unsigned VReg = restores[i].vreg;
1952 LiveInterval &nI = getOrCreateInterval(VReg);
1953 bool isReMat = vrm.isReMaterialized(VReg);
1954 MachineInstr *MI = getInstructionFromIndex(index);
1955 bool CanFold = false;
1957 if (restores[i].canFold) {
1959 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1960 MachineOperand &MO = MI->getOperand(j);
1961 if (!MO.isReg() || MO.getReg() != VReg)
1965 // If this restore were to be folded, it would have been folded
1974 // Fold the load into the use if possible.
1975 bool Folded = false;
1976 if (CanFold && !Ops.empty()) {
1978 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1980 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1982 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1983 // If the rematerializable def is a load, also try to fold it.
1984 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1985 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1986 Ops, isLoadSS, LdSlot, VReg);
1988 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1990 // Re-matting an instruction with virtual register use. Add the
1991 // register as an implicit use on the use MI and mark the register
1992 // interval as unspillable.
1993 LiveInterval &ImpLi = getInterval(ImpUse);
1994 ImpLi.markNotSpillable();
1995 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2000 // If folding is not possible / failed, then tell the spiller to issue a
2001 // load / rematerialization for us.
2003 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
2005 vrm.addRestorePoint(VReg, MI);
2007 Id = RestoreMBBs.find_next(Id);
2010 // Finalize intervals: add kills, finalize spill weights, and filter out
// Pass 4: mark the last use of each new interval as a kill (unless a spill
// point already ends it) and collect the surviving intervals.
2012 std::vector<LiveInterval*> RetNewLIs;
2013 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2014 LiveInterval *LI = NewLIs[i];
2016 if (!AddedKill.count(LI)) {
2017 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2018 SlotIndex LastUseIdx = LR->end.getBaseIndex();
2019 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2020 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2021 assert(UseIdx != -1);
2022 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2023 LastUse->getOperand(UseIdx).setIsKill();
2024 vrm.addKillPoint(LI->reg, LastUseIdx);
2027 RetNewLIs.push_back(LI);
2031 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2032 normalizeSpillWeights(RetNewLIs);
2036 /// hasAllocatableSuperReg - Return true if the specified physical register has
2037 /// any super register that's allocatable.
// NOTE(review): a register only counts here if it is BOTH marked allocatable
// and currently has a live interval computed for it -- see the conjunction on
// the line below.
2038 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// Walk the target's super-register list; the `*AS` loop test relies on the
// list being terminated by a 0 entry.
2039 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2040 if (allocatableRegs_[*AS] && hasInterval(*AS))
// NOTE(review): this excerpt elides original lines 2041-2044 -- presumably a
// `return true;` for the hit case and a trailing `return false;` plus the
// closing brace. Confirm against the full source file.
2045 /// getRepresentativeReg - Find the largest super register of the specified
2046 /// physical register.
// Used by the emergency-spill path below (spillPhysRegAroundRegDefsUses) so
// that spilling operates on one representative covering register rather than
// on each aliasing register independently.
2047 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2048 // Find the largest super-register that is allocatable.
// Default to the register itself if no suitable super-register is found.
2049 unsigned BestReg = Reg;
// Null-terminated super-register list from the target register info.
2050 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2051 unsigned SuperReg = *AS;
// A candidate is "largest" when it has an interval but no allocatable
// super-register of its own above it.
2052 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
// NOTE(review): original lines 2053-2059 are elided from this excerpt --
// presumably `BestReg = SuperReg;` and loop/function closure ending in
// `return BestReg;`. Confirm against the full source file.
2060 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2061 /// specified interval that conflicts with the specified physical register.
2062 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2063 unsigned PhysReg) const {
2064 unsigned NumConflicts = 0;
// Compare against the representative (largest allocatable) super-register's
// interval, so conflicts through aliases of PhysReg are counted too.
2065 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
// Visit every machine operand that references li.reg (defs and uses alike).
2066 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2067 E = mri_->reg_end(); I != E; ++I) {
2068 MachineOperand &O = I.getOperand();
2069 MachineInstr *MI = O.getParent();
// Debug values carry no register pressure; they must not influence
// allocation decisions.
2070 if (MI->isDebugValue())
// NOTE(review): original line 2071 is elided -- presumably `continue;`.
2072 SlotIndex Index = getInstructionIndex(MI);
2073 if (pli.liveAt(Index))
// NOTE(review): original lines 2074-2075 are elided -- presumably
// `++NumConflicts;` and the loop's closing brace. Confirm against the full
// source file.
2076 return NumConflicts;
2079 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2080 /// around all defs and uses of the specified interval. Return true if it
2081 /// was able to cut its interval.
// This is the emergency-spill path: when the allocator has run out of
// registers, carve the physreg's live interval around each instruction that
// touches li, and record an emergency spill in the VirtRegMap.
2082 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2083 unsigned PhysReg, VirtRegMap &vrm) {
// Spill the representative (largest) covering register instead of PhysReg
// itself so that aliasing sub-registers are handled in one shot.
2084 unsigned SpillReg = getRepresentativeReg(PhysReg);
2086 DEBUG(dbgs() << "spillPhysRegAroundRegDefsUses " << tri_->getName(PhysReg)
2087 << " represented by " << tri_->getName(SpillReg) << '\n');
2089 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2090 // If there are registers which alias PhysReg, but which are not a
2091 // sub-register of the chosen representative super register. Assert
2092 // since we can't handle it yet.
2093 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2094 tri_->isSuperRegister(*AS, SpillReg));
// Collect SpillReg plus every sub-register that actually has a live
// interval; these are the intervals we may need to cut below.
2097 SmallVector<unsigned, 4> PRegs;
2098 if (hasInterval(SpillReg))
2099 PRegs.push_back(SpillReg);
2100 for (const unsigned *SR = tri_->getSubRegisters(SpillReg); *SR; ++SR)
2101 if (hasInterval(*SR))
2102 PRegs.push_back(*SR);
// NOTE(review): original lines 2103-2104 are elided -- the dbgs() calls
// below suggest this region was wrapped in a DEBUG({...}) block; confirm.
2105 dbgs() << "Trying to spill:";
2106 for (unsigned i = 0, e = PRegs.size(); i != e; ++i)
2107 dbgs() << ' ' << tri_->getName(PRegs[i]);
// SeenMIs guards against processing the same instruction twice when it has
// multiple operands referencing li.reg.
2111 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2112 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2113 E = mri_->reg_end(); I != E; ++I) {
2114 MachineOperand &O = I.getOperand();
2115 MachineInstr *MI = O.getParent();
2116 if (MI->isDebugValue() || SeenMIs.count(MI))
// NOTE(review): original lines 2117-2118 are elided -- presumably
// `continue;` and `SeenMIs.insert(MI);`.
2119 SlotIndex Index = getInstructionIndex(MI);
2120 bool LiveReg = false;
2121 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2122 unsigned PReg = PRegs[i];
2123 LiveInterval &pli = getInterval(PReg);
2124 if (!pli.liveAt(Index))
// NOTE(review): original lines 2125-2126 are elided -- presumably
// `continue;` plus bookkeeping (e.g. `LiveReg = true;`); LiveReg is
// otherwise set-but-unused in the visible lines.
// Cut the physreg interval across this instruction's full slot range:
// from its load slot up to the base index of the next instruction.
2127 SlotIndex StartIdx = Index.getLoadIndex();
2128 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
// If the range to remove does not sit inside a single live range, we
// cannot legally cut it -- the allocator has truly run out of registers.
2129 if (!pli.isInOneLiveRange(StartIdx, EndIdx)) {
// NOTE(review): original line 2130 is elided -- presumably the
// `std::string msg;` backing the stream below.
2131 raw_string_ostream Msg(msg);
2132 Msg << "Ran out of registers during register allocation!";
// Inline asm with over-constrained operands is the common way users hit
// this; point them at the statement.
2133 if (MI->isInlineAsm()) {
2134 Msg << "\nPlease check your inline asm statement for invalid "
2135 << "constraints:\n";
2136 MI->print(Msg, tm_);
2138 report_fatal_error(Msg.str());
// Carve the hole around this instruction so SpillReg is free here.
2140 pli.removeRange(StartIdx, EndIdx);
// NOTE(review): original lines 2141-2144 are elided.
2145 DEBUG(dbgs() << "Emergency spill around " << Index << '\t' << *MI);
2146 vrm.addEmergencySpill(SpillReg, MI);
// NOTE(review): original lines 2147 onward (loop closure and the function's
// `return Cut;`-style result) are elided from this excerpt -- confirm the
// tail against the full source file.
2152 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2153 MachineInstr* startInst) {
2154 LiveInterval& Interval = getOrCreateInterval(reg);
2155 VNInfo* VN = Interval.getNextValue(
2156 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2157 startInst, getVNInfoAllocator());
2158 VN->setHasPHIKill(true);
2160 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2161 getMBBEndIdx(startInst->getParent()), VN);
2162 Interval.addRange(LR);