1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
// File-scope state for the LiveIntervals pass: a hidden debug flag,
// statistics counters, and the static pass registration.
// NOTE(review): throughout this file the embedded original line numbers are
// non-contiguous — lines have been elided from this view. All code is kept
// verbatim; only comments are added.
49 // Hidden options for help debugging.
50 static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
53 STATISTIC(numIntervals , "Number of original intervals");
54 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
55 STATISTIC(numSplits , "Number of intervals split");
// Pass identification: the address of ID is the pass's unique identity.
57 char LiveIntervals::ID = 0;
58 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// getAnalysisUsage - Declare the analyses this pass requires (AliasAnalysis,
// LiveVariables, PHI elimination, two-address lowering, ProcessImplicitDefs,
// SlotIndexes) and the ones it preserves, then delegate to the base class.
// SlotIndexes is addRequiredTransitive so it stays alive as long as this
// pass's results are used.
60 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
62 AU.addRequired<AliasAnalysis>();
63 AU.addPreserved<AliasAnalysis>();
64 AU.addPreserved<LiveVariables>();
65 AU.addRequired<LiveVariables>();
66 AU.addPreservedID(MachineLoopInfoID);
67 AU.addPreservedID(MachineDominatorsID);
70 AU.addPreservedID(PHIEliminationID);
71 AU.addRequiredID(PHIEliminationID);
74 AU.addRequiredID(TwoAddressInstructionPassID);
75 AU.addPreserved<ProcessImplicitDefs>();
76 AU.addRequired<ProcessImplicitDefs>();
77 AU.addPreserved<SlotIndexes>();
78 AU.addRequiredTransitive<SlotIndexes>();
79 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Free all per-function state: the LiveInterval objects in
// r2iMap_, the VNInfo bump allocator, and any cloned MachineInstrs.
82 void LiveIntervals::releaseMemory() {
83 // Free the live intervals themselves.
84 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
85 E = r2iMap_.end(); I != E; ++I)
90 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
91 VNInfoAllocator.DestroyAll();
// Delete instructions created for rematerialization bookkeeping.
92 while (!CloneMIs.empty()) {
93 MachineInstr *MI = CloneMIs.back();
95 mf_->DeleteMachineInstr(MI);
99 /// runOnMachineFunction - Register allocate the whole function
// Caches pointers to the target/register/instruction info and the required
// analyses, computes the allocatable register set, then (in elided code)
// builds the intervals and bumps the numIntervals statistic.
101 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
103 mri_ = &mf_->getRegInfo();
104 tm_ = &fn.getTarget();
105 tri_ = tm_->getRegisterInfo();
106 tii_ = tm_->getInstrInfo();
107 aa_ = &getAnalysis<AliasAnalysis>();
108 lv_ = &getAnalysis<LiveVariables>();
109 indexes_ = &getAnalysis<SlotIndexes>();
110 allocatableRegs_ = tri_->getAllocatableSet(fn);
114 numIntervals += getNumIntervals();
120 /// print - Implement the dump method.
// Prints every computed interval (r2iMap_ values) to the given stream.
121 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
122 OS << "********** INTERVALS **********\n";
123 for (const_iterator I = begin(), E = end(); I != E; ++I) {
124 I->second->print(OS, tri_);
// printInstrs - Print every machine instruction of the function together
// with its SlotIndex, skipping DBG_VALUE instructions (which carry no index).
131 void LiveIntervals::printInstrs(raw_ostream &OS) const {
132 OS << "********** MACHINEINSTRS **********\n";
134 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
135 mbbi != mbbe; ++mbbi) {
136 OS << "BB#" << mbbi->getNumber()
137 << ":\t\t# derived from " << mbbi->getName() << "\n";
138 for (MachineBasicBlock::iterator mii = mbbi->begin(),
139 mie = mbbi->end(); mii != mie; ++mii) {
140 if (mii->isDebugValue())
143 OS << getInstructionIndex(mii) << '\t' << *mii;
148 void LiveIntervals::dumpInstrs() const {
// conflictsWithPhysReg - Conservatively determine whether assigning physreg
// 'reg' to interval 'li' would conflict with another use/def in the range.
// Only handles single-range intervals confined to one basic block; scans
// each instruction in the range for an operand whose (mapped) physical
// register overlaps 'reg', ignoring copies to/from li.reg.
152 bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
153 VirtRegMap &vrm, unsigned reg) {
154 // We don't handle fancy stuff crossing basic block boundaries
155 if (li.ranges.size() != 1)
157 const LiveRange &range = li.ranges.front();
158 SlotIndex idx = range.start.getBaseIndex();
159 SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
161 // Skip deleted instructions
162 MachineInstr *firstMI = getInstructionFromIndex(idx);
163 while (!firstMI && idx != end) {
164 idx = idx.getNextIndex();
165 firstMI = getInstructionFromIndex(idx);
170 // Find last instruction in range
171 SlotIndex lastIdx = end.getPrevIndex();
172 MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
173 while (!lastMI && lastIdx != idx) {
174 lastIdx = lastIdx.getPrevIndex();
175 lastMI = getInstructionFromIndex(lastIdx);
180 // Range cannot cross basic block boundaries or terminators
181 MachineBasicBlock *MBB = firstMI->getParent();
182 if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
185 MachineBasicBlock::const_iterator E = lastMI;
187 for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
188 const MachineInstr &MI = *I;
190 // Allow copies to and from li.reg
191 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
192 if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
193 if (SrcReg == li.reg || DstReg == li.reg)
196 // Check for operands using reg
197 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
198 const MachineOperand& mop = MI.getOperand(i);
201 unsigned PhysReg = mop.getReg();
202 if (PhysReg == 0 || PhysReg == li.reg)
// Virtual operands are checked through their current phys assignment.
204 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
205 if (!vrm.hasPhys(PhysReg))
207 PhysReg = vrm.getPhys(PhysReg);
209 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
214 // No conflicts found.
// conflictsWithAliasRef - Return true (in elided code) if some instruction
// inside li's live ranges has a physical-register operand that overlaps
// 'Reg'. Instructions in JoinedCopies and deleted slots are skipped.
218 bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
219 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
220 for (LiveInterval::Ranges::const_iterator
221 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// Walk every base index covered by this range.
222 for (SlotIndex index = I->start.getBaseIndex(),
223 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
225 index = index.getNextIndex()) {
226 MachineInstr *MI = getInstructionFromIndex(index);
228 continue; // skip deleted instructions
230 if (JoinedCopies.count(MI))
232 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
233 MachineOperand& MO = MI->getOperand(i);
236 unsigned PhysReg = MO.getReg();
// Only distinct physical registers can alias-conflict here.
237 if (PhysReg == 0 || PhysReg == Reg ||
238 TargetRegisterInfo::isVirtualRegister(PhysReg))
240 if (tri_->regsOverlap(Reg, PhysReg))
// printRegName - Debug helper: print a physical register by name, a virtual
// register as "%reg<N>".
250 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
251 if (TargetRegisterInfo::isPhysicalRegister(reg))
252 dbgs() << tri_->getName(reg);
254 dbgs() << "%reg" << reg;
// MultipleDefsBySameMI - Return true (in elided code) when a later operand of
// MI re-defines the same register as operand MOIdx. The assert requires both
// defs to be distinct sub-register defs (full-register double defs are not
// expected).
259 bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
260 unsigned Reg = MI.getOperand(MOIdx).getReg();
261 for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
262 const MachineOperand &MO = MI.getOperand(i);
265 if (MO.getReg() == Reg && MO.isDef()) {
266 assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
267 MI.getOperand(MOIdx).getSubReg() &&
275 /// isPartialRedef - Return true if the specified def at the specific index is
276 /// partially re-defining the specified live interval. A common case of this is
277 /// a definition of the sub-register.
// Only a sub-register def that is not an earlyclobber can be a partial redef;
// otherwise we check whether the previous value's defining instruction also
// defined the full register.
278 bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
279 LiveInterval &interval) {
280 if (!MO.getSubReg() || MO.isEarlyClobber())
283 SlotIndex RedefIndex = MIIdx.getDefIndex();
284 const LiveRange *OldLR =
285 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
286 if (OldLR->valno->isDefAccurate()) {
287 MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
288 return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
// handleVirtualRegisterDef - Extend 'interval' for a def of a virtual
// register at MIIdx. Three major cases: (1) first def of the vreg (empty
// interval): build ranges from the def to its kills, possibly spanning
// blocks; (2) redefinition by a two-address instruction or partial
// (sub-register) redef: split the existing value; (3) extra def of a
// PHI-join register: live only to the end of the block.
293 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
294 MachineBasicBlock::iterator mi,
298 LiveInterval &interval) {
300 dbgs() << "\t\tregister: ";
301 printRegName(interval.reg, tri_);
304 // Virtual registers may be defined multiple times (due to phi
305 // elimination and 2-addr elimination). Much of what we do only has to be
306 // done once for the vreg. We use an empty interval to detect the first
307 // time we see a vreg.
308 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
// Case 1: first definition of this vreg.
309 if (interval.empty()) {
310 // Get the Idx of the defining instructions.
311 SlotIndex defIndex = MIIdx.getDefIndex();
312 // Earlyclobbers move back one, so that they overlap the live range
314 if (MO.isEarlyClobber())
315 defIndex = MIIdx.getUseIndex();
317 // Make sure the first definition is not a partial redefinition. Add an
318 // <imp-def> of the full register.
320 mi->addRegisterDefined(interval.reg);
// Record the copy instruction (if any) that defines this value.
322 MachineInstr *CopyMI = NULL;
323 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
324 if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
325 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg)) {
328 // Some of the REG_SEQUENCE lowering in TwoAddressInstrPass creates
329 // implicit defs without really knowing. It shows up as INSERT_SUBREG
330 // using an undefined register.
331 if (mi->isInsertSubreg())
332 mi->getOperand(1).setIsUndef();
335 VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
337 assert(ValNo->id == 0 && "First value in interval is not 0?");
339 // Loop over all of the blocks that the vreg is defined in. There are
340 // two cases we have to handle here. The most common case is a vreg
341 // whose lifetime is contained within a basic block. In this case there
342 // will be a single kill, in MBB, which comes after the definition.
343 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
344 // FIXME: what about dead vars?
346 if (vi.Kills[0] != mi)
347 killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
349 killIdx = defIndex.getStoreIndex();
351 // If the kill happens after the definition, we have an intra-block
353 if (killIdx > defIndex) {
354 assert(vi.AliveBlocks.empty() &&
355 "Shouldn't be alive across any blocks!");
356 LiveRange LR(defIndex, killIdx, ValNo);
357 interval.addRange(LR);
358 DEBUG(dbgs() << " +" << LR << "\n");
359 ValNo->addKill(killIdx);
364 // The other case we handle is when a virtual register lives to the end
365 // of the defining block, potentially live across some blocks, then is
366 // live into some number of blocks, but gets killed. Start by adding a
367 // range that goes from this definition to the end of the defining block.
368 LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
369 DEBUG(dbgs() << " +" << NewLR);
370 interval.addRange(NewLR);
372 bool PHIJoin = lv_->isPHIJoin(interval.reg);
375 // A phi join register is killed at the end of the MBB and revived as a new
376 // valno in the killing blocks.
377 assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
378 DEBUG(dbgs() << " phi-join");
379 ValNo->addKill(indexes_->getTerminatorGap(mbb));
380 ValNo->setHasPHIKill(true);
382 // Iterate over all of the blocks that the variable is completely
383 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
385 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
386 E = vi.AliveBlocks.end(); I != E; ++I) {
387 MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
388 LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
389 interval.addRange(LR);
390 DEBUG(dbgs() << " +" << LR);
394 // Finally, this virtual register is live from the start of any killing
395 // block to the 'use' slot of the killing instruction.
396 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
397 MachineInstr *Kill = vi.Kills[i];
398 SlotIndex Start = getMBBStartIdx(Kill->getParent());
399 SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();
401 // Create interval with one of a NEW value number. Note that this value
402 // number isn't actually defined by an instruction, weird huh? :)
404 ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
406 ValNo->setIsPHIDef(true);
408 LiveRange LR(Start, killIdx, ValNo);
409 interval.addRange(LR);
410 ValNo->addKill(killIdx);
411 DEBUG(dbgs() << " +" << LR);
// Multiple sub-register defs by the same instruction need no extra work.
415 if (MultipleDefsBySameMI(*mi, MOIdx))
416 // Multiple defs of the same virtual register by the same instruction.
417 // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
418 // This is likely due to elimination of REG_SEQUENCE instructions. Return
419 // here since there is nothing to do.
422 // If this is the second time we see a virtual register definition, it
423 // must be due to phi elimination or two addr elimination. If this is
424 // the result of two address elimination, then the vreg is one of the
425 // def-and-use register operand.
427 // It may also be partial redef like this:
428 // 80 %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
429 // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
430 bool PartReDef = isPartialRedef(MIIdx, MO, interval);
// Case 2: two-address or partial redefinition — split the old value.
431 if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
432 // If this is a two-address definition, then we have already processed
433 // the live range. The only problem is that we didn't realize there
434 // are actually two values in the live interval. Because of this we
435 // need to take the LiveRegion that defines this register and split it
437 SlotIndex RedefIndex = MIIdx.getDefIndex();
438 if (MO.isEarlyClobber())
439 RedefIndex = MIIdx.getUseIndex();
441 const LiveRange *OldLR =
442 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
443 VNInfo *OldValNo = OldLR->valno;
444 SlotIndex DefIndex = OldValNo->def.getDefIndex();
446 // Delete the previous value, which should be short and continuous,
447 // because the 2-addr copy must be in the same MBB as the redef.
448 interval.removeRange(DefIndex, RedefIndex);
450 // The new value number (#1) is defined by the instruction we claimed
452 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
453 false, // update at *
455 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
457 // Value#0 is now defined by the 2-addr instruction.
458 OldValNo->def = RedefIndex;
459 OldValNo->setCopy(0);
461 // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
462 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
464 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg)
465 OldValNo->setCopy(&*mi);
467 // Add the new live interval which replaces the range for the input copy.
468 LiveRange LR(DefIndex, RedefIndex, ValNo);
469 DEBUG(dbgs() << " replace range with " << LR);
470 interval.addRange(LR);
471 ValNo->addKill(RedefIndex);
473 // If this redefinition is dead, we need to add a dummy unit live
474 // range covering the def slot.
476 interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
480 dbgs() << " RESULT: ";
481 interval.print(dbgs(), tri_);
// Case 3: extra def of a PHI-join register — live only to end of block.
483 } else if (lv_->isPHIJoin(interval.reg)) {
484 // In the case of PHI elimination, each variable definition is only
485 // live until the end of the block. We've already taken care of the
486 // rest of the live range.
488 SlotIndex defIndex = MIIdx.getDefIndex();
489 if (MO.isEarlyClobber())
490 defIndex = MIIdx.getUseIndex();
493 MachineInstr *CopyMI = NULL;
494 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
495 if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
496 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
498 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
500 SlotIndex killIndex = getMBBEndIdx(mbb);
501 LiveRange LR(defIndex, killIndex, ValNo);
502 interval.addRange(LR);
503 ValNo->addKill(indexes_->getTerminatorGap(mbb));
504 ValNo->setHasPHIKill(true);
505 DEBUG(dbgs() << " phi-join +" << LR);
507 llvm_unreachable("Multiply defined register");
511 DEBUG(dbgs() << '\n');
// handlePhysicalRegisterDef - Extend 'interval' for a def of a physical
// register at MIIdx. Physical registers never live across blocks, so the
// range ends at the kill/redef found by scanning forward in MBB, or is a
// one-slot "dead" range if no use is found.
514 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
515 MachineBasicBlock::iterator mi,
518 LiveInterval &interval,
519 MachineInstr *CopyMI) {
520 // A physical register cannot be live across basic block, so its
521 // lifetime must end somewhere in its defining basic block.
523 dbgs() << "\t\tregister: ";
524 printRegName(interval.reg, tri_);
527 SlotIndex baseIndex = MIIdx;
528 SlotIndex start = baseIndex.getDefIndex();
529 // Earlyclobbers move back one.
530 if (MO.isEarlyClobber())
531 start = MIIdx.getUseIndex();
532 SlotIndex end = start;
534 // If it is not used after definition, it is considered dead at
535 // the instruction defining it. Hence its interval is:
536 // [defSlot(def), defSlot(def)+1)
537 // For earlyclobbers, the defSlot was pushed back one; the extra
538 // advance below compensates.
540 DEBUG(dbgs() << " dead");
541 end = start.getStoreIndex();
545 // If it is not dead on definition, it must be killed by a
546 // subsequent instruction. Hence its interval is:
547 // [defSlot(def), useSlot(kill)+1)
548 baseIndex = baseIndex.getNextIndex();
// Scan forward through the block for a kill or redefinition.
549 while (++mi != MBB->end()) {
551 if (mi->isDebugValue())
553 if (getInstructionFromIndex(baseIndex) == 0)
554 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
556 if (mi->killsRegister(interval.reg, tri_)) {
557 DEBUG(dbgs() << " killed");
558 end = baseIndex.getDefIndex();
561 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
563 if (mi->isRegTiedToUseOperand(DefIdx)) {
564 // Two-address instruction.
565 end = baseIndex.getDefIndex();
567 // Another instruction redefines the register before it is ever read.
568 // Then the register is essentially dead at the instruction that
569 // defines it. Hence its interval is:
570 // [defSlot(def), defSlot(def)+1)
571 DEBUG(dbgs() << " dead");
572 end = start.getStoreIndex();
578 baseIndex = baseIndex.getNextIndex();
581 // The only case we should have a dead physreg here without a killing or
582 // instruction where we know it's dead is if it is live-in to the function
583 // and never used. Another possible case is the implicit use of the
584 // physical register has been deleted by two-address pass.
585 end = start.getStoreIndex();
588 assert(start < end && "did not find end of interval?");
590 // Already exists? Extend old live interval.
591 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
592 bool Extend = OldLR != interval.end();
593 VNInfo *ValNo = Extend
594 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
595 if (MO.isEarlyClobber() && Extend)
596 ValNo->setHasRedefByEC(true);
597 LiveRange LR(start, end, ValNo);
598 interval.addRange(LR);
599 LR.valno->addKill(end);
600 DEBUG(dbgs() << " +" << LR << '\n');
// handleRegisterDef - Dispatch a register def to the virtual- or
// physical-register handler. For allocatable physregs, also process each
// sub-register that the instruction does not define explicitly itself.
603 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
604 MachineBasicBlock::iterator MI,
608 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
609 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
610 getOrCreateInterval(MO.getReg()));
611 else if (allocatableRegs_[MO.getReg()]) {
// Pass the defining copy instruction through to the physreg handler.
612 MachineInstr *CopyMI = NULL;
613 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
614 if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
615 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
617 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
618 getOrCreateInterval(MO.getReg()), CopyMI);
619 // Def of a register also defines its sub-registers.
620 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
621 // If MI also modifies the sub-register explicitly, avoid processing it
622 // more than once. Do not pass in TRI here so it checks for exact match.
623 if (!MI->definesRegister(*AS))
624 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
625 getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Build the range for a register that is live-in to
// MBB: from the block start to its first kill or redefinition. If neither is
// found the register is either dead (for aliases) or live through the block.
// The value number is marked as a PHI def since it has no defining instr.
629 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
631 LiveInterval &interval, bool isAlias) {
633 dbgs() << "\t\tlivein register: ";
634 printRegName(interval.reg, tri_);
637 // Look for kills, if it reaches a def before it's killed, then it shouldn't
638 // be considered a livein.
639 MachineBasicBlock::iterator mi = MBB->begin();
640 MachineBasicBlock::iterator E = MBB->end();
641 // Skip over DBG_VALUE at the start of the MBB.
642 if (mi != E && mi->isDebugValue()) {
643 while (++mi != E && mi->isDebugValue())
646 // MBB is empty except for DBG_VALUE's.
650 SlotIndex baseIndex = MIIdx;
651 SlotIndex start = baseIndex;
652 if (getInstructionFromIndex(baseIndex) == 0)
653 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
655 SlotIndex end = baseIndex;
656 bool SeenDefUse = false;
// Scan for the first kill or redefinition of the register.
659 if (mi->killsRegister(interval.reg, tri_)) {
660 DEBUG(dbgs() << " killed");
661 end = baseIndex.getDefIndex();
664 } else if (mi->definesRegister(interval.reg, tri_)) {
665 // Another instruction redefines the register before it is ever read.
666 // Then the register is essentially dead at the instruction that defines
667 // it. Hence its interval is:
668 // [defSlot(def), defSlot(def)+1)
669 DEBUG(dbgs() << " dead");
670 end = start.getStoreIndex();
675 while (++mi != E && mi->isDebugValue())
676 // Skip over DBG_VALUE.
679 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
682 // Live-in register might not be used at all.
685 DEBUG(dbgs() << " dead");
686 end = MIIdx.getStoreIndex();
688 DEBUG(dbgs() << " live through");
// The live-in value has no defining instruction: mark it as a PHI def.
694 interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
695 0, false, VNInfoAllocator);
696 vni->setIsPHIDef(true);
697 LiveRange LR(start, end, vni);
699 interval.addRange(LR);
700 LR.valno->addKill(end);
701 DEBUG(dbgs() << " +" << LR << '\n');
704 /// computeIntervals - computes the live intervals for virtual
705 /// registers. for some ordering of the machine instructions [1,N] a
706 /// live interval is an interval [i, j) where 1 <= i <= j < N for
707 /// which a variable is live
// Walks each basic block: first processes live-in registers (and their
// sub-registers), then every instruction's register operands, building
// intervals for defs and collecting <undef> uses for empty intervals.
708 void LiveIntervals::computeIntervals() {
709 DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
710 << "********** Function: "
711 << ((Value*)mf_->getFunction())->getName() << '\n');
713 SmallVector<unsigned, 8> UndefUses;
714 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
716 MachineBasicBlock *MBB = MBBI;
720 // Track the index of the current machine instr.
721 SlotIndex MIIndex = getMBBStartIdx(MBB);
722 DEBUG(dbgs() << "BB#" << MBB->getNumber()
723 << ":\t\t# derived from " << MBB->getName() << "\n");
725 // Create intervals for live-ins to this BB first.
726 for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
727 LE = MBB->livein_end(); LI != LE; ++LI) {
728 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
729 // Multiple live-ins can alias the same register.
730 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
731 if (!hasInterval(*AS))
732 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
736 // Skip over empty initial indices.
737 if (getInstructionFromIndex(MIIndex) == 0)
738 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
740 for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
742 DEBUG(dbgs() << MIIndex << "\t" << *MI);
743 if (MI->isDebugValue())
// Operands are visited in reverse so defs are seen before earlier uses.
747 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
748 MachineOperand &MO = MI->getOperand(i);
749 if (!MO.isReg() || !MO.getReg())
752 // handle register defs - build intervals
754 handleRegisterDef(MBB, MI, MIIndex, MO, i);
755 else if (MO.isUndef())
756 UndefUses.push_back(MO.getReg());
759 // Move to the next instr slot.
760 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
764 // Create empty intervals for registers defined by implicit_def's (except
765 // for those implicit_def that define values which are liveout of their
767 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
768 unsigned UndefReg = UndefUses[i];
769 (void)getOrCreateInterval(UndefReg);
// createInterval - Allocate a new LiveInterval for 'reg'. Physical registers
// get an effectively-infinite spill weight so they are never spilled.
773 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
774 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
775 return new LiveInterval(reg, Weight);
778 /// dupInterval - Duplicate a live interval. The caller is responsible for
779 /// managing the allocated memory.
780 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
781 LiveInterval *NewLI = createInterval(li->reg);
782 NewLI->Copy(*li, mri_, getVNInfoAllocator());
786 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
787 /// copy field and returns the source register that defines it.
// Handles EXTRACT_SUBREG (mapping a physreg source down to the sub-register
// unless src/dst sub-indices already match), INSERT_SUBREG/SUBREG_TO_REG
// (source is operand 2), and plain target move instructions.
788 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
792 if (VNI->getCopy()->isExtractSubreg()) {
793 // If it's extracting out of a physical register, return the sub-register.
794 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
795 if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
796 unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
797 unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
798 if (SrcSubReg == DstSubReg)
799 // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
800 // reg1034 can still be coalesced to EDX.
802 assert(DstSubReg == 0);
803 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
806 } else if (VNI->getCopy()->isInsertSubreg() ||
807 VNI->getCopy()->isSubregToReg())
808 return VNI->getCopy()->getOperand(2).getReg();
810 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
811 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
813 llvm_unreachable("Unrecognized copy instruction!");
817 //===----------------------------------------------------------------------===//
818 // Register allocator hooks.
821 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
822 /// allow one) virtual register operand, then its uses are implicitly using
823 /// the register. Returns the virtual register.
824 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
825 MachineInstr *MI) const {
827 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
828 MachineOperand &MO = MI->getOperand(i);
829 if (!MO.isReg() || !MO.isUse())
831 unsigned Reg = MO.getReg();
// Skip the interval's own register and no-register operands.
832 if (Reg == 0 || Reg == li.reg)
// Unallocatable physreg uses (e.g. reserved regs) don't count.
835 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
836 !allocatableRegs_[Reg])
838 // FIXME: For now, only remat MI with at most one register operand.
840 "Can't rematerialize instruction with multiple register operand!");
849 /// isValNoAvailableAt - Return true if the val# of the specified interval
850 /// which reaches the given instruction also reaches the specified use index.
851 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
852 SlotIndex UseIdx) const {
853 SlotIndex Index = getInstructionIndex(MI);
854 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
855 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
// Available iff UseIdx is inside a range carrying the same value number.
856 return UI != li.end() && UI->valno == ValNo;
859 /// isReMaterializable - Returns true if the definition MI of the specified
860 /// val# of the specified interval is re-materializable.
// Beyond the target's triviality check, verifies that any implicit virtual
// register use of MI reaches every use of this val#, and that the implicitly
// used register is not itself about to be spilled.
861 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
862 const VNInfo *ValNo, MachineInstr *MI,
863 SmallVectorImpl<LiveInterval*> &SpillIs,
868 if (!tii_->isTriviallyReMaterializable(MI, aa_))
871 // Target-specific code can mark an instruction as being rematerializable
872 // if it has one virtual reg use, though it had better be something like
873 // a PIC base register which is likely to be live everywhere.
874 unsigned ImpUse = getReMatImplicitUse(li, MI);
876 const LiveInterval &ImpLi = getInterval(ImpUse);
877 for (MachineRegisterInfo::use_nodbg_iterator
878 ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
880 MachineInstr *UseMI = &*ri;
881 SlotIndex UseIdx = getInstructionIndex(UseMI);
// Only uses reached by this particular value number matter.
882 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
884 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
888 // If a register operand of the re-materialized instruction is going to
889 // be spilled next, then it's not legal to re-materialize this instruction.
890 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
891 if (ImpUse == SpillIs[i]->reg)
897 /// isReMaterializable - Returns true if the definition MI of the specified
898 /// val# of the specified interval is re-materializable.
// Convenience overload: delegates with empty dummy out-parameters.
899 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
900 const VNInfo *ValNo, MachineInstr *MI) {
901 SmallVector<LiveInterval*, 4> Dummy1;
903 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
906 /// isReMaterializable - Returns true if every definition of MI of every
907 /// val# of the specified interval is re-materializable.
908 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
909 SmallVectorImpl<LiveInterval*> &SpillIs,
912 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
914 const VNInfo *VNI = *i;
916 continue; // Dead val#.
917 // Is the def for the val# rematerializable?
// Without an accurate def location we cannot find the defining instr.
918 if (!VNI->isDefAccurate())
920 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
921 bool DefIsLoad = false;
923 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
930 /// FilterFoldedOps - Filter out two-address use operands. Return
931 /// true if it finds any issue with the operands that ought to prevent
// Accumulates isMod/isRef flags into MRInfo and collects foldable operand
// indices into FoldOps; tied (two-address) use operands are dropped.
933 static bool FilterFoldedOps(MachineInstr *MI,
934 SmallVector<unsigned, 2> &Ops,
936 SmallVector<unsigned, 2> &FoldOps) {
938 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
939 unsigned OpIdx = Ops[i];
940 MachineOperand &MO = MI->getOperand(OpIdx);
941 // FIXME: fold subreg use.
945 MRInfo |= (unsigned)VirtRegMap::isMod;
947 // Filter out two-address use operand(s).
948 if (MI->isRegTiedToDefOperand(OpIdx)) {
949 MRInfo = VirtRegMap::isModRef;
952 MRInfo |= (unsigned)VirtRegMap::isRef;
954 FoldOps.push_back(OpIdx);
960 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
961 /// slot / to reg or any rematerialized load into ith operand of specified
962 /// MI. If it is successul, MI is updated with the newly created MI and
// On success the old MI is removed from the index/VRM maps and replaced
// in-place by the folded instruction; spill/restore points are transferred.
964 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
965 VirtRegMap &vrm, MachineInstr *DefMI,
967 SmallVector<unsigned, 2> &Ops,
968 bool isSS, int Slot, unsigned Reg) {
969 // If it is an implicit def instruction, just delete it.
970 if (MI->isImplicitDef()) {
971 RemoveMachineInstrFromMaps(MI);
972 vrm.RemoveMachineInstrFromMaps(MI);
973 MI->eraseFromParent();
978 // Filter the list of operand indexes that are to be folded. Abort if
979 // any operand will prevent folding.
981 SmallVector<unsigned, 2> FoldOps;
982 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
985 // The only time it's safe to fold into a two address instruction is when
986 // it's folding reload and spill from / into a spill stack slot.
987 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Fold either from a stack slot (isSS) or from the remat def instruction.
990 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
991 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
993 // Remember this instruction uses the spill slot.
994 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
996 // Attempt to fold the memory reference into the instruction. If
997 // we can do this, we don't need to insert spill code.
998 MachineBasicBlock &MBB = *MI->getParent();
999 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1000 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
1001 vrm.transferSpillPts(MI, fmi);
1002 vrm.transferRestorePts(MI, fmi);
1003 vrm.transferEmergencySpills(MI, fmi);
1004 ReplaceMachineInstrInMaps(MI, fmi);
1005 MI = MBB.insert(MBB.erase(MI), fmi);
1012 /// canFoldMemoryOperand - Returns true if the specified load / store
1013 /// folding is possible.
1014 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1015 SmallVector<unsigned, 2> &Ops,
1017 // Filter the list of operand indexes that are to be folded. Abort if
1018 // any operand will prevent folding.
1019 unsigned MRInfo = 0;
1020 SmallVector<unsigned, 2> FoldOps;
1021 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1024 // It's only legal to remat for a use, not a def.
1025 if (ReMat && (MRInfo & VirtRegMap::isMod))
// Defer the final decision to the target hook.
1028 return tii_->canFoldMemoryOperand(MI, FoldOps);
/// intervalIsInOneMBB - Determine whether every range of the interval is
/// covered by one machine basic block. NOTE(review): the return statements
/// of this function are elided in this excerpt — presumably it returns false
/// when a range is covered by a different block; confirm against the full file.
1031 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1032 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
// Block covering the first range; all later ranges are compared against it.
1034 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
1039 for (++itr; itr != li.ranges.end(); ++itr) {
1040 MachineBasicBlock *mbb2 =
1041 indexes_->getMBBCoveringRange(itr->start, itr->end);
1050 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1051 /// interval on to-be re-materialized operands of MI) with new register.
1052 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1053 MachineInstr *MI, unsigned NewVReg,
1055 // There is an implicit use. That means one of the other operand is
1056 // being remat'ed and the remat'ed instruction has li.reg as an
1057 // use operand. Make sure we rewrite that as well.
1058 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1059 MachineOperand &MO = MI->getOperand(i);
1062 unsigned Reg = MO.getReg();
// Only virtual registers can have been rematerialized.
1063 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1065 if (!vrm.isReMaterialized(Reg))
// Rewrite li.reg inside the cached remat instruction to the new vreg.
1067 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1068 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1070 UseMO->setReg(NewVReg);
1074 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1075 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
/// Returns true if every rewritten def / use of this instruction could be
/// folded (CanFold). NOTE(review): many body lines (braces, else-branches,
/// continues) are elided in this excerpt.
1076 bool LiveIntervals::
1077 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1078 bool TrySplit, SlotIndex index, SlotIndex end,
1080 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1081 unsigned Slot, int LdSlot,
1082 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1084 const TargetRegisterClass* rc,
1085 SmallVector<int, 4> &ReMatIds,
1086 const MachineLoopInfo *loopInfo,
1087 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1088 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1089 std::vector<LiveInterval*> &NewLIs) {
1090 bool CanFold = false;
1092 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1093 MachineOperand& mop = MI->getOperand(i);
1096 unsigned Reg = mop.getReg();
1097 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1102 bool TryFold = !DefIsReMat;
1103 bool FoldSS = true; // Default behavior unless it's a remat.
1104 int FoldSlot = Slot;
1106 // If this is the rematerializable definition MI itself and
1107 // all of its uses are rematerialized, simply delete it.
1108 if (MI == ReMatOrigDefMI && CanDelete) {
1109 DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
1111 RemoveMachineInstrFromMaps(MI);
1112 vrm.RemoveMachineInstrFromMaps(MI);
1113 MI->eraseFromParent();
1117 // If def for this use can't be rematerialized, then try folding.
1118 // If def is rematerializable and it's a load, also try folding.
1119 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1121 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1127 // Scan all of the operands of this instruction rewriting operands
1128 // to use NewVReg instead of li.reg as appropriate. We do this for
1131 // 1. If the instr reads the same spilled vreg multiple times, we
1132 // want to reuse the NewVReg.
1133 // 2. If the instr is a two-addr instruction, we are required to
1134 // keep the src/dst regs pinned.
1136 // Keep track of whether we replace a use and/or def so that we can
1137 // create the spill interval with the appropriate range.
1138 SmallVector<unsigned, 2> Ops;
1139 tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);
1141 // Create a new virtual register for the spill interval.
1142 // Create the new register now so we can map the fold instruction
1143 // to the new register so when it is unfolded we get the correct
1145 bool CreatedNewVReg = false;
1147 NewVReg = mri_->createVirtualRegister(rc);
1149 CreatedNewVReg = true;
1151 // The new virtual register should get the same allocation hints as the
1153 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1154 if (Hint.first || Hint.second)
1155 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1161 // Do not fold load / store here if we are splitting. We'll find an
1162 // optimal point to insert a load / store later.
1164 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1165 Ops, FoldSS, FoldSlot, NewVReg)) {
1166 // Folding the load/store can completely change the instruction in
1167 // unpredictable ways, rescan it from the beginning.
1170 // We need to give the new vreg the same stack slot as the
1171 // spilled interval.
1172 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1178 if (isNotInMIMap(MI))
1180 goto RestartInstruction;
1183 // We'll try to fold it later if it's profitable.
1184 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1188 mop.setReg(NewVReg);
1189 if (mop.isImplicit())
1190 rewriteImplicitOps(li, MI, NewVReg, vrm);
1192 // Reuse NewVReg for other reads.
1193 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1194 MachineOperand &mopj = MI->getOperand(Ops[j]);
1195 mopj.setReg(NewVReg);
1196 if (mopj.isImplicit())
1197 rewriteImplicitOps(li, MI, NewVReg, vrm);
1200 if (CreatedNewVReg) {
1202 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1203 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1204 // Each valnum may have its own remat id.
1205 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1207 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1209 if (!CanDelete || (HasUse && HasDef)) {
1210 // If this is a two-addr instruction then its use operands are
1211 // rematerializable but its def is not. It should be assigned a
1213 vrm.assignVirt2StackSlot(NewVReg, Slot);
1216 vrm.assignVirt2StackSlot(NewVReg, Slot);
1218 } else if (HasUse && HasDef &&
1219 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1220 // If this interval hasn't been assigned a stack slot (because earlier
1221 // def is a deleted remat def), do it now.
1222 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1223 vrm.assignVirt2StackSlot(NewVReg, Slot);
1226 // Re-matting an instruction with virtual register use. Add the
1227 // register as an implicit use on the use MI.
1228 if (DefIsReMat && ImpUse)
1229 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1231 // Create a new register interval for this spill / remat.
1232 LiveInterval &nI = getOrCreateInterval(NewVReg);
1233 if (CreatedNewVReg) {
1234 NewLIs.push_back(&nI);
1235 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1237 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Grow the new interval to cover this use: from the load index (so a
// reload can be inserted before MI) to the def index.
1241 if (CreatedNewVReg) {
1242 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1243 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1244 DEBUG(dbgs() << " +" << LR);
1247 // Extend the split live interval to this def / use.
1248 SlotIndex End = index.getDefIndex();
1249 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1250 nI.getValNumInfo(nI.getNumValNums()-1));
1251 DEBUG(dbgs() << " +" << LR);
// For a def, cover from the def index to the store index so a spill can be
// inserted after MI.
1256 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1257 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1258 DEBUG(dbgs() << " +" << LR);
1263 dbgs() << "\t\t\t\tAdded new interval: ";
1264 nI.print(dbgs(), tri_);
/// anyKillInMBBAfterIdx - Return true if the given val# has a kill inside
/// MBB at an index strictly after Idx. PHI kills are skipped since they do
/// not correspond to a real instruction.
1270 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1272 MachineBasicBlock *MBB,
1273 SlotIndex Idx) const {
1274 SlotIndex End = getMBBEndIdx(MBB);
1275 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1276 if (VNI->kills[j].isPHI())
1279 SlotIndex KillIdx = VNI->kills[j];
// Every non-PHI kill must map back to an instruction still in the maps.
1280 assert(getInstructionFromIndex(KillIdx) && "Dangling kill");
1281 if (KillIdx > Idx && KillIdx <= End)
1287 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1288 /// during spilling.
1290 struct RewriteInfo {
// (The Index / MI data member declarations are elided in this excerpt.)
1293 RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}

/// RewriteInfoCompare - Strict weak ordering of RewriteInfo entries by
/// ascending slot index, used to sort the rewrite worklist.
1296 struct RewriteInfoCompare {
1297 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1298 return LHS.Index < RHS.Index;
/// rewriteInstructionsForSpills - Rewrite all defs / uses of li.reg inside
/// the live range [I->start, I->end), creating new spill intervals and
/// recording per-MBB spill / restore points in SpillIdxes / RestoreIdxes.
/// NOTE(review): many body lines (braces, continues, else-branches) are
/// elided in this excerpt.
1303 void LiveIntervals::
1304 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1305 LiveInterval::Ranges::const_iterator &I,
1306 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1307 unsigned Slot, int LdSlot,
1308 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1310 const TargetRegisterClass* rc,
1311 SmallVector<int, 4> &ReMatIds,
1312 const MachineLoopInfo *loopInfo,
1313 BitVector &SpillMBBs,
1314 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1315 BitVector &RestoreMBBs,
1316 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1317 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1318 std::vector<LiveInterval*> &NewLIs) {
1319 bool AllCanFold = true;
1320 unsigned NewVReg = 0;
1321 SlotIndex start = I->start.getBaseIndex();
1322 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1324 // First collect all the def / use in this live range that will be rewritten.
1325 // Make sure they are sorted according to instruction index.
1326 std::vector<RewriteInfo> RewriteMIs;
1327 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1328 re = mri_->reg_end(); ri != re; ) {
1329 MachineInstr *MI = &*ri;
1330 MachineOperand &O = ri.getOperand();
1332 if (MI->isDebugValue()) {
1333 // Modify DBG_VALUE now that the value is in a spill slot.
1334 if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
1335 uint64_t Offset = MI->getOperand(1).getImm();
1336 const MDNode *MDPtr = MI->getOperand(2).getMetadata();
1337 DebugLoc DL = MI->getDebugLoc();
1338 int FI = isLoadSS ? LdSlot : (int)Slot;
1339 if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
1340 Offset, MDPtr, DL)) {
1341 DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
1342 ReplaceMachineInstrInMaps(MI, NewDV);
1343 MachineBasicBlock *MBB = MI->getParent();
1344 MBB->insert(MBB->erase(MI), NewDV);
// A DBG_VALUE that cannot be rewritten to a frame index is dropped
// rather than left pointing at a spilled vreg.
1349 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1350 RemoveMachineInstrFromMaps(MI);
1351 vrm.RemoveMachineInstrFromMaps(MI);
1352 MI->eraseFromParent();
1355 assert(!(O.isImplicit() && O.isUse()) &&
1356 "Spilling register that's used as implicit use?");
1357 SlotIndex index = getInstructionIndex(MI);
1358 if (index < start || index >= end)
1362 // Must be defined by an implicit def. It should not be spilled. Note,
1363 // this is for correctness reason. e.g.
1364 // 8 %reg1024<def> = IMPLICIT_DEF
1365 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1366 // The live range [12, 14) are not part of the r1024 live interval since
1367 // it's defined by an implicit def. It will not conflict with live
1368 // interval of r1025. Now suppose both registers are spilled, you can
1369 // easily see a situation where both registers are reloaded before
1370 // the INSERT_SUBREG and both target registers that would overlap.
1372 RewriteMIs.push_back(RewriteInfo(index, MI));
1374 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1376 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1377 // Now rewrite the defs and uses.
1378 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1379 RewriteInfo &rwi = RewriteMIs[i];
1381 SlotIndex index = rwi.Index;
1382 MachineInstr *MI = rwi.MI;
1383 // If MI def and/or use the same register multiple times, then there
1384 // are multiple entries.
1385 while (i != e && RewriteMIs[i].MI == MI) {
1386 assert(RewriteMIs[i].Index == index);
1389 MachineBasicBlock *MBB = MI->getParent();
1391 if (ImpUse && MI != ReMatDefMI) {
1392 // Re-matting an instruction with virtual register use. Prevent interval
1393 // from being spilled.
1394 getInterval(ImpUse).markNotSpillable();
1397 unsigned MBBId = MBB->getNumber();
1398 unsigned ThisVReg = 0;
// Reuse the new vreg already created for this MBB, if any.
1400 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1401 if (NVI != MBBVRegsMap.end()) {
1402 ThisVReg = NVI->second;
1409 // It's better to start a new interval to avoid artificially
1410 // extend the new interval.
1411 if (MI->readsWritesVirtualRegister(li.reg) ==
1412 std::make_pair(false,true)) {
1413 MBBVRegsMap.erase(MBB->getNumber());
1419 bool IsNew = ThisVReg == 0;
1421 // This ends the previous live interval. If all of its def / use
1422 // can be folded, give it a low spill weight.
1423 if (NewVReg && TrySplit && AllCanFold) {
1424 LiveInterval &nI = getOrCreateInterval(NewVReg);
1431 bool HasDef = false;
1432 bool HasUse = false;
1433 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1434 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1435 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1436 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1437 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1438 if (!HasDef && !HasUse)
1441 AllCanFold &= CanFold;
1443 // Update weight of spill interval.
1444 LiveInterval &nI = getOrCreateInterval(NewVReg);
1446 // The spill weight is now infinity as it cannot be spilled again.
1447 nI.markNotSpillable();
1451 // Keep track of the last def and first use in each MBB.
1453 if (MI != ReMatOrigDefMI || !CanDelete) {
1454 bool HasKill = false;
1456 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1458 // If this is a two-address code, then this index starts a new VNInfo.
1459 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1461 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1463 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1464 SpillIdxes.find(MBBId);
1466 if (SII == SpillIdxes.end()) {
1467 std::vector<SRInfo> S;
1468 S.push_back(SRInfo(index, NewVReg, true));
1469 SpillIdxes.insert(std::make_pair(MBBId, S));
1470 } else if (SII->second.back().vreg != NewVReg) {
1471 SII->second.push_back(SRInfo(index, NewVReg, true));
1472 } else if (index > SII->second.back().index) {
1473 // If there is an earlier def and this is a two-address
1474 // instruction, then it's not possible to fold the store (which
1475 // would also fold the load).
1476 SRInfo &Info = SII->second.back();
1478 Info.canFold = !HasUse;
1480 SpillMBBs.set(MBBId);
1481 } else if (SII != SpillIdxes.end() &&
1482 SII->second.back().vreg == NewVReg &&
1483 index > SII->second.back().index) {
1484 // There is an earlier def that's not killed (must be two-address).
1485 // The spill is no longer needed.
1486 SII->second.pop_back();
1487 if (SII->second.empty()) {
1488 SpillIdxes.erase(MBBId);
1489 SpillMBBs.reset(MBBId);
1496 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1497 SpillIdxes.find(MBBId);
1498 if (SII != SpillIdxes.end() &&
1499 SII->second.back().vreg == NewVReg &&
1500 index > SII->second.back().index)
1501 // Use(s) following the last def, it's not safe to fold the spill.
1502 SII->second.back().canFold = false;
1503 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1504 RestoreIdxes.find(MBBId);
1505 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1506 // If we are splitting live intervals, only fold if it's the first
1507 // use and there isn't another use later in the MBB.
1508 RII->second.back().canFold = false;
1510 // Only need a reload if there isn't an earlier def / use.
1511 if (RII == RestoreIdxes.end()) {
1512 std::vector<SRInfo> Infos;
1513 Infos.push_back(SRInfo(index, NewVReg, true));
1514 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1516 RII->second.push_back(SRInfo(index, NewVReg, true));
1518 RestoreMBBs.set(MBBId);
1522 // Update spill weight.
1523 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1524 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1527 if (NewVReg && TrySplit && AllCanFold) {
1528 // If all of its def / use can be folded, give it a low spill weight.
1529 LiveInterval &nI = getOrCreateInterval(NewVReg);
/// alsoFoldARestore - Return true if the restore recorded for basic block Id
/// at (index, vr) is still marked foldable.
1534 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1535 unsigned vr, BitVector &RestoreMBBs,
1536 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores recorded for this block at all.
1537 if (!RestoreMBBs[Id])
1539 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1540 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1541 if (Restores[i].index == index &&
1542 Restores[i].vreg == vr &&
1543 Restores[i].canFold)
/// eraseRestoreInfo - Invalidate the restore recorded for basic block Id at
/// (index, vr) by clearing its slot index, so no reload is issued for it.
1548 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1549 unsigned vr, BitVector &RestoreMBBs,
1550 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1551 if (!RestoreMBBs[Id])
1553 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1554 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
// NOTE(review): this condition looks truncated in the excerpt — the vreg
// comparison operand appears to be elided; confirm against the full file.
1555 if (Restores[i].index == index && Restores[i].vreg)
1556 Restores[i].index = SlotIndex();
1559 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1560 /// spilled and create empty intervals for their uses.
1562 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1563 const TargetRegisterClass* rc,
1564 std::vector<LiveInterval*> &NewLIs) {
1565 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1566 re = mri_->reg_end(); ri != re; ) {
1567 MachineOperand &O = ri.getOperand();
1568 MachineInstr *MI = &*ri;
1570 if (MI->isDebugValue()) {
1571 // Remove debug info for now.
1573 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
// At this point every remaining def of li.reg must be an IMPLICIT_DEF;
// real defs were rewritten earlier.
1577 assert(MI->isImplicitDef() &&
1578 "Register def was not rewritten?");
1579 RemoveMachineInstrFromMaps(MI);
1580 vrm.RemoveMachineInstrFromMaps(MI);
1581 MI->eraseFromParent();
1583 // This must be an use of an implicit_def so it's not part of the live
1584 // interval. Create a new empty live interval for it.
1585 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1586 unsigned NewVReg = mri_->createVirtualRegister(rc);
1588 vrm.setIsImplicitlyDefined(NewVReg);
1589 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Rewrite every operand of this user that still names li.reg.
1590 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1591 MachineOperand &MO = MI->getOperand(i);
1592 if (MO.isReg() && MO.getReg() == li.reg) {
/// getSpillWeight - Compute the contribution of one def and/or use at the
/// given loop depth to an interval's spill weight.
1602 LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1603 // Limit the loop depth ridiculousness.
1604 if (loopDepth > 200)
1607 // The loop depth is used to roughly estimate the number of times the
1608 // instruction is executed. Something like 10^d is simple, but will quickly
1609 // overflow a float. This expression behaves like 10^d for small d, but is
1610 // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1611 // headroom before overflow.
1612 float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
// isDef/isUse are bools, so each contributes 0 or 1 to the multiplier.
1614 return (isDef + isUse) * lc;
/// normalizeSpillWeights - Normalize the spill weight of each newly created
/// interval in NewLIs.
1618 LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1619 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1620 normalizeSpillWeight(*NewLIs[i]);
/// addIntervalsForSpills - Create new intervals for spilled defs / uses of
/// the given interval, assign stack slots, record spill / restore points,
/// and return the list of new intervals. NOTE(review): many body lines
/// (braces, returns, else-branches) are elided in this excerpt.
1623 std::vector<LiveInterval*> LiveIntervals::
1624 addIntervalsForSpills(const LiveInterval &li,
1625 SmallVectorImpl<LiveInterval*> &SpillIs,
1626 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1627 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1630 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1631 li.print(dbgs(), tri_);
1635 // Each bit specify whether a spill is required in the MBB.
1636 BitVector SpillMBBs(mf_->getNumBlockIDs());
1637 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1638 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1639 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1640 DenseMap<unsigned,unsigned> MBBVRegsMap;
1641 std::vector<LiveInterval*> NewLIs;
1642 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-val# rematerialization state, indexed by VNInfo id.
1644 unsigned NumValNums = li.getNumValNums();
1645 SmallVector<MachineInstr*, 4> ReMatDefs;
1646 ReMatDefs.resize(NumValNums, NULL);
1647 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1648 ReMatOrigDefs.resize(NumValNums, NULL);
1649 SmallVector<int, 4> ReMatIds;
1650 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1651 BitVector ReMatDelete(NumValNums);
1652 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1654 // Spilling a split live interval. It cannot be split any further. Also,
1655 // it's also guaranteed to be a single val# / range interval.
1656 if (vrm.getPreSplitReg(li.reg)) {
1657 vrm.setIsSplitFromReg(li.reg, 0);
1658 // Unset the split kill marker on the last use.
1659 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1660 if (KillIdx != SlotIndex()) {
1661 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1662 assert(KillMI && "Last use disappeared?");
1663 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1664 assert(KillOp != -1 && "Last use disappeared?");
1665 KillMI->getOperand(KillOp).setIsKill(false);
1667 vrm.removeKillPoint(li.reg);
1668 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1669 Slot = vrm.getStackSlot(li.reg);
1670 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1671 MachineInstr *ReMatDefMI = DefIsReMat ?
1672 vrm.getReMaterializedMI(li.reg) : NULL;
1674 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1675 bool isLoad = isLoadSS ||
1676 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1677 bool IsFirstRange = true;
1678 for (LiveInterval::Ranges::const_iterator
1679 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1680 // If this is a split live interval with multiple ranges, it means there
1681 // are two-address instructions that re-defined the value. Only the
1682 // first def can be rematerialized!
1684 // Note ReMatOrigDefMI has already been deleted.
1685 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1686 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1687 false, vrm, rc, ReMatIds, loopInfo,
1688 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1689 MBBVRegsMap, NewLIs);
1691 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1692 Slot, 0, false, false, false,
1693 false, vrm, rc, ReMatIds, loopInfo,
1694 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1695 MBBVRegsMap, NewLIs);
1697 IsFirstRange = false;
1700 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1701 normalizeSpillWeights(NewLIs);
// Splitting is only attempted when the interval spans multiple blocks.
1705 bool TrySplit = !intervalIsInOneMBB(li);
1708 bool NeedStackSlot = false;
1709 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1711 const VNInfo *VNI = *i;
1712 unsigned VN = VNI->id;
1713 if (VNI->isUnused())
1714 continue; // Dead val#.
1715 // Is the def for the val# rematerializable?
1716 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1717 ? getInstructionFromIndex(VNI->def) : 0;
1719 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1720 // Remember how to remat the def of this val#.
1721 ReMatOrigDefs[VN] = ReMatDefMI;
1722 // Original def may be modified so we have to make a copy here.
1723 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1724 CloneMIs.push_back(Clone);
1725 ReMatDefs[VN] = Clone;
1727 bool CanDelete = true;
1728 if (VNI->hasPHIKill()) {
1729 // A kill is a phi node, not all of its uses can be rematerialized.
1730 // It must not be deleted.
1732 // Need a stack slot if there is any live range where uses cannot be
1734 NeedStackSlot = true;
1737 ReMatDelete.set(VN);
1739 // Need a stack slot if there is any live range where uses cannot be
1741 NeedStackSlot = true;
1745 // One stack slot per live interval.
1746 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1747 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1748 Slot = vrm.assignVirt2StackSlot(li.reg);
1750 // This case only occurs when the prealloc splitter has already assigned
1751 // a stack slot to this vreg.
1753 Slot = vrm.getStackSlot(li.reg);
1756 // Create new intervals and rewrite defs and uses.
1757 for (LiveInterval::Ranges::const_iterator
1758 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1759 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1760 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1761 bool DefIsReMat = ReMatDefMI != NULL;
1762 bool CanDelete = ReMatDelete[I->valno->id];
1764 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1765 bool isLoad = isLoadSS ||
1766 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1767 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1768 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1769 CanDelete, vrm, rc, ReMatIds, loopInfo,
1770 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1771 MBBVRegsMap, NewLIs);
1774 // Insert spills / restores if we are splitting.
1776 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1777 normalizeSpillWeights(NewLIs);
1781 SmallPtrSet<LiveInterval*, 4> AddedKill;
1782 SmallVector<unsigned, 2> Ops;
1783 if (NeedStackSlot) {
1784 int Id = SpillMBBs.find_first();
// Walk every block that needs a spill, in bit-vector order.
1786 std::vector<SRInfo> &spills = SpillIdxes[Id];
1787 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1788 SlotIndex index = spills[i].index;
1789 unsigned VReg = spills[i].vreg;
1790 LiveInterval &nI = getOrCreateInterval(VReg);
1791 bool isReMat = vrm.isReMaterialized(VReg);
1792 MachineInstr *MI = getInstructionFromIndex(index);
1793 bool CanFold = false;
1794 bool FoundUse = false;
1796 if (spills[i].canFold) {
1798 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1799 MachineOperand &MO = MI->getOperand(j);
1800 if (!MO.isReg() || MO.getReg() != VReg)
1807 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1808 RestoreMBBs, RestoreIdxes))) {
1809 // MI has two-address uses of the same register. If the use
1810 // isn't the first and only use in the BB, then we can't fold
1811 // it. FIXME: Move this to rewriteInstructionsForSpills.
1818 // Fold the store into the def if possible.
1819 bool Folded = false;
1820 if (CanFold && !Ops.empty()) {
1821 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1824 // Also folded uses, do not issue a load.
1825 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1826 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1828 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1832 // Otherwise tell the spiller to issue a spill.
1834 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1835 bool isKill = LR->end == index.getStoreIndex();
1836 if (!MI->registerDefIsDead(nI.reg))
1837 // No need to spill a dead def.
1838 vrm.addSpillPoint(VReg, isKill, MI);
1840 AddedKill.insert(&nI);
1843 Id = SpillMBBs.find_next(Id);
// Walk every block that needs a restore, in bit-vector order.
1847 int Id = RestoreMBBs.find_first();
1849 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1850 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1851 SlotIndex index = restores[i].index;
// A cleared index means this restore was invalidated by eraseRestoreInfo.
1852 if (index == SlotIndex())
1854 unsigned VReg = restores[i].vreg;
1855 LiveInterval &nI = getOrCreateInterval(VReg);
1856 bool isReMat = vrm.isReMaterialized(VReg);
1857 MachineInstr *MI = getInstructionFromIndex(index);
1858 bool CanFold = false;
1860 if (restores[i].canFold) {
1862 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1863 MachineOperand &MO = MI->getOperand(j);
1864 if (!MO.isReg() || MO.getReg() != VReg)
1868 // If this restore were to be folded, it would have been folded
1877 // Fold the load into the use if possible.
1878 bool Folded = false;
1879 if (CanFold && !Ops.empty()) {
1881 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1883 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1885 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1886 // If the rematerializable def is a load, also try to fold it.
1887 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1888 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1889 Ops, isLoadSS, LdSlot, VReg);
1891 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1893 // Re-matting an instruction with virtual register use. Add the
1894 // register as an implicit use on the use MI and mark the register
1895 // interval as unspillable.
1896 LiveInterval &ImpLi = getInterval(ImpUse);
1897 ImpLi.markNotSpillable();
1898 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1903 // If folding is not possible / failed, then tell the spiller to issue a
1904 // load / rematerialization for us.
1906 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1908 vrm.addRestorePoint(VReg, MI);
1910 Id = RestoreMBBs.find_next(Id);
1913 // Finalize intervals: add kills, finalize spill weights, and filter out
1915 std::vector<LiveInterval*> RetNewLIs;
1916 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1917 LiveInterval *LI = NewLIs[i];
1919 LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
1920 if (!AddedKill.count(LI)) {
1921 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1922 SlotIndex LastUseIdx = LR->end.getBaseIndex();
1923 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1924 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1925 assert(UseIdx != -1);
1926 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
1927 LastUse->getOperand(UseIdx).setIsKill();
1928 vrm.addKillPoint(LI->reg, LastUseIdx);
1931 RetNewLIs.push_back(LI);
1935 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1936 normalizeSpillWeights(RetNewLIs);
1940 /// hasAllocatableSuperReg - Return true if the specified physical register has
1941 /// any super register that's allocatable.
1942 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// A super-register only counts if it is allocatable AND has a live interval.
1943 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1944 if (allocatableRegs_[*AS] && hasInterval(*AS))
1949 /// getRepresentativeReg - Find the largest super register of the specified
1950 /// physical register.
1951 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1952 // Find the largest super-register that is allocatable.
1953 unsigned BestReg = Reg;
1954 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1955 unsigned SuperReg = *AS;
// The candidate must itself have no allocatable super-register (i.e. be
// the top of its chain) while still owning a live interval.
1956 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1964 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1965 /// specified interval that conflicts with the specified physical register.
1966 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1967 unsigned PhysReg) const {
1968 unsigned NumConflicts = 0;
// Compare against the representative (largest allocatable) super-register
// interval so aliasing sub-registers are accounted for.
1969 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1970 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1971 E = mri_->reg_end(); I != E; ++I) {
1972 MachineOperand &O = I.getOperand();
1973 MachineInstr *MI = O.getParent();
// DBG_VALUEs are not real uses and never conflict.
1974 if (MI->isDebugValue())
1976 SlotIndex Index = getInstructionIndex(MI);
1977 if (pli.liveAt(Index))
1980 return NumConflicts;
1983 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1984 /// around all defs and uses of the specified interval. Return true if it
1985 /// was able to cut its interval.
1986 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1987 unsigned PhysReg, VirtRegMap &vrm) {
1988 unsigned SpillReg = getRepresentativeReg(PhysReg);
1990 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1991 // If there are registers which alias PhysReg, but which are not a
1992 // sub-register of the chosen representative super register. Assert
1993 // since we can't handle it yet.
1994 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
1995 tri_->isSuperRegister(*AS, SpillReg));
// Collect the representative register plus all of its sub-registers that
// have live intervals; these are the intervals we must cut.
1998 SmallVector<unsigned, 4> PRegs;
1999 if (hasInterval(SpillReg))
2000 PRegs.push_back(SpillReg);
2002 SmallSet<unsigned, 4> Added;
2003 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
2004 if (Added.insert(*AS) && hasInterval(*AS)) {
2005 PRegs.push_back(*AS);
2006 for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
// Visit each instruction that touches li.reg only once, even if it has
// multiple operands referencing the register.
2011 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2012 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2013 E = mri_->reg_end(); I != E; ++I) {
2014 MachineOperand &O = I.getOperand();
2015 MachineInstr *MI = O.getParent();
2016 if (MI->isDebugValue() || SeenMIs.count(MI))
2019 SlotIndex Index = getInstructionIndex(MI);
2020 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2021 unsigned PReg = PRegs[i];
2022 LiveInterval &pli = getInterval(PReg);
2023 if (!pli.liveAt(Index))
2025 vrm.addEmergencySpill(PReg, MI);
2026 SlotIndex StartIdx = Index.getLoadIndex();
2027 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2028 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2029 pli.removeRange(StartIdx, EndIdx);
// The physreg interval cannot be cut around this instruction: the
// allocator has genuinely run out of registers. This is fatal, and
// commonly caused by over-constrained inline asm.
2033 raw_string_ostream Msg(msg);
2034 Msg << "Ran out of registers during register allocation!";
2035 if (MI->isInlineAsm()) {
2036 Msg << "\nPlease check your inline asm statement for invalid "
2037 << "constraints:\n";
2038 MI->print(Msg, tm_);
2040 report_fatal_error(Msg.str());
2042 for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
2043 if (!hasInterval(*AS))
2045 LiveInterval &spli = getInterval(*AS);
2046 if (spli.liveAt(Index))
2047 spli.removeRange(Index.getLoadIndex(),
2048 Index.getNextIndex().getBaseIndex());
2055 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2056 MachineInstr* startInst) {
2057 LiveInterval& Interval = getOrCreateInterval(reg);
2058 VNInfo* VN = Interval.getNextValue(
2059 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2060 startInst, true, getVNInfoAllocator());
2061 VN->setHasPHIKill(true);
2062 VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
2064 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2065 getMBBEndIdx(startInst->getParent()), VN);
2066 Interval.addRange(LR);