1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
// Hidden options for help debugging.
// -disable-rematerialization turns off remat of trivial defs during spilling.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

// Pass statistics, reported with -stats.
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

// Pass identification and legacy-PassManager registration.
char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
/// getAnalysisUsage - Declare the analyses this pass requires and those it
/// keeps up to date, so the pass manager can schedule and cache correctly.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  // PHI elimination and two-address lowering must have run already; live
  // intervals are computed on the post-SSA machine code.
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  // Transitive: users of LiveIntervals implicitly depend on SlotIndexes too.
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
/// releaseMemory - Drop all per-function state: the interval objects, the
/// VNInfo allocator pool, and any cloned instructions left over from splits.
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  // NOTE(review): the loop body deleting each mapped interval appears elided
  // in this excerpt — confirm against the full file.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)

  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
  // Delete instructions cloned during interval splitting that were never
  // inserted into a block; they still belong to this pass.
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    mf_->DeleteMachineInstr(MI);
/// runOnMachineFunction - Register allocate the whole function
///
/// Caches pointers to the target/register/instruction info and the required
/// analyses, then computes the live intervals for the function.
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // NOTE(review): mf_ is read before any visible assignment here — the
  // `mf_ = &fn;` line appears elided in this excerpt; verify in full file.
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  // Update the -stats counter with the intervals just computed.
  numIntervals += getNumIntervals();
/// print - Implement the dump method.
/// Writes every computed live interval to OS using target register names.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
/// printInstrs - Dump every machine instruction in the function to OS,
/// prefixed with its slot index; DBG_VALUE instructions are special-cased.
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      // DBG_VALUEs have no slot index; they get distinct formatting.
      if (mii->isDebugValue())
      OS << getInstructionIndex(mii) << '\t' << *mii;
/// dumpInstrs - Convenience wrapper printing all instructions to dbgs().
void LiveIntervals::dumpInstrs() const {
/// conflictsWithPhysReg - Return true if assigning physical register 'reg'
/// would conflict with instructions inside li's (single) live range, e.g.
/// some instruction in the range uses a register that overlaps 'reg'.
/// Copies to/from li.reg itself are tolerated. Conservative: bails out for
/// intervals with more than one range.
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)

  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())

  MachineBasicBlock::const_iterator E = lastMI;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)

    // Copy-like instructions: operand 0/1 are dst/src.
    if (MI.getOperand(0).getReg() == li.reg ||
        MI.getOperand(1).getReg() == li.reg)

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);

      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)

      // Map already-assigned virtual registers to their physical register.
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))

        PhysReg = vrm.getPhys(PhysReg);

      // Any overlap with 'reg' (including sub/super-registers) conflicts.
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))

  // No conflicts found.
/// conflictsWithAliasRef - Return true if any instruction covered by li's
/// live ranges references a physical register that overlaps (aliases) Reg.
/// Instructions in JoinedCopies (copies about to be erased by coalescing)
/// are ignored.
bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    // Visit each base (instruction) index covered by this range.
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
        continue;               // skip deleted instructions

      // Skip copies scheduled for deletion by the coalescer.
      if (JoinedCopies.count(MI))

      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);

        unsigned PhysReg = MO.getReg();
        // Only distinct physical registers can alias Reg.
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))

        if (tri_->regsOverlap(Reg, PhysReg))
/// printRegName - Print a register to dbgs(): the target's name for physical
/// registers, "%reg<N>" for virtual registers.
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  dbgs() << "%reg" << reg;
/// MultipleDefsBySameMI - Return true if the same virtual register defined by
/// operand MOIdx is defined again by a later operand of the same instruction
/// (e.g. two sub-register defs, as produced by REG_SEQUENCE elimination).
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);

    if (MO.getReg() == Reg && MO.isDef()) {
      // The two defs must write different sub-registers (or the later one is
      // an implicit def); identical full defs would be malformed.
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
             (MO.getSubReg() || MO.isImplicit()));
/// isPartialRedef - Return true if the specified def at the specific index is
/// partially re-defining the specified live interval. A common case of this is
/// a definition of the sub-register.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  // Only sub-register defs (and not earlyclobbers) can be partial redefs.
  if (!MO.getSubReg() || MO.isEarlyClobber())

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  // Find the value live just before this def.
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  if (OldLR->valno->isDefAccurate()) {
    MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
    // Partial redef iff the previous defining instruction also wrote this reg.
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
/// handleVirtualRegisterDef - Update 'interval' for a definition of a virtual
/// register by instruction 'mi'. Handles the first def of a vreg (building
/// its ranges from LiveVariables kill info), re-defs from two-address /
/// partial sub-register redefinition, and PHI-join re-defs.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             LiveInterval &interval) {
  dbgs() << "\t\tregister: ";
  printRegName(interval.reg, tri_);

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    mi->addRegisterDefined(interval.reg);

    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->isCopyLike() ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg)) {

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
        // Self-kill: the def is immediately dead; use the store slot.
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);
      // A phi join register is killed at the end of the MBB and revived as a new
      // valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
           E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        // Live through the whole block: start-of-block to end-of-block.
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);

      // Finally, this virtual register is live from the start of any killing
      // block to the 'use' slot of the killing instruction.
      for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
        MachineInstr *Kill = vi.Kills[i];
        SlotIndex Start = getMBBStartIdx(Kill->getParent());
        SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

        // Create interval with one of a NEW value number. Note that this value
        // number isn't actually defined by an instruction, weird huh? :)
          ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
          ValNo->setIsPHIDef(true);
        LiveRange LR(Start, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);

    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.
    //
    // It may also be partial redef like this:
    // 80 %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (PartReDef && (mi->isCopyLike() ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg)))
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),

        dbgs() << " RESULT: ";
        interval.print(dbgs(), tri_);
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->isCopyLike() ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))

      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
      // Any other multiple def of a vreg is a bug at this stage.
      llvm_unreachable("Multiply defined register");

  DEBUG(dbgs() << '\n');
/// handlePhysicalRegisterDef - Update 'interval' for a def of a physical
/// register at MIIdx. CopyMI is the defining instruction if it is a copy
/// (recorded on the value number), otherwise null. Scans forward within the
/// defining block for the kill or redef that ends the range.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  dbgs() << "\t\tregister: ";
  printRegName(interval.reg, tri_);

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    // DBG_VALUEs occupy no slot; skip them without advancing the index.
    if (mi->isDebugValue())
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();

      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();

    baseIndex = baseIndex.getNextIndex();

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used. Another possible case is the implicit use of the
  // physical register has been deleted by two-address pass.
  end = start.getStoreIndex();

  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
/// handleRegisterDef - Dispatch a register def to the virtual- or
/// physical-register handler. For allocatable physregs, also processes the
/// implicitly-defined sub-registers.
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    // Record the defining instruction on the value number if it is a copy.
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->isCopyLike() ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
/// handleLiveInRegister - Create the live range for a register that is
/// live-in to MBB (starting at MIIdx): scan forward for the kill or redef
/// that ends it, and mark the resulting value number as a PHI def since it
/// has no defining instruction in this function.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         LiveInterval &interval, bool isAlias) {
  dbgs() << "\t\tlivein register: ";
  printRegName(interval.reg, tri_);

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      // MBB is empty except for DBG_VALUE's.

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  // Live-in register might not be used at all.
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
      DEBUG(dbgs() << " live through");

  // The live-in value has no in-function def; model it as a PHI def at the
  // block start.
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  // Registers only referenced by <undef> uses; they get empty intervals.
  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
    MachineBasicBlock *MBB = MBBI;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      // DBG_VALUEs carry no slot index and define nothing.
      if (MI->isDebugValue())

      // Walk operands in reverse so explicit defs are seen before implicit.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())

        // handle register defs - build intervals
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
/// createInterval - Allocate a fresh LiveInterval for 'reg'. Physical
/// registers get HUGE_VALF spill weight so the allocator never spills them;
/// virtual registers start with weight zero.
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  // Deep-copy ranges and value numbers using this pass's VNInfo allocator.
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
777 //===----------------------------------------------------------------------===//
778 // Register allocator hooks.
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
    unsigned Reg = MO.getReg();
    // Ignore uses of li's own register.
    if (Reg == 0 || Reg == li.reg)

    // Unallocatable physreg uses (e.g. reserved regs) don't block remat.
    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
    // FIXME: For now, only remat MI with at most one register operand.
           "Can't rematerialize instruction with multiple register operand!");
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  // Value reaching MI.
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  // It is "available" at UseIdx iff the same value is live there.
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
  if (!tii_->isTriviallyReMaterializable(MI, aa_))

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
    const LiveInterval &ImpLi = getInterval(ImpUse);
    // The implicit-use value must reach every non-debug use of li.reg that
    // this val# covers, or remat would change the value seen there.
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
/// Convenience overload with no pending-spill list.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  // Empty spill list; load flag result is discarded.
  SmallVector<LiveInterval*, 4> Dummy1;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
    const VNInfo *VNI = *i;
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding; otherwise FoldOps receives the surviving operand indexes and
/// MRInfo accumulates VirtRegMap mod/ref flags.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            SmallVector<unsigned, 2> &FoldOps) {
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
      MRInfo |= (unsigned)VirtRegMap::isMod;
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
      MRInfo |= (unsigned)VirtRegMap::isRef;
    FoldOps.push_back(OpIdx);
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successul, MI is updated with the newly created MI and
/// the caller's MI pointer is replaced via the reference parameter.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))

  // Fold either the stack slot or the rematerialized def instruction.
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    // Migrate all VirtRegMap bookkeeping from the old MI to the folded one.
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI->eraseFromParent();
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))

  // Defer the final decision to the target.
  return tii_->canFoldMemoryOperand(MI, FoldOps);
/// intervalIsInOneMBB - Return true if all of li's live ranges are contained
/// inside a single basic block.
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  // Block covering the first range; compare every other range against it.
  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
  // There is an implicit use. That means one of the other operand is
  // being remat'ed and the remat'ed instruction has li.reg as an
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);

    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
    if (!vrm.isReMaterialized(Reg))
    // Redirect the remat'ed instruction's use of li.reg to the new vreg.
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
      UseMO->setReg(NewVReg);
1034 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1035 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
/// Rewrites a single instruction's operands that reference the spilled
/// interval: tries store/load folding, rematerialization bookkeeping, stack
/// slot assignment, and builds the new (short) spill interval for NewVReg.
/// Returns whether all processed def/use operands could be folded (CanFold).
/// HasDef/HasUse report what kinds of operands were rewritten; NewVReg is
/// in-out: reused for repeated reads, created on demand.
1036 bool LiveIntervals::
1037 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1038 bool TrySplit, SlotIndex index, SlotIndex end,
1040 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1041 unsigned Slot, int LdSlot,
1042 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1044 const TargetRegisterClass* rc,
1045 SmallVector<int, 4> &ReMatIds,
1046 const MachineLoopInfo *loopInfo,
1047 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1048 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1049 std::vector<LiveInterval*> &NewLIs) {
1050 bool CanFold = false;
// Walk every operand; note getNumOperands() is re-read each iteration
// because folding can rewrite the instruction (see RestartInstruction).
1052 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1053 MachineOperand& mop = MI->getOperand(i);
1056 unsigned Reg = mop.getReg();
1057 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1062 bool TryFold = !DefIsReMat;
1063 bool FoldSS = true; // Default behavior unless it's a remat.
1064 int FoldSlot = Slot;
1066 // If this is the rematerializable definition MI itself and
1067 // all of its uses are rematerialized, simply delete it.
1068 if (MI == ReMatOrigDefMI && CanDelete) {
1069 DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
1071 RemoveMachineInstrFromMaps(MI);
1072 vrm.RemoveMachineInstrFromMaps(MI);
1073 MI->eraseFromParent();
1077 // If def for this use can't be rematerialized, then try folding.
1078 // If def is rematerializable and it's a load, also try folding.
1079 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1081 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1087 // Scan all of the operands of this instruction rewriting operands
1088 // to use NewVReg instead of li.reg as appropriate. We do this for
1091 // 1. If the instr reads the same spilled vreg multiple times, we
1092 // want to reuse the NewVReg.
1093 // 2. If the instr is a two-addr instruction, we are required to
1094 // keep the src/dst regs pinned.
1096 // Keep track of whether we replace a use and/or def so that we can
1097 // create the spill interval with the appropriate range.
1098 SmallVector<unsigned, 2> Ops;
1099 tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);
1101 // Create a new virtual register for the spill interval.
1102 // Create the new register now so we can map the fold instruction
1103 // to the new register so when it is unfolded we get the correct
1105 bool CreatedNewVReg = false;
1107 NewVReg = mri_->createVirtualRegister(rc);
1109 CreatedNewVReg = true;
1111 // The new virtual register should get the same allocation hints as the
1113 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1114 if (Hint.first || Hint.second)
1115 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1121 // Do not fold load / store here if we are splitting. We'll find an
1122 // optimal point to insert a load / store later.
1124 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1125 Ops, FoldSS, FoldSlot, NewVReg)) {
1126 // Folding the load/store can completely change the instruction in
1127 // unpredictable ways, rescan it from the beginning.
1130 // We need to give the new vreg the same stack slot as the
1131 // spilled interval.
1132 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
// NOTE(review): the elided branch around here appears to return early
// when the folded MI is no longer in the index maps — confirm upstream.
1138 if (isNotInMIMap(MI))
1140 goto RestartInstruction;
1143 // We'll try to fold it later if it's profitable.
1144 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand (and all other operands in Ops) to the new vreg.
1148 mop.setReg(NewVReg);
1149 if (mop.isImplicit())
1150 rewriteImplicitOps(li, MI, NewVReg, vrm);
1152 // Reuse NewVReg for other reads.
1153 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1154 MachineOperand &mopj = MI->getOperand(Ops[j]);
1155 mopj.setReg(NewVReg);
1156 if (mopj.isImplicit())
1157 rewriteImplicitOps(li, MI, NewVReg, vrm);
1160 if (CreatedNewVReg) {
// Record remat info / stack slot for the fresh vreg in the VirtRegMap.
1162 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1163 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1164 // Each valnum may have its own remat id.
1165 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1167 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1169 if (!CanDelete || (HasUse && HasDef)) {
1170 // If this is a two-addr instruction then its use operands are
1171 // rematerializable but its def is not. It should be assigned a
1173 vrm.assignVirt2StackSlot(NewVReg, Slot);
1176 vrm.assignVirt2StackSlot(NewVReg, Slot);
1178 } else if (HasUse && HasDef &&
1179 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1180 // If this interval hasn't been assigned a stack slot (because earlier
1181 // def is a deleted remat def), do it now.
1182 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1183 vrm.assignVirt2StackSlot(NewVReg, Slot);
1186 // Re-matting an instruction with virtual register use. Add the
1187 // register as an implicit use on the use MI.
1188 if (DefIsReMat && ImpUse)
1189 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1191 // Create a new register interval for this spill / remat.
1192 LiveInterval &nI = getOrCreateInterval(NewVReg);
1193 if (CreatedNewVReg) {
1194 NewLIs.push_back(&nI);
1195 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1197 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Build the tiny live range(s) for the spill vreg: [load, def) for a use,
// and (below) [def, store) for a def.
1201 if (CreatedNewVReg) {
1202 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1203 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1204 DEBUG(dbgs() << " +" << LR);
1207 // Extend the split live interval to this def / use.
1208 SlotIndex End = index.getDefIndex();
1209 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1210 nI.getValNumInfo(nI.getNumValNums()-1));
1211 DEBUG(dbgs() << " +" << LR);
1216 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1217 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1218 DEBUG(dbgs() << " +" << LR);
1223 dbgs() << "\t\t\t\tAdded new interval: ";
1224 nI.print(dbgs(), tri_);
/// anyKillInMBBAfterIdx - Return true if the interval is killed anywhere in
/// MBB strictly after Idx (up to the block's end index).
// NOTE(review): callers also pass a VNInfo argument (see uses in
// rewriteInstructionsForSpills); the elided parameter line presumably
// declares it — confirm against the header.
1230 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1232 MachineBasicBlock *MBB,
1233 SlotIndex Idx) const {
1234 return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
1237 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1238 /// during spilling.
// Pairs an instruction with its slot index so the rewrite worklist can be
// sorted in instruction order (see RewriteInfoCompare below).
1240 struct RewriteInfo {
1243 RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
// Strict-weak ordering on RewriteInfo by slot index; used with std::sort
// so defs/uses are rewritten in program order.
1246 struct RewriteInfoCompare {
1247 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1248 return LHS.Index < RHS.Index;
/// rewriteInstructionsForSpills - Rewrite all defs/uses of li.reg inside the
/// live range [I->start, I->end): collects them sorted by index, rewrites
/// each via rewriteInstructionForSpills, and records per-MBB spill points
/// (SpillMBBs/SpillIdxes) and restore points (RestoreMBBs/RestoreIdxes)
/// for the splitting/spilling pass to materialize later.
1253 void LiveIntervals::
1254 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1255 LiveInterval::Ranges::const_iterator &I,
1256 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1257 unsigned Slot, int LdSlot,
1258 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1260 const TargetRegisterClass* rc,
1261 SmallVector<int, 4> &ReMatIds,
1262 const MachineLoopInfo *loopInfo,
1263 BitVector &SpillMBBs,
1264 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1265 BitVector &RestoreMBBs,
1266 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1267 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1268 std::vector<LiveInterval*> &NewLIs) {
1269 bool AllCanFold = true;
1270 unsigned NewVReg = 0;
1271 SlotIndex start = I->start.getBaseIndex();
1272 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1274 // First collect all the def / use in this live range that will be rewritten.
1275 // Make sure they are sorted according to instruction index.
1276 std::vector<RewriteInfo> RewriteMIs;
1277 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1278 re = mri_->reg_end(); ri != re; ) {
1279 MachineInstr *MI = &*ri;
1280 MachineOperand &O = ri.getOperand();
// DBG_VALUEs are not real defs/uses: either retarget them at the stack
// slot or drop the debug info entirely.
1282 if (MI->isDebugValue()) {
1283 // Modify DBG_VALUE now that the value is in a spill slot.
1284 if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
1285 uint64_t Offset = MI->getOperand(1).getImm();
1286 const MDNode *MDPtr = MI->getOperand(2).getMetadata();
1287 DebugLoc DL = MI->getDebugLoc();
1288 int FI = isLoadSS ? LdSlot : (int)Slot;
1289 if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
1290 Offset, MDPtr, DL)) {
1291 DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
1292 ReplaceMachineInstrInMaps(MI, NewDV);
1293 MachineBasicBlock *MBB = MI->getParent();
1294 MBB->insert(MBB->erase(MI), NewDV);
1299 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1300 RemoveMachineInstrFromMaps(MI);
1301 vrm.RemoveMachineInstrFromMaps(MI);
1302 MI->eraseFromParent();
1305 assert(!(O.isImplicit() && O.isUse()) &&
1306 "Spilling register that's used as implicit use?");
1307 SlotIndex index = getInstructionIndex(MI);
// Only instructions inside the current live range are rewritten here.
1308 if (index < start || index >= end)
1312 // Must be defined by an implicit def. It should not be spilled. Note,
1313 // this is for correctness reason. e.g.
1314 // 8 %reg1024<def> = IMPLICIT_DEF
1315 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1316 // The live range [12, 14) are not part of the r1024 live interval since
1317 // it's defined by an implicit def. It will not conflicts with live
1318 // interval of r1025. Now suppose both registers are spilled, you can
1319 // easily see a situation where both registers are reloaded before
1320 // the INSERT_SUBREG and both target registers that would overlap.
1322 RewriteMIs.push_back(RewriteInfo(index, MI));
1324 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1326 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1327 // Now rewrite the defs and uses.
1328 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1329 RewriteInfo &rwi = RewriteMIs[i];
1331 SlotIndex index = rwi.Index;
1332 MachineInstr *MI = rwi.MI;
1333 // If MI def and/or use the same register multiple times, then there
1334 // are multiple entries.
1335 while (i != e && RewriteMIs[i].MI == MI) {
1336 assert(RewriteMIs[i].Index == index);
1339 MachineBasicBlock *MBB = MI->getParent();
1341 if (ImpUse && MI != ReMatDefMI) {
1342 // Re-matting an instruction with virtual register use. Prevent interval
1343 // from being spilled.
1344 getInterval(ImpUse).markNotSpillable();
// When splitting, reuse the vreg already assigned to this MBB (if any)
// so uses within one block share a single split interval.
1347 unsigned MBBId = MBB->getNumber();
1348 unsigned ThisVReg = 0;
1350 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1351 if (NVI != MBBVRegsMap.end()) {
1352 ThisVReg = NVI->second;
1359 // It's better to start a new interval to avoid artifically
1360 // extend the new interval.
1361 if (MI->readsWritesVirtualRegister(li.reg) ==
1362 std::make_pair(false,true)) {
1363 MBBVRegsMap.erase(MBB->getNumber());
1369 bool IsNew = ThisVReg == 0;
1371 // This ends the previous live interval. If all of its def / use
1372 // can be folded, give it a low spill weight.
1373 if (NewVReg && TrySplit && AllCanFold) {
1374 LiveInterval &nI = getOrCreateInterval(NewVReg);
1381 bool HasDef = false;
1382 bool HasUse = false;
1383 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1384 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1385 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1386 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1387 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1388 if (!HasDef && !HasUse)
1391 AllCanFold &= CanFold;
1393 // Update weight of spill interval.
1394 LiveInterval &nI = getOrCreateInterval(NewVReg);
1396 // The spill weight is now infinity as it cannot be spilled again.
1397 nI.markNotSpillable();
1401 // Keep track of the last def and first use in each MBB.
1403 if (MI != ReMatOrigDefMI || !CanDelete) {
1404 bool HasKill = false;
1406 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1408 // If this is a two-address code, then this index starts a new VNInfo.
1409 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1411 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1413 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1414 SpillIdxes.find(MBBId);
// Record the spill point for this def in its MBB, keeping only the last
// def per vreg (two-address redefinitions supersede earlier spills).
1416 if (SII == SpillIdxes.end()) {
1417 std::vector<SRInfo> S;
1418 S.push_back(SRInfo(index, NewVReg, true));
1419 SpillIdxes.insert(std::make_pair(MBBId, S));
1420 } else if (SII->second.back().vreg != NewVReg) {
1421 SII->second.push_back(SRInfo(index, NewVReg, true));
1422 } else if (index > SII->second.back().index) {
1423 // If there is an earlier def and this is a two-address
1424 // instruction, then it's not possible to fold the store (which
1425 // would also fold the load).
1426 SRInfo &Info = SII->second.back();
1428 Info.canFold = !HasUse;
1430 SpillMBBs.set(MBBId);
1431 } else if (SII != SpillIdxes.end() &&
1432 SII->second.back().vreg == NewVReg &&
1433 index > SII->second.back().index) {
1434 // There is an earlier def that's not killed (must be two-address).
1435 // The spill is no longer needed.
1436 SII->second.pop_back();
1437 if (SII->second.empty()) {
1438 SpillIdxes.erase(MBBId);
1439 SpillMBBs.reset(MBBId);
// Use side: decide whether earlier spill/restore entries remain foldable,
// and record a restore (reload) point for this use if none exists yet.
1446 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1447 SpillIdxes.find(MBBId);
1448 if (SII != SpillIdxes.end() &&
1449 SII->second.back().vreg == NewVReg &&
1450 index > SII->second.back().index)
1451 // Use(s) following the last def, it's not safe to fold the spill.
1452 SII->second.back().canFold = false;
1453 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1454 RestoreIdxes.find(MBBId);
1455 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1456 // If we are splitting live intervals, only fold if it's the first
1457 // use and there isn't another use later in the MBB.
1458 RII->second.back().canFold = false;
1460 // Only need a reload if there isn't an earlier def / use.
1461 if (RII == RestoreIdxes.end()) {
1462 std::vector<SRInfo> Infos;
1463 Infos.push_back(SRInfo(index, NewVReg, true));
1464 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1466 RII->second.push_back(SRInfo(index, NewVReg, true));
1468 RestoreMBBs.set(MBBId);
1472 // Update spill weight.
1473 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1474 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1477 if (NewVReg && TrySplit && AllCanFold) {
1478 // If all of its def / use can be folded, give it a low spill weight.
1479 LiveInterval &nI = getOrCreateInterval(NewVReg);
/// alsoFoldARestore - Return true if there is a recorded restore point in
/// block Id at exactly this index for vreg vr that is still marked foldable;
/// the caller can then fold the restore together with the spill.
1484 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1485 unsigned vr, BitVector &RestoreMBBs,
1486 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1487 if (!RestoreMBBs[Id])
// Linear scan over this block's restore records; the lists are small.
1489 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1490 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1491 if (Restores[i].index == index &&
1492 Restores[i].vreg == vr &&
1493 Restores[i].canFold)
/// eraseRestoreInfo - Invalidate the restore record(s) for vreg vr at the
/// given index in block Id by resetting their index to the null SlotIndex,
/// so the later restore-emission loop skips them.
1498 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1499 unsigned vr, BitVector &RestoreMBBs,
1500 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1501 if (!RestoreMBBs[Id])
1503 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1504 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
// NOTE(review): the condition tests `Restores[i].vreg` for non-zero rather
// than comparing it to vr — looks intentional here but verify upstream.
1505 if (Restores[i].index == index && Restores[i].vreg)
1506 Restores[i].index = SlotIndex();
1509 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1510 /// spilled and create empty intervals for their uses.
1512 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1513 const TargetRegisterClass* rc,
1514 std::vector<LiveInterval*> &NewLIs) {
// Walk all remaining references to li.reg (any left after rewriting must
// stem from implicit defs).
1515 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1516 re = mri_->reg_end(); ri != re; ) {
1517 MachineOperand &O = ri.getOperand();
1518 MachineInstr *MI = &*ri;
1520 if (MI->isDebugValue()) {
1521 // Remove debug info for now.
1523 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
// A surviving def must be the IMPLICIT_DEF itself; delete it outright.
1527 assert(MI->isImplicitDef() &&
1528 "Register def was not rewritten?");
1529 RemoveMachineInstrFromMaps(MI);
1530 vrm.RemoveMachineInstrFromMaps(MI);
1531 MI->eraseFromParent();
1533 // This must be an use of an implicit_def so it's not part of the live
1534 // interval. Create a new empty live interval for it.
1535 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1536 unsigned NewVReg = mri_->createVirtualRegister(rc);
1538 vrm.setIsImplicitlyDefined(NewVReg);
1539 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Retarget every operand of this user that still names li.reg.
1540 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1541 MachineOperand &MO = MI->getOperand(i);
1542 if (MO.isReg() && MO.getReg() == li.reg) {
/// getSpillWeight - Compute the contribution of a single def and/or use to
/// an interval's spill weight, scaled by (a tempered function of) loop depth.
1552 LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1553 // Limit the loop depth ridiculousness.
1554 if (loopDepth > 200)
1557 // The loop depth is used to roughly estimate the number of times the
1558 // instruction is executed. Something like 10^d is simple, but will quickly
1559 // overflow a float. This expression behaves like 10^d for small d, but is
1560 // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1561 // headroom before overflow.
1562 float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
// isDef/isUse are bools, so (isDef + isUse) is 0, 1 or 2.
1564 return (isDef + isUse) * lc;
/// normalizeSpillWeights - Normalize the spill weight of every newly created
/// interval via normalizeSpillWeight.
1568 LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1569 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1570 normalizeSpillWeight(*NewLIs[i]);
/// addIntervalsForSpills - Top-level spilling driver: given a spillable
/// interval, decide rematerialization vs. stack slot per value number,
/// rewrite all defs/uses (possibly splitting across blocks), insert spill
/// and restore points via the VirtRegMap, and return the new intervals
/// created for the spill.
1573 std::vector<LiveInterval*> LiveIntervals::
1574 addIntervalsForSpills(const LiveInterval &li,
1575 SmallVectorImpl<LiveInterval*> &SpillIs,
1576 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1577 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1580 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1581 li.print(dbgs(), tri_);
1585 // Each bit specify whether a spill is required in the MBB.
1586 BitVector SpillMBBs(mf_->getNumBlockIDs());
1587 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1588 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1589 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1590 DenseMap<unsigned,unsigned> MBBVRegsMap;
1591 std::vector<LiveInterval*> NewLIs;
1592 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-value-number rematerialization state, indexed by VNInfo id.
1594 unsigned NumValNums = li.getNumValNums();
1595 SmallVector<MachineInstr*, 4> ReMatDefs;
1596 ReMatDefs.resize(NumValNums, NULL);
1597 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1598 ReMatOrigDefs.resize(NumValNums, NULL);
1599 SmallVector<int, 4> ReMatIds;
1600 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1601 BitVector ReMatDelete(NumValNums);
1602 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1604 // Spilling a split live interval. It cannot be split any further. Also,
1605 // it's also guaranteed to be a single val# / range interval.
1606 if (vrm.getPreSplitReg(li.reg)) {
1607 vrm.setIsSplitFromReg(li.reg, 0);
1608 // Unset the split kill marker on the last use.
1609 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1610 if (KillIdx != SlotIndex()) {
1611 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1612 assert(KillMI && "Last use disappeared?");
1613 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1614 assert(KillOp != -1 && "Last use disappeared?");
1615 KillMI->getOperand(KillOp).setIsKill(false);
1617 vrm.removeKillPoint(li.reg);
1618 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1619 Slot = vrm.getStackSlot(li.reg);
1620 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1621 MachineInstr *ReMatDefMI = DefIsReMat ?
1622 vrm.getReMaterializedMI(li.reg) : NULL;
1624 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1625 bool isLoad = isLoadSS ||
1626 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1627 bool IsFirstRange = true;
1628 for (LiveInterval::Ranges::const_iterator
1629 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1630 // If this is a split live interval with multiple ranges, it means there
1631 // are two-address instructions that re-defined the value. Only the
1632 // first def can be rematerialized!
1634 // Note ReMatOrigDefMI has already been deleted.
1635 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1636 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1637 false, vrm, rc, ReMatIds, loopInfo,
1638 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1639 MBBVRegsMap, NewLIs);
1641 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1642 Slot, 0, false, false, false,
1643 false, vrm, rc, ReMatIds, loopInfo,
1644 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1645 MBBVRegsMap, NewLIs);
1647 IsFirstRange = false;
// Early-return path for the pre-split case ends here.
1650 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1651 normalizeSpillWeights(NewLIs);
// Splitting only makes sense when the interval spans multiple blocks.
1655 bool TrySplit = !intervalIsInOneMBB(li);
1658 bool NeedStackSlot = false;
1659 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1661 const VNInfo *VNI = *i;
1662 unsigned VN = VNI->id;
1663 if (VNI->isUnused())
1664 continue; // Dead val#.
1665 // Is the def for the val# rematerializable?
1666 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1667 ? getInstructionFromIndex(VNI->def) : 0;
1669 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1670 // Remember how to remat the def of this val#.
1671 ReMatOrigDefs[VN] = ReMatDefMI;
1672 // Original def may be modified so we have to make a copy here.
1673 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1674 CloneMIs.push_back(Clone);
1675 ReMatDefs[VN] = Clone;
1677 bool CanDelete = true;
1678 if (VNI->hasPHIKill()) {
1679 // A kill is a phi node, not all of its uses can be rematerialized.
1680 // It must not be deleted.
1682 // Need a stack slot if there is any live range where uses cannot be
1684 NeedStackSlot = true;
1687 ReMatDelete.set(VN);
1689 // Need a stack slot if there is any live range where uses cannot be
1691 NeedStackSlot = true;
1695 // One stack slot per live interval.
1696 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1697 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1698 Slot = vrm.assignVirt2StackSlot(li.reg);
1700 // This case only occurs when the prealloc splitter has already assigned
1701 // a stack slot to this vreg.
1703 Slot = vrm.getStackSlot(li.reg);
1706 // Create new intervals and rewrite defs and uses.
1707 for (LiveInterval::Ranges::const_iterator
1708 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1709 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1710 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1711 bool DefIsReMat = ReMatDefMI != NULL;
1712 bool CanDelete = ReMatDelete[I->valno->id];
1714 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1715 bool isLoad = isLoadSS ||
1716 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1717 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1718 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1719 CanDelete, vrm, rc, ReMatIds, loopInfo,
1720 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1721 MBBVRegsMap, NewLIs);
1724 // Insert spills / restores if we are splitting.
1726 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1727 normalizeSpillWeights(NewLIs);
// Emit spill points: for each block flagged in SpillMBBs, try to fold the
// store into the defining instruction, otherwise tell the spiller.
1731 SmallPtrSet<LiveInterval*, 4> AddedKill;
1732 SmallVector<unsigned, 2> Ops;
1733 if (NeedStackSlot) {
1734 int Id = SpillMBBs.find_first();
1736 std::vector<SRInfo> &spills = SpillIdxes[Id];
1737 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1738 SlotIndex index = spills[i].index;
1739 unsigned VReg = spills[i].vreg;
1740 LiveInterval &nI = getOrCreateInterval(VReg);
1741 bool isReMat = vrm.isReMaterialized(VReg);
1742 MachineInstr *MI = getInstructionFromIndex(index);
1743 bool CanFold = false;
1744 bool FoundUse = false;
1746 if (spills[i].canFold) {
1748 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1749 MachineOperand &MO = MI->getOperand(j);
1750 if (!MO.isReg() || MO.getReg() != VReg)
1757 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1758 RestoreMBBs, RestoreIdxes))) {
1759 // MI has two-address uses of the same register. If the use
1760 // isn't the first and only use in the BB, then we can't fold
1761 // it. FIXME: Move this to rewriteInstructionsForSpills.
1768 // Fold the store into the def if possible.
1769 bool Folded = false;
1770 if (CanFold && !Ops.empty()) {
1771 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1774 // Also folded uses, do not issue a load.
1775 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1776 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1778 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1782 // Otherwise tell the spiller to issue a spill.
1784 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1785 bool isKill = LR->end == index.getStoreIndex();
1786 if (!MI->registerDefIsDead(nI.reg))
1787 // No need to spill a dead def.
1788 vrm.addSpillPoint(VReg, isKill, MI);
1790 AddedKill.insert(&nI);
1793 Id = SpillMBBs.find_next(Id);
// Emit restore points: for each flagged block, try folding the reload (or
// the remat'ed load) into the use, otherwise request a restore.
1797 int Id = RestoreMBBs.find_first();
1799 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1800 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1801 SlotIndex index = restores[i].index;
// A null index means the restore was folded away (see eraseRestoreInfo).
1802 if (index == SlotIndex())
1804 unsigned VReg = restores[i].vreg;
1805 LiveInterval &nI = getOrCreateInterval(VReg);
1806 bool isReMat = vrm.isReMaterialized(VReg);
1807 MachineInstr *MI = getInstructionFromIndex(index);
1808 bool CanFold = false;
1810 if (restores[i].canFold) {
1812 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1813 MachineOperand &MO = MI->getOperand(j);
1814 if (!MO.isReg() || MO.getReg() != VReg)
1818 // If this restore were to be folded, it would have been folded
1827 // Fold the load into the use if possible.
1828 bool Folded = false;
1829 if (CanFold && !Ops.empty()) {
1831 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1833 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1835 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1836 // If the rematerializable def is a load, also try to fold it.
1837 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1838 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1839 Ops, isLoadSS, LdSlot, VReg);
1841 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1843 // Re-matting an instruction with virtual register use. Add the
1844 // register as an implicit use on the use MI and mark the register
1845 // interval as unspillable.
1846 LiveInterval &ImpLi = getInterval(ImpUse);
1847 ImpLi.markNotSpillable();
1848 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1853 // If folding is not possible / failed, then tell the spiller to issue a
1854 // load / rematerialization for us.
1856 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1858 vrm.addRestorePoint(VReg, MI);
1860 Id = RestoreMBBs.find_next(Id);
1863 // Finalize intervals: add kills, finalize spill weights, and filter out
1865 std::vector<LiveInterval*> RetNewLIs;
1866 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1867 LiveInterval *LI = NewLIs[i];
1869 LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
1870 if (!AddedKill.count(LI)) {
// Mark the final use of each new interval as a kill and record it in the
// VirtRegMap so the spiller knows where the value dies.
1871 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1872 SlotIndex LastUseIdx = LR->end.getBaseIndex();
1873 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1874 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1875 assert(UseIdx != -1);
1876 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
1877 LastUse->getOperand(UseIdx).setIsKill();
1878 vrm.addKillPoint(LI->reg, LastUseIdx);
1881 RetNewLIs.push_back(LI);
1885 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1886 normalizeSpillWeights(RetNewLIs);
1890 /// hasAllocatableSuperReg - Return true if the specified physical register has
1891 /// any super register that's allocatable.
1892 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// Scan the target's super-register list; requires the super-register to be
// both allocatable and tracked with a live interval.
1893 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1894 if (allocatableRegs_[*AS] && hasInterval(*AS))
1899 /// getRepresentativeReg - Find the largest super register of the specified
1900 /// physical register.
1901 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1902 // Find the largest super-register that is allocatable.
// Start with Reg itself; a super-register with no allocatable super of its
// own (but with an interval) is "the top" of the chain.
1903 unsigned BestReg = Reg;
1904 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1905 unsigned SuperReg = *AS;
1906 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1914 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1915 /// specified interval that conflicts with the specified physical register.
1916 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1917 unsigned PhysReg) const {
1918 unsigned NumConflicts = 0;
// Compare against the representative (largest) super-register's interval so
// aliasing sub-registers are accounted for.
1919 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1920 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1921 E = mri_->reg_end(); I != E; ++I) {
1922 MachineOperand &O = I.getOperand();
1923 MachineInstr *MI = O.getParent();
// Debug values are not real references; skip them.
1924 if (MI->isDebugValue())
1926 SlotIndex Index = getInstructionIndex(MI);
1927 if (pli.liveAt(Index))
1930 return NumConflicts;
1933 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1934 /// around all defs and uses of the specified interval. Return true if it
1935 /// was able to cut its interval.
1936 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1937 unsigned PhysReg, VirtRegMap &vrm) {
// Spill the representative super-register so all aliases are handled at once.
1938 unsigned SpillReg = getRepresentativeReg(PhysReg);
1940 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1941 // If there are registers which alias PhysReg, but which are not a
1942 // sub-register of the chosen representative super register. Assert
1943 // since we can't handle it yet.
1944 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
1945 tri_->isSuperRegister(*AS, SpillReg));
// Collect SpillReg plus all of its sub-registers that have intervals.
1948 SmallVector<unsigned, 4> PRegs;
1949 if (hasInterval(SpillReg))
1950 PRegs.push_back(SpillReg);
1952 SmallSet<unsigned, 4> Added;
1953 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
1954 if (Added.insert(*AS) && hasInterval(*AS)) {
1955 PRegs.push_back(*AS);
1956 for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
// For each instruction referencing li.reg (visited once via SeenMIs),
// emit an emergency spill for every physreg live across it and carve that
// slot out of the physreg's interval.
1961 SmallPtrSet<MachineInstr*, 8> SeenMIs;
1962 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1963 E = mri_->reg_end(); I != E; ++I) {
1964 MachineOperand &O = I.getOperand();
1965 MachineInstr *MI = O.getParent();
1966 if (MI->isDebugValue() || SeenMIs.count(MI))
1969 SlotIndex Index = getInstructionIndex(MI);
1970 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
1971 unsigned PReg = PRegs[i];
1972 LiveInterval &pli = getInterval(PReg);
1973 if (!pli.liveAt(Index))
1975 vrm.addEmergencySpill(PReg, MI);
1976 SlotIndex StartIdx = Index.getLoadIndex();
1977 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
1978 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
1979 pli.removeRange(StartIdx, EndIdx);
// If the physreg's live range cannot be cut here, register allocation has
// genuinely failed — report a fatal error (with inline-asm diagnostics).
1983 raw_string_ostream Msg(msg);
1984 Msg << "Ran out of registers during register allocation!";
1985 if (MI->isInlineAsm()) {
1986 Msg << "\nPlease check your inline asm statement for invalid "
1987 << "constraints:\n";
1988 MI->print(Msg, tm_);
1990 report_fatal_error(Msg.str());
// Also trim any sub-register intervals live at this point.
1992 for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
1993 if (!hasInterval(*AS))
1995 LiveInterval &spli = getInterval(*AS);
1996 if (spli.liveAt(Index))
1997 spli.removeRange(Index.getLoadIndex(),
1998 Index.getNextIndex().getBaseIndex());
2005 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2006 MachineInstr* startInst) {
2007 LiveInterval& Interval = getOrCreateInterval(reg);
2008 VNInfo* VN = Interval.getNextValue(
2009 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2010 startInst, true, getVNInfoAllocator());
2011 VN->setHasPHIKill(true);
2013 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2014 getMBBEndIdx(startInst->getParent()), VN);
2015 Interval.addRange(LR);