1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
49 // Hidden options for help debugging.
// -disable-rematerialization: globally disables rematerialization of defs
// during spilling (off by default, hidden from -help).
50 static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
// -fast-spill: opt into the fast spilling code path (off by default).
53 static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
// Counters reported under -stats.
56 STATISTIC(numIntervals , "Number of original intervals");
57 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
58 STATISTIC(numSplits , "Number of intervals split");
// Pass ID anchor and static registration under the name "liveintervals".
60 char LiveIntervals::ID = 0;
61 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// getAnalysisUsage - Declare the analyses this pass requires and preserves
// so the pass manager schedules LiveIntervals after PHI elimination,
// two-address lowering, implicit-def processing, and slot-index numbering.
63 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
65 AU.addRequired<AliasAnalysis>();
66 AU.addPreserved<AliasAnalysis>();
67 AU.addPreserved<LiveVariables>();
68 AU.addRequired<LiveVariables>();
69 AU.addPreservedID(MachineLoopInfoID);
70 AU.addPreservedID(MachineDominatorsID);
73 AU.addPreservedID(PHIEliminationID);
74 AU.addRequiredID(PHIEliminationID);
77 AU.addRequiredID(TwoAddressInstructionPassID);
78 AU.addPreserved<ProcessImplicitDefs>();
79 AU.addRequired<ProcessImplicitDefs>();
80 AU.addPreserved<SlotIndexes>();
// Transitive requirement: SlotIndexes must stay alive for as long as this
// analysis is alive, since indices are queried long after runOnMachineFunction.
81 AU.addRequiredTransitive<SlotIndexes>();
82 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Free all per-function state: the LiveInterval objects in
// the reg -> interval map, the VNInfo allocator regions, and any cloned
// machine instructions that were never re-inserted into a block.
85 void LiveIntervals::releaseMemory() {
86 // Free the live intervals themselves.
87 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
88 E = r2iMap_.end(); I != E; ++I)
93 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
94 VNInfoAllocator.DestroyAll();
95 while (!CloneMIs.empty()) {
96 MachineInstr *MI = CloneMIs.back();
// Return the cloned instruction's storage to the MachineFunction.
98 mf_->DeleteMachineInstr(MI);
102 /// runOnMachineFunction - Register allocate the whole function
// Caches pointers to the target's register/instruction info and to the
// required analyses (AA, LiveVariables, SlotIndexes), then computes the
// live intervals for this function.
// NOTE(review): mf_ is read before any visible assignment here; it is
// presumably initialized from &fn on an elided line — confirm upstream.
104 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
106 mri_ = &mf_->getRegInfo();
107 tm_ = &fn.getTarget();
108 tri_ = tm_->getRegisterInfo();
109 tii_ = tm_->getInstrInfo();
110 aa_ = &getAnalysis<AliasAnalysis>();
111 lv_ = &getAnalysis<LiveVariables>();
112 indexes_ = &getAnalysis<SlotIndexes>();
// Cache the set of registers the target allows the allocator to use.
113 allocatableRegs_ = tri_->getAllocatableSet(fn);
// Update the -stats counter with the number of intervals built.
117 numIntervals += getNumIntervals();
123 /// print - Implement the dump method.
124 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
125 OS << "********** INTERVALS **********\n";
// Print every interval in the reg -> interval map.
126 for (const_iterator I = begin(), E = end(); I != E; ++I) {
127 I->second->print(OS, tri_);
// printInstrs - Print each machine instruction with its slot index,
// grouped by basic block, for debugging.
134 void LiveIntervals::printInstrs(raw_ostream &OS) const {
135 OS << "********** MACHINEINSTRS **********\n";
137 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
138 mbbi != mbbe; ++mbbi) {
139 OS << "BB#" << mbbi->getNumber()
140 << ":\t\t# derived from " << mbbi->getName() << "\n";
141 for (MachineBasicBlock::iterator mii = mbbi->begin(),
142 mie = mbbi->end(); mii != mie; ++mii) {
// DBG_VALUE instructions carry no slot index of their own.
143 if (mii->isDebugValue())
146 OS << getInstructionIndex(mii) << '\t' << *mii;
// dumpInstrs - Debug convenience wrapper around printInstrs (body elided
// in this listing; presumably prints to dbgs() — confirm).
151 void LiveIntervals::dumpInstrs() const {
// conflictsWithPhysReg - Return true if assigning physical register 'reg'
// to 'li' would conflict with another (possibly virtual, via the VirtRegMap)
// register used in the instructions spanned by li's single live range.
// Only single-range intervals confined to one basic block are analyzed.
155 bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
156 VirtRegMap &vrm, unsigned reg) {
157 // We don't handle fancy stuff crossing basic block boundaries
158 if (li.ranges.size() != 1)
160 const LiveRange &range = li.ranges.front();
161 SlotIndex idx = range.start.getBaseIndex();
162 SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
164 // Skip deleted instructions
165 MachineInstr *firstMI = getInstructionFromIndex(idx);
166 while (!firstMI && idx != end) {
167 idx = idx.getNextIndex();
168 firstMI = getInstructionFromIndex(idx);
173 // Find last instruction in range
174 SlotIndex lastIdx = end.getPrevIndex();
175 MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
176 while (!lastMI && lastIdx != idx) {
177 lastIdx = lastIdx.getPrevIndex();
178 lastMI = getInstructionFromIndex(lastIdx);
183 // Range cannot cross basic block boundaries or terminators
184 MachineBasicBlock *MBB = firstMI->getParent();
185 if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
// Walk every instruction in [firstMI, lastMI) looking for a conflict.
188 MachineBasicBlock::const_iterator E = lastMI;
190 for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
191 const MachineInstr &MI = *I;
193 // Allow copies to and from li.reg
194 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
195 if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
196 if (SrcReg == li.reg || DstReg == li.reg)
199 // Check for operands using reg
200 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
201 const MachineOperand& mop = MI.getOperand(i);
204 unsigned PhysReg = mop.getReg();
205 if (PhysReg == 0 || PhysReg == li.reg)
// Map virtual register operands through the VirtRegMap to their
// assigned physical register before testing for overlap.
207 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
208 if (!vrm.hasPhys(PhysReg))
210 PhysReg = vrm.getPhys(PhysReg);
// Overlap (including aliases/sub-registers) with 'reg' is a conflict.
212 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
217 // No conflicts found.
221 /// conflictsWithSubPhysRegRef - Similar to conflictsWithPhysRegRef except
222 /// it checks for sub-register reference and it can check use as well.
// Scans every instruction covered by li's live ranges for an explicit
// reference to a sub-register of Reg; JoinedCopies are skipped because
// they are scheduled for deletion by the coalescer.
223 bool LiveIntervals::conflictsWithSubPhysRegRef(LiveInterval &li,
224 unsigned Reg, bool CheckUse,
225 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
226 for (LiveInterval::Ranges::const_iterator
227 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
228 for (SlotIndex index = I->start.getBaseIndex(),
229 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
231 index = index.getNextIndex()) {
232 MachineInstr *MI = getInstructionFromIndex(index);
234 continue; // skip deleted instructions
236 if (JoinedCopies.count(MI))
238 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
239 MachineOperand& MO = MI->getOperand(i);
// Uses only count as conflicts when the caller asked for them.
242 if (MO.isUse() && !CheckUse)
244 unsigned PhysReg = MO.getReg();
245 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
247 if (tri_->isSubRegister(Reg, PhysReg))
// printRegName - Debug helper: print a physical register by its target
// name, or a virtual register as "%reg<N>".
257 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
258 if (TargetRegisterInfo::isPhysicalRegister(reg))
259 dbgs() << tri_->getName(reg);
261 dbgs() << "%reg" << reg;
// MultipleDefsBySameMI - Return true if the instruction defines the same
// virtual register (via distinct sub-registers) in another operand after
// MOIdx, e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 ...
266 bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
267 unsigned Reg = MI.getOperand(MOIdx).getReg();
268 for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
269 const MachineOperand &MO = MI.getOperand(i);
272 if (MO.getReg() == Reg && MO.isDef()) {
// Multiple full defs of the same register by one MI are not expected;
// both operands must name (different) sub-registers.
273 assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
274 MI.getOperand(MOIdx).getSubReg() &&
282 /// isPartialRedef - Return true if the specified def at the specific index is
283 /// partially re-defining the specified live interval. A common case of this is
284 /// a definition of the sub-register.
285 bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
286 LiveInterval &interval) {
// Only sub-register defs can be partial redefs; early-clobbers are excluded.
287 if (!MO.getSubReg() || MO.isEarlyClobber())
// Look up the live range reaching the redef's use slot; if its defining
// instruction also defined this register, this is a partial redef.
290 SlotIndex RedefIndex = MIIdx.getDefIndex();
291 const LiveRange *OldLR =
292 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
293 if (OldLR->valno->isDefAccurate()) {
294 MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
295 return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
// handleVirtualRegisterDef - Extend 'interval' for the def of a virtual
// register by operand MO (operand index MOIdx) of instruction 'mi' at
// slot MIIdx. Handles three situations: the first def of the vreg, a
// two-address/partial redef, and a PHI-join redef.
300 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
301 MachineBasicBlock::iterator mi,
305 LiveInterval &interval) {
307 dbgs() << "\t\tregister: ";
308 printRegName(interval.reg, tri_);
311 // Virtual registers may be defined multiple times (due to phi
312 // elimination and 2-addr elimination). Much of what we do only has to be
313 // done once for the vreg. We use an empty interval to detect the first
314 // time we see a vreg.
315 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
316 if (interval.empty()) {
317 // Get the Idx of the defining instructions.
318 SlotIndex defIndex = MIIdx.getDefIndex();
319 // Earlyclobbers move back one, so that they overlap the live range
321 if (MO.isEarlyClobber())
322 defIndex = MIIdx.getUseIndex();
// Record the copy instruction (subreg ops or target moves) on the value
// number so the coalescer can later identify the copy source.
323 MachineInstr *CopyMI = NULL;
324 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
325 if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
326 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
329 VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
331 assert(ValNo->id == 0 && "First value in interval is not 0?");
333 // Loop over all of the blocks that the vreg is defined in. There are
334 // two cases we have to handle here. The most common case is a vreg
335 // whose lifetime is contained within a basic block. In this case there
336 // will be a single kill, in MBB, which comes after the definition.
337 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
338 // FIXME: what about dead vars?
340 if (vi.Kills[0] != mi)
341 killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
343 killIdx = defIndex.getStoreIndex();
345 // If the kill happens after the definition, we have an intra-block
347 if (killIdx > defIndex) {
348 assert(vi.AliveBlocks.empty() &&
349 "Shouldn't be alive across any blocks!");
350 LiveRange LR(defIndex, killIdx, ValNo);
351 interval.addRange(LR);
352 DEBUG(dbgs() << " +" << LR << "\n");
353 ValNo->addKill(killIdx);
358 // The other case we handle is when a virtual register lives to the end
359 // of the defining block, potentially live across some blocks, then is
360 // live into some number of blocks, but gets killed. Start by adding a
361 // range that goes from this definition to the end of the defining block.
362 LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
363 DEBUG(dbgs() << " +" << NewLR);
364 interval.addRange(NewLR);
366 bool PHIJoin = lv_->isPHIJoin(interval.reg);
369 // A phi join register is killed at the end of the MBB and revived as a new
370 // valno in the killing blocks.
371 assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
372 DEBUG(dbgs() << " phi-join");
373 ValNo->addKill(indexes_->getTerminatorGap(mbb));
374 ValNo->setHasPHIKill(true);
376 // Iterate over all of the blocks that the variable is completely
377 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
379 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
380 E = vi.AliveBlocks.end(); I != E; ++I) {
381 MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
382 LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
383 interval.addRange(LR);
384 DEBUG(dbgs() << " +" << LR);
388 // Finally, this virtual register is live from the start of any killing
389 // block to the 'use' slot of the killing instruction.
390 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
391 MachineInstr *Kill = vi.Kills[i];
392 SlotIndex Start = getMBBStartIdx(Kill->getParent());
393 SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();
395 // Create interval with one of a NEW value number. Note that this value
396 // number isn't actually defined by an instruction, weird huh? :)
398 ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
400 ValNo->setIsPHIDef(true);
402 LiveRange LR(Start, killIdx, ValNo);
403 interval.addRange(LR);
404 ValNo->addKill(killIdx);
405 DEBUG(dbgs() << " +" << LR);
// --- Interval is non-empty: this is a redefinition of the vreg. ---
409 if (MultipleDefsBySameMI(*mi, MOIdx))
410 // Multiple defs of the same virtual register by the same instruction. e.g.
411 // %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
412 // This is likely due to elimination of REG_SEQUENCE instructions. Return
413 // here since there is nothing to do.
416 // If this is the second time we see a virtual register definition, it
417 // must be due to phi elimination or two addr elimination. If this is
418 // the result of two address elimination, then the vreg is one of the
419 // def-and-use register operand.
421 // It may also be partial redef like this:
422 // 80 %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
423 // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
424 bool PartReDef = isPartialRedef(MIIdx, MO, interval);
425 if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
426 // If this is a two-address definition, then we have already processed
427 // the live range. The only problem is that we didn't realize there
428 // are actually two values in the live interval. Because of this we
429 // need to take the LiveRegion that defines this register and split it
431 // Two-address vregs should always only be redefined once. This means
432 // that at this point, there should be exactly one value number in it.
433 assert((PartReDef || interval.containsOneValue()) &&
434 "Unexpected 2-addr liveint!");
435 SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
436 SlotIndex RedefIndex = MIIdx.getDefIndex();
437 if (MO.isEarlyClobber())
438 RedefIndex = MIIdx.getUseIndex();
440 const LiveRange *OldLR =
441 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
442 VNInfo *OldValNo = OldLR->valno;
444 // Delete the initial value, which should be short and continuous,
445 // because the 2-addr copy must be in the same MBB as the redef.
446 interval.removeRange(DefIndex, RedefIndex);
448 // The new value number (#1) is defined by the instruction we claimed
450 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
451 false, // update at *
453 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
455 // Value#0 is now defined by the 2-addr instruction.
456 OldValNo->def = RedefIndex;
457 OldValNo->setCopy(0);
459 // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
460 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
462 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
463 OldValNo->setCopy(&*mi);
465 // Add the new live interval which replaces the range for the input copy.
466 LiveRange LR(DefIndex, RedefIndex, ValNo);
467 DEBUG(dbgs() << " replace range with " << LR);
468 interval.addRange(LR);
469 ValNo->addKill(RedefIndex);
471 // If this redefinition is dead, we need to add a dummy unit live
472 // range covering the def slot.
474 interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
478 dbgs() << " RESULT: ";
479 interval.print(dbgs(), tri_);
481 } else if (lv_->isPHIJoin(interval.reg)) {
482 // In the case of PHI elimination, each variable definition is only
483 // live until the end of the block. We've already taken care of the
484 // rest of the live range.
486 SlotIndex defIndex = MIIdx.getDefIndex();
487 if (MO.isEarlyClobber())
488 defIndex = MIIdx.getUseIndex();
491 MachineInstr *CopyMI = NULL;
492 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
493 if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
494 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
496 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
// PHI-join defs live only to the end of the defining block; the kill is
// recorded in the terminator gap.
498 SlotIndex killIndex = getMBBEndIdx(mbb);
499 LiveRange LR(defIndex, killIndex, ValNo);
500 interval.addRange(LR);
501 ValNo->addKill(indexes_->getTerminatorGap(mbb));
502 ValNo->setHasPHIKill(true);
503 DEBUG(dbgs() << " phi-join +" << LR);
// Any other multiple definition is a bug in the input MI stream.
505 llvm_unreachable("Multiply defined register");
509 DEBUG(dbgs() << '\n');
// handlePhysicalRegisterDef - Add a live range for the def of a physical
// register at MIIdx. Scans forward within the defining block for the kill
// or redefinition that ends the range; CopyMI (may be null) is recorded on
// a freshly created value number.
512 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
513 MachineBasicBlock::iterator mi,
516 LiveInterval &interval,
517 MachineInstr *CopyMI) {
518 // A physical register cannot be live across basic block, so its
519 // lifetime must end somewhere in its defining basic block.
521 dbgs() << "\t\tregister: ";
522 printRegName(interval.reg, tri_);
525 SlotIndex baseIndex = MIIdx;
526 SlotIndex start = baseIndex.getDefIndex();
527 // Earlyclobbers move back one.
528 if (MO.isEarlyClobber())
529 start = MIIdx.getUseIndex();
530 SlotIndex end = start;
532 // If it is not used after definition, it is considered dead at
533 // the instruction defining it. Hence its interval is:
534 // [defSlot(def), defSlot(def)+1)
535 // For earlyclobbers, the defSlot was pushed back one; the extra
536 // advance below compensates.
538 DEBUG(dbgs() << " dead");
539 end = start.getStoreIndex();
543 // If it is not dead on definition, it must be killed by a
544 // subsequent instruction. Hence its interval is:
545 // [defSlot(def), useSlot(kill)+1)
546 baseIndex = baseIndex.getNextIndex();
547 while (++mi != MBB->end()) {
// DBG_VALUEs are transparent: they neither kill nor redefine.
549 if (mi->isDebugValue())
551 if (getInstructionFromIndex(baseIndex) == 0)
552 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
554 if (mi->killsRegister(interval.reg, tri_)) {
555 DEBUG(dbgs() << " killed");
556 end = baseIndex.getDefIndex();
559 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
561 if (mi->isRegTiedToUseOperand(DefIdx)) {
562 // Two-address instruction.
563 end = baseIndex.getDefIndex();
565 // Another instruction redefines the register before it is ever read.
566 // Then the register is essentially dead at the instruction that
567 // defines it. Hence its interval is:
568 // [defSlot(def), defSlot(def)+1)
569 DEBUG(dbgs() << " dead");
570 end = start.getStoreIndex();
576 baseIndex = baseIndex.getNextIndex();
579 // The only case we should have a dead physreg here without a killing or
580 // instruction where we know it's dead is if it is live-in to the function
581 // and never used. Another possible case is the implicit use of the
582 // physical register has been deleted by two-address pass.
583 end = start.getStoreIndex();
586 assert(start < end && "did not find end of interval?");
588 // Already exists? Extend old live interval.
589 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
590 bool Extend = OldLR != interval.end();
591 VNInfo *ValNo = Extend
592 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
// Extending across an early-clobber redef must be remembered so the
// coalescer treats the overlap correctly.
593 if (MO.isEarlyClobber() && Extend)
594 ValNo->setHasRedefByEC(true);
595 LiveRange LR(start, end, ValNo);
596 interval.addRange(LR);
597 LR.valno->addKill(end);
598 DEBUG(dbgs() << " +" << LR << '\n');
// handleRegisterDef - Dispatch a register def to the virtual- or
// physical-register handler. For allocatable physical registers the
// def is also propagated to each sub-register's interval.
601 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
602 MachineBasicBlock::iterator MI,
606 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
607 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
608 getOrCreateInterval(MO.getReg()));
609 else if (allocatableRegs_[MO.getReg()]) {
// Record copy-like instructions so the value number knows its source.
610 MachineInstr *CopyMI = NULL;
611 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
612 if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
613 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
615 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
616 getOrCreateInterval(MO.getReg()), CopyMI);
617 // Def of a register also defines its sub-registers.
618 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
619 // If MI also modifies the sub-register explicitly, avoid processing it
620 // more than once. Do not pass in TRI here so it checks for exact match.
621 if (!MI->modifiesRegister(*AS))
622 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
623 getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Create the live range for a register that is
// live-in to MBB: a range from the block start to its first kill or redef
// in the block, defined by a synthetic PHI-def value number. 'isAlias'
// marks intervals created for sub-registers of the actual live-in.
627 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
629 LiveInterval &interval, bool isAlias) {
631 dbgs() << "\t\tlivein register: ";
632 printRegName(interval.reg, tri_);
635 // Look for kills, if it reaches a def before it's killed, then it shouldn't
636 // be considered a livein.
637 MachineBasicBlock::iterator mi = MBB->begin();
638 MachineBasicBlock::iterator E = MBB->end();
639 // Skip over DBG_VALUE at the start of the MBB.
640 if (mi != E && mi->isDebugValue()) {
641 while (++mi != E && mi->isDebugValue())
644 // MBB is empty except for DBG_VALUE's.
648 SlotIndex baseIndex = MIIdx;
649 SlotIndex start = baseIndex;
650 if (getInstructionFromIndex(baseIndex) == 0)
651 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
653 SlotIndex end = baseIndex;
654 bool SeenDefUse = false;
657 if (mi->killsRegister(interval.reg, tri_)) {
658 DEBUG(dbgs() << " killed");
659 end = baseIndex.getDefIndex();
662 } else if (mi->modifiesRegister(interval.reg, tri_)) {
663 // Another instruction redefines the register before it is ever read.
664 // Then the register is essentially dead at the instruction that defines
665 // it. Hence its interval is:
666 // [defSlot(def), defSlot(def)+1)
667 DEBUG(dbgs() << " dead");
668 end = start.getStoreIndex();
673 while (++mi != E && mi->isDebugValue())
674 // Skip over DBG_VALUE.
677 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
680 // Live-in register might not be used at all.
683 DEBUG(dbgs() << " dead");
684 end = MIIdx.getStoreIndex();
686 DEBUG(dbgs() << " live through");
// The live-in value is defined "at" the block start by a PHI-def value
// number with no defining instruction.
692 interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
693 0, false, VNInfoAllocator);
694 vni->setIsPHIDef(true);
695 LiveRange LR(start, end, vni);
697 interval.addRange(LR);
698 LR.valno->addKill(end);
699 DEBUG(dbgs() << " +" << LR << '\n');
702 /// computeIntervals - computes the live intervals for virtual
703 /// registers. for some ordering of the machine instructions [1,N] a
704 /// live interval is an interval [i, j) where 1 <= i <= j < N for
705 /// which a variable is live
706 void LiveIntervals::computeIntervals() {
707 DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
708 << "********** Function: "
709 << ((Value*)mf_->getFunction())->getName() << '\n');
// Registers that appear only in <undef> uses; they get empty intervals
// at the end so the allocator still knows about them.
711 SmallVector<unsigned, 8> UndefUses;
712 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
714 MachineBasicBlock *MBB = MBBI;
718 // Track the index of the current machine instr.
719 SlotIndex MIIndex = getMBBStartIdx(MBB);
720 DEBUG(dbgs() << "BB#" << MBB->getNumber()
721 << ":\t\t# derived from " << MBB->getName() << "\n");
723 // Create intervals for live-ins to this BB first.
724 for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
725 LE = MBB->livein_end(); LI != LE; ++LI) {
726 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
727 // Multiple live-ins can alias the same register.
728 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
729 if (!hasInterval(*AS))
730 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
734 // Skip over empty initial indices.
735 if (getInstructionFromIndex(MIIndex) == 0)
736 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
738 for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
740 DEBUG(dbgs() << MIIndex << "\t" << *MI);
741 if (MI->isDebugValue())
// Walk the operands in reverse so implicit defs are seen after
// explicit ones.
745 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
746 MachineOperand &MO = MI->getOperand(i);
747 if (!MO.isReg() || !MO.getReg())
750 // handle register defs - build intervals
752 handleRegisterDef(MBB, MI, MIIndex, MO, i);
753 else if (MO.isUndef())
754 UndefUses.push_back(MO.getReg());
757 // Move to the next instr slot.
758 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
762 // Create empty intervals for registers defined by implicit_def's (except
763 // for those implicit_def that define values which are liveout of their
765 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
766 unsigned UndefReg = UndefUses[i];
767 (void)getOrCreateInterval(UndefReg);
// createInterval - Allocate a fresh LiveInterval for 'reg'. Physical
// registers get an effectively infinite spill weight (HUGE_VALF) so the
// allocator never chooses to spill them; virtual registers start at 0.
771 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
772 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
773 return new LiveInterval(reg, Weight);
776 /// dupInterval - Duplicate a live interval. The caller is responsible for
777 /// managing the allocated memory.
778 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
// Deep-copy li's ranges and value numbers into a new interval that shares
// this pass's VNInfo allocator.
779 LiveInterval *NewLI = createInterval(li->reg);
780 NewLI->Copy(*li, mri_, getVNInfoAllocator());
784 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
785 /// copy field and returns the source register that defines it.
786 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
790 if (VNI->getCopy()->isExtractSubreg()) {
791 // If it's extracting out of a physical register, return the sub-register.
792 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
793 if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
794 unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
795 unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
796 if (SrcSubReg == DstSubReg)
797 // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
798 // reg1034 can still be coalesced to EDX.
// Otherwise narrow to the extracted physical sub-register.
800 assert(DstSubReg == 0);
801 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
// INSERT_SUBREG / SUBREG_TO_REG take their inserted value as operand 2.
804 } else if (VNI->getCopy()->isInsertSubreg() ||
805 VNI->getCopy()->isSubregToReg())
806 return VNI->getCopy()->getOperand(2).getReg();
808 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
809 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
811 llvm_unreachable("Unrecognized copy instruction!");
815 //===----------------------------------------------------------------------===//
816 // Register allocator hooks.
819 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
820 /// allow one) virtual register operand, then its uses are implicitly using
821 /// the register. Returns the virtual register.
822 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
823 MachineInstr *MI) const {
825 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
826 MachineOperand &MO = MI->getOperand(i);
827 if (!MO.isReg() || !MO.isUse())
829 unsigned Reg = MO.getReg();
// Ignore the interval's own register and any operand that is not a
// register the allocator cares about.
830 if (Reg == 0 || Reg == li.reg)
// Reserved (non-allocatable) physical registers are always available.
833 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
834 !allocatableRegs_[Reg])
836 // FIXME: For now, only remat MI with at most one register operand.
838 "Can't rematerialize instruction with multiple register operand!");
847 /// isValNoAvailableAt - Return true if the val# of the specified interval
848 /// which reaches the given instruction also reaches the specified use index.
849 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
850 SlotIndex UseIdx) const {
// The same value number must cover both MI's index and UseIdx.
851 SlotIndex Index = getInstructionIndex(MI);
852 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
853 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
854 return UI != li.end() && UI->valno == ValNo;
857 /// isReMaterializable - Returns true if the definition MI of the specified
858 /// val# of the specified interval is re-materializable.
// Checks the target's triviality test, then verifies any implicit virtual
// register use is available at every use of the value, and that no operand
// register is itself about to be spilled (SpillIs).
859 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
860 const VNInfo *ValNo, MachineInstr *MI,
861 SmallVectorImpl<LiveInterval*> &SpillIs,
866 if (!tii_->isTriviallyReMaterializable(MI, aa_))
869 // Target-specific code can mark an instruction as being rematerializable
870 // if it has one virtual reg use, though it had better be something like
871 // a PIC base register which is likely to be live everywhere.
872 unsigned ImpUse = getReMatImplicitUse(li, MI);
874 const LiveInterval &ImpLi = getInterval(ImpUse);
875 for (MachineRegisterInfo::use_nodbg_iterator
876 ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
878 MachineInstr *UseMI = &*ri;
879 SlotIndex UseIdx = getInstructionIndex(UseMI);
// Only uses reached by this particular value number matter.
880 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
882 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
886 // If a register operand of the re-materialized instruction is going to
887 // be spilled next, then it's not legal to re-materialize this instruction.
888 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
889 if (ImpUse == SpillIs[i]->reg)
895 /// isReMaterializable - Returns true if the definition MI of the specified
896 /// val# of the specified interval is re-materializable.
// Convenience overload: delegates to the full version with an empty
// spill list and a throwaway load flag.
897 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
898 const VNInfo *ValNo, MachineInstr *MI) {
899 SmallVector<LiveInterval*, 4> Dummy1;
901 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
904 /// isReMaterializable - Returns true if every definition of MI of every
905 /// val# of the specified interval is re-materializable.
906 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
907 SmallVectorImpl<LiveInterval*> &SpillIs,
// Each value number's defining instruction must individually pass the
// per-val# rematerialization check.
910 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
912 const VNInfo *VNI = *i;
914 continue; // Dead val#.
915 // Is the def for the val# rematerializable?
// Without an accurate def slot we cannot find the defining MI.
916 if (!VNI->isDefAccurate())
918 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
919 bool DefIsLoad = false;
921 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
928 /// FilterFoldedOps - Filter out two-address use operands. Return
929 /// true if it finds any issue with the operands that ought to prevent
// Accumulates mod/ref information for the folded operands into MRInfo
// and collects the foldable operand indexes in FoldOps.
931 static bool FilterFoldedOps(MachineInstr *MI,
932 SmallVector<unsigned, 2> &Ops,
934 SmallVector<unsigned, 2> &FoldOps) {
936 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
937 unsigned OpIdx = Ops[i];
938 MachineOperand &MO = MI->getOperand(OpIdx);
939 // FIXME: fold subreg use.
943 MRInfo |= (unsigned)VirtRegMap::isMod;
945 // Filter out two-address use operand(s).
946 if (MI->isRegTiedToDefOperand(OpIdx)) {
// A tied use implies both a read and a write of the register.
947 MRInfo = VirtRegMap::isModRef;
950 MRInfo |= (unsigned)VirtRegMap::isRef;
952 FoldOps.push_back(OpIdx);
958 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
959 /// slot / to reg or any rematerialized load into ith operand of specified
960 /// MI. If it is successful, MI is updated with the newly created MI and
962 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
963 VirtRegMap &vrm, MachineInstr *DefMI,
965 SmallVector<unsigned, 2> &Ops,
966 bool isSS, int Slot, unsigned Reg) {
967 // If it is an implicit def instruction, just delete it.
968 if (MI->isImplicitDef()) {
969 RemoveMachineInstrFromMaps(MI);
970 vrm.RemoveMachineInstrFromMaps(MI);
971 MI->eraseFromParent();
976 // Filter the list of operand indexes that are to be folded. Abort if
977 // any operand will prevent folding.
979 SmallVector<unsigned, 2> FoldOps;
980 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
983 // The only time it's safe to fold into a two address instruction is when
984 // it's folding reload and spill from / into a spill stack slot.
985 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Ask the target to fold either the stack slot (spill/reload) or the
// rematerialized defining instruction into MI.
988 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
989 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
991 // Remember this instruction uses the spill slot.
992 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
994 // Attempt to fold the memory reference into the instruction. If
995 // we can do this, we don't need to insert spill code.
996 MachineBasicBlock &MBB = *MI->getParent();
997 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
998 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Transfer all VirtRegMap bookkeeping from the old MI to the folded one,
// then splice the folded instruction in place of the original.
999 vrm.transferSpillPts(MI, fmi);
1000 vrm.transferRestorePts(MI, fmi);
1001 vrm.transferEmergencySpills(MI, fmi);
1002 ReplaceMachineInstrInMaps(MI, fmi);
1003 MI = MBB.insert(MBB.erase(MI), fmi);
1010 /// canFoldMemoryOperand - Returns true if the specified load / store
1011 /// folding is possible.
// Dry-run companion of tryFoldMemoryOperand: performs the same operand
// filtering and legality checks but delegates the final answer to the
// target without modifying MI. `ReMat` (parameter partially elided from
// this gapped listing) indicates the fold source is a rematerialized def.
1012 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1013 SmallVector<unsigned, 2> &Ops,
1015 // Filter the list of operand indexes that are to be folded. Abort if
1016 // any operand will prevent folding.
1017 unsigned MRInfo = 0;
1018 SmallVector<unsigned, 2> FoldOps;
1019 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1022 // It's only legal to remat for a use, not a def.
1023 if (ReMat && (MRInfo & VirtRegMap::isMod))
// Target-specific check on the surviving operand list.
1026 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return true if every range of `li` is covered by the
// same machine basic block. Used to decide whether an interval is worth
// splitting (a single-block interval is not). The comparison of mbb vs mbb2
// and the returns are elided from this gapped listing.
1029 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1030 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
// Block covering the first range; subsequent ranges are checked against it.
1032 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
1037 for (++itr; itr != li.ranges.end(); ++itr) {
1038 MachineBasicBlock *mbb2 =
1039 indexes_->getMBBCoveringRange(itr->start, itr->end);
1048 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1049 /// interval on to-be re-materialized operands of MI) with new register.
// For each virtual-register operand of MI whose def was rematerialized,
// find the use of li.reg inside the cached remat instruction and redirect
// it to NewVReg so the remat clone stays consistent with the rewrite.
1050 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1051 MachineInstr *MI, unsigned NewVReg,
1053 // There is an implicit use. That means one of the other operand is
1054 // being remat'ed and the remat'ed instruction has li.reg as an
1055 // use operand. Make sure we rewrite that as well.
1056 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1057 MachineOperand &MO = MI->getOperand(i);
// Only virtual registers can have been rematerialized.
1060 unsigned Reg = MO.getReg();
1061 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1063 if (!vrm.isReMaterialized(Reg))
1065 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
// Redirect the remat clone's use of the spilled register, if present.
1066 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1068 UseMO->setReg(NewVReg);
1072 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1073 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// Rewrites one instruction's operands that reference li.reg to a fresh (or
// per-block reused) virtual register NewVReg, attempting load/store folding
// and rematerialization along the way, and extends the new spill interval
// nI over the rewritten def/use. Returns whether all rewritten operands
// could be folded (CanFold). HasDef/HasUse are out-parameters describing
// what was rewritten. NOTE(review): this listing is gapped; several guard
// conditions, braces, and the `MI` parameter declaration are elided.
1074 bool LiveIntervals::
1075 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1076 bool TrySplit, SlotIndex index, SlotIndex end,
1078 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1079 unsigned Slot, int LdSlot,
1080 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1082 const TargetRegisterClass* rc,
1083 SmallVector<int, 4> &ReMatIds,
1084 const MachineLoopInfo *loopInfo,
1085 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1086 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1087 std::vector<LiveInterval*> &NewLIs) {
1088 bool CanFold = false;
// Scan every operand; only those naming li.reg (virtual) are rewritten.
1090 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1091 MachineOperand& mop = MI->getOperand(i);
1094 unsigned Reg = mop.getReg();
1095 unsigned RegI = Reg;
1096 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1101 bool TryFold = !DefIsReMat;
1102 bool FoldSS = true; // Default behavior unless it's a remat.
1103 int FoldSlot = Slot;
1105 // If this is the rematerializable definition MI itself and
1106 // all of its uses are rematerialized, simply delete it.
1107 if (MI == ReMatOrigDefMI && CanDelete) {
1108 DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
1110 RemoveMachineInstrFromMaps(MI);
1111 vrm.RemoveMachineInstrFromMaps(MI);
1112 MI->eraseFromParent();
1116 // If def for this use can't be rematerialized, then try folding.
1117 // If def is rematerializable and it's a load, also try folding.
1118 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1120 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1126 // Scan all of the operands of this instruction rewriting operands
1127 // to use NewVReg instead of li.reg as appropriate. We do this for
1130 // 1. If the instr reads the same spilled vreg multiple times, we
1131 // want to reuse the NewVReg.
1132 // 2. If the instr is a two-addr instruction, we are required to
1133 // keep the src/dst regs pinned.
1135 // Keep track of whether we replace a use and/or def so that we can
1136 // create the spill interval with the appropriate range.
1138 HasUse = mop.isUse();
1139 HasDef = mop.isDef();
1140 SmallVector<unsigned, 2> Ops;
// Collect the other operands of this MI that also reference li.reg so
// they are all rewritten to the same NewVReg in one pass.
1142 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1143 const MachineOperand &MOj = MI->getOperand(j);
1146 unsigned RegJ = MOj.getReg();
1147 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
// <undef> operands do not contribute to the spill interval's extent.
1151 if (!MOj.isUndef()) {
1152 HasUse |= MOj.isUse();
1153 HasDef |= MOj.isDef();
1158 // Create a new virtual register for the spill interval.
1159 // Create the new register now so we can map the fold instruction
1160 // to the new register so when it is unfolded we get the correct
1162 bool CreatedNewVReg = false;
1164 NewVReg = mri_->createVirtualRegister(rc);
1166 CreatedNewVReg = true;
1168 // The new virtual register should get the same allocation hints as the
1170 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1171 if (Hint.first || Hint.second)
1172 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1178 // Do not fold load / store here if we are splitting. We'll find an
1179 // optimal point to insert a load / store later.
1181 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1182 Ops, FoldSS, FoldSlot, NewVReg)) {
1183 // Folding the load/store can completely change the instruction in
1184 // unpredictable ways, rescan it from the beginning.
1187 // We need to give the new vreg the same stack slot as the
1188 // spilled interval.
1189 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
// If folding deleted the instruction it is gone from the index map;
// otherwise restart the operand scan on the (new) folded instruction.
1195 if (isNotInMIMap(MI))
1197 goto RestartInstruction;
1200 // We'll try to fold it later if it's profitable.
1201 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand (and all companions found above) to NewVReg;
// implicit operands require the remat clone to be patched as well.
1205 mop.setReg(NewVReg);
1206 if (mop.isImplicit())
1207 rewriteImplicitOps(li, MI, NewVReg, vrm);
1209 // Reuse NewVReg for other reads.
1210 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1211 MachineOperand &mopj = MI->getOperand(Ops[j]);
1212 mopj.setReg(NewVReg);
1213 if (mopj.isImplicit())
1214 rewriteImplicitOps(li, MI, NewVReg, vrm);
1217 if (CreatedNewVReg) {
// Record the remat mapping; a single remat id is shared per value
// number so repeated rewrites of the same VNI reuse it.
1219 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1220 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1221 // Each valnum may have its own remat id.
1222 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1224 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1226 if (!CanDelete || (HasUse && HasDef)) {
1227 // If this is a two-addr instruction then its use operands are
1228 // rematerializable but its def is not. It should be assigned a
1230 vrm.assignVirt2StackSlot(NewVReg, Slot);
1233 vrm.assignVirt2StackSlot(NewVReg, Slot);
1235 } else if (HasUse && HasDef &&
1236 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1237 // If this interval hasn't been assigned a stack slot (because earlier
1238 // def is a deleted remat def), do it now.
1239 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1240 vrm.assignVirt2StackSlot(NewVReg, Slot);
1243 // Re-matting an instruction with virtual register use. Add the
1244 // register as an implicit use on the use MI.
1245 if (DefIsReMat && ImpUse)
1246 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1248 // Create a new register interval for this spill / remat.
1249 LiveInterval &nI = getOrCreateInterval(NewVReg);
1250 if (CreatedNewVReg) {
1251 NewLIs.push_back(&nI);
// Remember which NewVReg serves this basic block so later rewrites in
// the same block can reuse it instead of creating another interval.
1252 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1254 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Grow the new interval over the rewritten access: a fresh [load,def)
// range for a brand-new vreg, or an extension of the last range when
// reusing a split vreg; defs additionally get a [def,store) range.
1258 if (CreatedNewVReg) {
1259 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1260 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1261 DEBUG(dbgs() << " +" << LR);
1264 // Extend the split live interval to this def / use.
1265 SlotIndex End = index.getDefIndex();
1266 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1267 nI.getValNumInfo(nI.getNumValNums()-1));
1268 DEBUG(dbgs() << " +" << LR);
1273 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1274 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1275 DEBUG(dbgs() << " +" << LR);
1280 dbgs() << "\t\t\t\tAdded new interval: ";
1281 nI.print(dbgs(), tri_);
// anyKillInMBBAfterIdx - Return true if the value `VNI` of interval `li`
// has a (non-PHI) kill inside `MBB` strictly after `Idx`. Used to decide
// whether a def is still live-out past a given point within the block.
// (The `VNI` parameter line is elided from this gapped listing.)
1287 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1289 MachineBasicBlock *MBB,
1290 SlotIndex Idx) const {
1291 SlotIndex End = getMBBEndIdx(MBB);
1292 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
// PHI kills are markers, not real instructions in this block.
1293 if (VNI->kills[j].isPHI())
1296 SlotIndex KillIdx = VNI->kills[j];
// Kill must be after Idx but not past the end of the block.
1297 if (KillIdx > Idx && KillIdx <= End)
1303 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1304 /// during spilling.
// Records one def and/or use of the spilled register: the instruction's
// slot index, the instruction itself, and whether the operand set at that
// instruction includes a use and/or a def. (Field declarations are elided
// from this gapped listing; only the constructor is visible.)
1306 struct RewriteInfo {
1311 RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
1312 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// Strict-weak ordering of RewriteInfo by instruction slot index, used to
// sort the rewrite worklist into program order before processing.
1315 struct RewriteInfoCompare {
1316 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1317 return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Drive rewriteInstructionForSpills over every
// instruction that touches li.reg within the live range `I`. Collects all
// defs/uses in index order, rewrites them (possibly reusing one NewVReg per
// basic block), and records per-block spill points (SpillMBBs/SpillIdxes) and
// restore points (RestoreMBBs/RestoreIdxes) for the caller to materialize.
// NOTE(review): this listing is gapped; several conditions, braces and
// statements are elided between the visible lines.
1322 void LiveIntervals::
1323 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1324 LiveInterval::Ranges::const_iterator &I,
1325 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1326 unsigned Slot, int LdSlot,
1327 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1329 const TargetRegisterClass* rc,
1330 SmallVector<int, 4> &ReMatIds,
1331 const MachineLoopInfo *loopInfo,
1332 BitVector &SpillMBBs,
1333 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1334 BitVector &RestoreMBBs,
1335 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1336 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1337 std::vector<LiveInterval*> &NewLIs) {
1338 bool AllCanFold = true;
1339 unsigned NewVReg = 0;
// Normalize range boundaries to whole-instruction (base) indices.
1340 SlotIndex start = I->start.getBaseIndex();
1341 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1343 // First collect all the def / use in this live range that will be rewritten.
1344 // Make sure they are sorted according to instruction index.
1345 std::vector<RewriteInfo> RewriteMIs;
1346 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1347 re = mri_->reg_end(); ri != re; ) {
1348 MachineInstr *MI = &*ri;
1349 MachineOperand &O = ri.getOperand();
1351 if (MI->isDebugValue()) {
1352 // Modify DBG_VALUE now that the value is in a spill slot.
1353 if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
// Rebuild the DBG_VALUE as a frame-index debug value so debug info
// keeps tracking the variable in its stack slot.
1354 uint64_t Offset = MI->getOperand(1).getImm();
1355 const MDNode *MDPtr = MI->getOperand(2).getMetadata();
1356 DebugLoc DL = MI->getDebugLoc();
1357 int FI = isLoadSS ? LdSlot : (int)Slot;
1358 if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
1359 Offset, MDPtr, DL)) {
1360 DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
1361 ReplaceMachineInstrInMaps(MI, NewDV);
1362 MachineBasicBlock *MBB = MI->getParent();
1363 MBB->insert(MBB->erase(MI), NewDV);
// Target could not express a frame-index DBG_VALUE: drop the debug
// info rather than leave it pointing at a dead vreg.
1368 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1369 RemoveMachineInstrFromMaps(MI);
1370 vrm.RemoveMachineInstrFromMaps(MI);
1371 MI->eraseFromParent();
1374 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
// Only accesses inside [start, end) belong to this live range.
1375 SlotIndex index = getInstructionIndex(MI);
1376 if (index < start || index >= end)
1380 // Must be defined by an implicit def. It should not be spilled. Note,
1381 // this is for correctness reason. e.g.
1382 // 8 %reg1024<def> = IMPLICIT_DEF
1383 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1384 // The live range [12, 14) are not part of the r1024 live interval since
1385 // it's defined by an implicit def. It will not conflicts with live
1386 // interval of r1025. Now suppose both registers are spilled, you can
1387 // easily see a situation where both registers are reloaded before
1388 // the INSERT_SUBREG and both target registers that would overlap.
1390 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1392 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
// If the remat def reads another vreg, that vreg must stay in a register.
1394 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1395 // Now rewrite the defs and uses.
1396 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1397 RewriteInfo &rwi = RewriteMIs[i];
1399 SlotIndex index = rwi.Index;
1400 bool MIHasUse = rwi.HasUse;
1401 bool MIHasDef = rwi.HasDef;
1402 MachineInstr *MI = rwi.MI;
1403 // If MI def and/or use the same register multiple times, then there
1404 // are multiple entries.
// Coalesce consecutive worklist entries for the same MI into one rewrite.
1405 unsigned NumUses = MIHasUse;
1406 while (i != e && RewriteMIs[i].MI == MI) {
1407 assert(RewriteMIs[i].Index == index);
1408 bool isUse = RewriteMIs[i].HasUse;
1409 if (isUse) ++NumUses;
1411 MIHasDef |= RewriteMIs[i].HasDef;
1414 MachineBasicBlock *MBB = MI->getParent();
1416 if (ImpUse && MI != ReMatDefMI) {
1417 // Re-matting an instruction with virtual register use. Prevent interval
1418 // from being spilled.
1419 getInterval(ImpUse).markNotSpillable();
// Decide whether to reuse the NewVReg already created for this block.
1422 unsigned MBBId = MBB->getNumber();
1423 unsigned ThisVReg = 0;
1425 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1426 if (NVI != MBBVRegsMap.end()) {
1427 ThisVReg = NVI->second;
1434 // It's better to start a new interval to avoid artifically
1435 // extend the new interval.
1436 if (MIHasDef && !MIHasUse) {
1437 MBBVRegsMap.erase(MBB->getNumber());
1443 bool IsNew = ThisVReg == 0;
1445 // This ends the previous live interval. If all of its def / use
1446 // can be folded, give it a low spill weight.
1447 if (NewVReg && TrySplit && AllCanFold) {
1448 LiveInterval &nI = getOrCreateInterval(NewVReg);
1455 bool HasDef = false;
1456 bool HasUse = false;
1457 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1458 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1459 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1460 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1461 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
// Nothing was rewritten (e.g. the remat def itself was deleted).
1462 if (!HasDef && !HasUse)
1465 AllCanFold &= CanFold;
1467 // Update weight of spill interval.
1468 LiveInterval &nI = getOrCreateInterval(NewVReg);
1470 // The spill weight is now infinity as it cannot be spilled again.
1471 nI.markNotSpillable();
1475 // Keep track of the last def and first use in each MBB.
1477 if (MI != ReMatOrigDefMI || !CanDelete) {
1478 bool HasKill = false;
1480 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1482 // If this is a two-address code, then this index starts a new VNInfo.
1483 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1485 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1487 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1488 SpillIdxes.find(MBBId);
// Record (or update) the latest spill point for this vreg in MBBId.
1490 if (SII == SpillIdxes.end()) {
1491 std::vector<SRInfo> S;
1492 S.push_back(SRInfo(index, NewVReg, true));
1493 SpillIdxes.insert(std::make_pair(MBBId, S));
1494 } else if (SII->second.back().vreg != NewVReg) {
1495 SII->second.push_back(SRInfo(index, NewVReg, true));
1496 } else if (index > SII->second.back().index) {
1497 // If there is an earlier def and this is a two-address
1498 // instruction, then it's not possible to fold the store (which
1499 // would also fold the load).
1500 SRInfo &Info = SII->second.back();
1502 Info.canFold = !HasUse;
1504 SpillMBBs.set(MBBId);
1505 } else if (SII != SpillIdxes.end() &&
1506 SII->second.back().vreg == NewVReg &&
1507 index > SII->second.back().index) {
1508 // There is an earlier def that's not killed (must be two-address).
1509 // The spill is no longer needed.
1510 SII->second.pop_back();
1511 if (SII->second.empty()) {
1512 SpillIdxes.erase(MBBId);
1513 SpillMBBs.reset(MBBId);
// Use handling: folding a spill/restore is unsafe once a later use
// of the same vreg is seen in the block.
1520 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1521 SpillIdxes.find(MBBId);
1522 if (SII != SpillIdxes.end() &&
1523 SII->second.back().vreg == NewVReg &&
1524 index > SII->second.back().index)
1525 // Use(s) following the last def, it's not safe to fold the spill.
1526 SII->second.back().canFold = false;
1527 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1528 RestoreIdxes.find(MBBId);
1529 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1530 // If we are splitting live intervals, only fold if it's the first
1531 // use and there isn't another use later in the MBB.
1532 RII->second.back().canFold = false;
1534 // Only need a reload if there isn't an earlier def / use.
1535 if (RII == RestoreIdxes.end()) {
1536 std::vector<SRInfo> Infos;
1537 Infos.push_back(SRInfo(index, NewVReg, true));
1538 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1540 RII->second.push_back(SRInfo(index, NewVReg, true));
1542 RestoreMBBs.set(MBBId);
1546 // Update spill weight.
// Weight scales with loop depth: deeper accesses are costlier to spill.
1547 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1548 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1551 if (NewVReg && TrySplit && AllCanFold) {
1552 // If all of its def / use can be folded, give it a low spill weight.
1553 LiveInterval &nI = getOrCreateInterval(NewVReg);
// alsoFoldARestore - Return true if a foldable restore of vreg `vr` is
// recorded at `index` in block `Id` (i.e. folding the spill would also
// fold the corresponding reload).
1558 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1559 unsigned vr, BitVector &RestoreMBBs,
1560 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores recorded for this block at all.
1561 if (!RestoreMBBs[Id])
1563 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1564 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1565 if (Restores[i].index == index &&
1566 Restores[i].vreg == vr &&
1567 Restores[i].canFold)
// eraseRestoreInfo - Invalidate the recorded restore point(s) of vreg `vr`
// at `index` in block `Id` by resetting their index to the invalid
// SlotIndex() sentinel (later consumers skip entries with invalid indices).
1572 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1573 unsigned vr, BitVector &RestoreMBBs,
1574 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1575 if (!RestoreMBBs[Id])
1577 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1578 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
// NOTE(review): condition tests `Restores[i].vreg` truthiness, not
// equality with `vr` — looks suspicious; confirm against upstream.
1579 if (Restores[i].index == index && Restores[i].vreg)
1580 Restores[i].index = SlotIndex();
1583 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1584 /// spilled and create empty intervals for their uses.
// Walks all remaining references to li.reg after rewriting: IMPLICIT_DEF
// defs are erased outright; each surviving use gets a brand-new empty
// interval (marked implicitly-defined in the VirtRegMap) and its operands
// are redirected to the new vreg.
1586 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1587 const TargetRegisterClass* rc,
1588 std::vector<LiveInterval*> &NewLIs) {
1589 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1590 re = mri_->reg_end(); ri != re; ) {
1591 MachineOperand &O = ri.getOperand();
1592 MachineInstr *MI = &*ri;
1594 if (MI->isDebugValue()) {
1595 // Remove debug info for now.
1597 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
// Any def of li.reg left at this point must be an IMPLICIT_DEF; real
// defs were already rewritten to new vregs.
1601 assert(MI->isImplicitDef() &&
1602 "Register def was not rewritten?");
1603 RemoveMachineInstrFromMaps(MI);
1604 vrm.RemoveMachineInstrFromMaps(MI);
1605 MI->eraseFromParent();
1607 // This must be an use of an implicit_def so it's not part of the live
1608 // interval. Create a new empty live interval for it.
1609 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1610 unsigned NewVReg = mri_->createVirtualRegister(rc);
1612 vrm.setIsImplicitlyDefined(NewVReg);
1613 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Redirect every operand of this MI that names li.reg to the new vreg.
1614 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1615 MachineOperand &MO = MI->getOperand(i);
1616 if (MO.isReg() && MO.getReg() == li.reg) {
// getSpillWeight - Compute the contribution of one def/use at the given
// loop depth to an interval's spill weight: (isDef + isUse) scaled by a
// tempered exponential in loopDepth. (The clamp statement under the
// depth check is elided from this gapped listing.)
1626 LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1627 // Limit the loop depth ridiculousness.
1628 if (loopDepth > 200)
1631 // The loop depth is used to roughly estimate the number of times the
1632 // instruction is executed. Something like 10^d is simple, but will quickly
1633 // overflow a float. This expression behaves like 10^d for small d, but is
1634 // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1635 // headroom before overflow.
1636 float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
// isDef/isUse are 0/1, so the factor is 0, 1, or 2.
1638 return (isDef + isUse) * lc;
// normalizeSpillWeights - Apply normalizeSpillWeight to every interval
// produced by spilling so their weights are comparable to other intervals.
1642 LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1643 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1644 normalizeSpillWeight(*NewLIs[i]);
// addIntervalsForSpillsFast - Simplified spiller path (enabled by the
// -fast-spill flag): assigns one stack slot, then for each instruction
// touching li.reg either folds the memory access or creates a fresh
// unspillable vreg with a minimal interval plus explicit spill/restore
// points. Returns the newly created intervals. NOTE(review): listing is
// gapped; some conditions/braces are elided.
1647 std::vector<LiveInterval*> LiveIntervals::
1648 addIntervalsForSpillsFast(const LiveInterval &li,
1649 const MachineLoopInfo *loopInfo,
1651 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1653 std::vector<LiveInterval*> added;
1655 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1658 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1663 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Iterate def/use list; the iterator is re-fetched after each rewrite
// because rewriting invalidates it.
1665 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1666 while (RI != mri_->reg_end()) {
1667 MachineInstr* MI = &*RI;
1669 SmallVector<unsigned, 2> Indices;
1670 bool HasUse = false;
1671 bool HasDef = false;
// Collect every operand index in MI that references li.reg.
1673 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1674 MachineOperand& mop = MI->getOperand(i);
1675 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1677 HasUse |= MI->getOperand(i).isUse();
1678 HasDef |= MI->getOperand(i).isDef();
1680 Indices.push_back(i);
// Prefer folding the stack access directly into MI; otherwise rewrite
// to a fresh vreg pinned to the same slot.
1683 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1684 Indices, true, slot, li.reg)) {
1685 unsigned NewVReg = mri_->createVirtualRegister(rc);
1687 vrm.assignVirt2StackSlot(NewVReg, slot);
1689 // create a new register for this spill
1690 LiveInterval &nI = getOrCreateInterval(NewVReg);
1691 nI.markNotSpillable();
1693 // Rewrite register operands to use the new vreg.
1694 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1695 E = Indices.end(); I != E; ++I) {
1696 MI->getOperand(*I).setReg(NewVReg);
// The new vreg dies at this instruction — mark uses as kills.
1698 if (MI->getOperand(*I).isUse())
1699 MI->getOperand(*I).setIsKill(true);
1702 // Fill in the new live interval.
1703 SlotIndex index = getInstructionIndex(MI);
// Uses live from the reload to the use; defs live from the def to
// the following store.
1705 LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1706 nI.getNextValue(SlotIndex(), 0, false,
1707 getVNInfoAllocator()));
1708 DEBUG(dbgs() << " +" << LR);
1710 vrm.addRestorePoint(NewVReg, MI);
1713 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1714 nI.getNextValue(SlotIndex(), 0, false,
1715 getVNInfoAllocator()));
1716 DEBUG(dbgs() << " +" << LR);
1718 vrm.addSpillPoint(NewVReg, true, MI);
1721 added.push_back(&nI);
1724 dbgs() << "\t\t\t\tadded new interval: ";
// Restart iteration: the rewrite removed MI from li.reg's use list.
1731 RI = mri_->reg_begin(li.reg);
// addIntervalsForSpills - Main spiller entry point. Decides per value number
// whether the def is rematerializable, assigns a stack slot if any value
// must live in memory, rewrites all defs/uses via
// rewriteInstructionsForSpills, then materializes the recorded spill and
// restore points (folding where possible) and finalizes kill flags and
// weights on the new intervals. NOTE(review): listing is gapped; guard
// conditions, braces and some statements are elided between visible lines.
1737 std::vector<LiveInterval*> LiveIntervals::
1738 addIntervalsForSpills(const LiveInterval &li,
1739 SmallVectorImpl<LiveInterval*> &SpillIs,
1740 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
// Optional fast path selected by the -fast-spill flag.
1742 if (EnableFastSpilling)
1743 return addIntervalsForSpillsFast(li, loopInfo, vrm);
1745 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1748 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1749 li.print(dbgs(), tri_);
1753 // Each bit specify whether a spill is required in the MBB.
1754 BitVector SpillMBBs(mf_->getNumBlockIDs());
1755 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1756 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1757 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1758 DenseMap<unsigned,unsigned> MBBVRegsMap;
1759 std::vector<LiveInterval*> NewLIs;
1760 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-value-number remat state, indexed by VNInfo::id.
1762 unsigned NumValNums = li.getNumValNums();
1763 SmallVector<MachineInstr*, 4> ReMatDefs;
1764 ReMatDefs.resize(NumValNums, NULL);
1765 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1766 ReMatOrigDefs.resize(NumValNums, NULL);
1767 SmallVector<int, 4> ReMatIds;
1768 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1769 BitVector ReMatDelete(NumValNums);
1770 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1772 // Spilling a split live interval. It cannot be split any further. Also,
1773 // it's also guaranteed to be a single val# / range interval.
1774 if (vrm.getPreSplitReg(li.reg)) {
1775 vrm.setIsSplitFromReg(li.reg, 0);
1776 // Unset the split kill marker on the last use.
1777 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1778 if (KillIdx != SlotIndex()) {
1779 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1780 assert(KillMI && "Last use disappeared?");
1781 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1782 assert(KillOp != -1 && "Last use disappeared?");
1783 KillMI->getOperand(KillOp).setIsKill(false);
1785 vrm.removeKillPoint(li.reg);
1786 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1787 Slot = vrm.getStackSlot(li.reg);
1788 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1789 MachineInstr *ReMatDefMI = DefIsReMat ?
1790 vrm.getReMaterializedMI(li.reg) : NULL;
1792 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1793 bool isLoad = isLoadSS ||
1794 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1795 bool IsFirstRange = true;
1796 for (LiveInterval::Ranges::const_iterator
1797 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1798 // If this is a split live interval with multiple ranges, it means there
1799 // are two-address instructions that re-defined the value. Only the
1800 // first def can be rematerialized!
1802 // Note ReMatOrigDefMI has already been deleted.
1803 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1804 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1805 false, vrm, rc, ReMatIds, loopInfo,
1806 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1807 MBBVRegsMap, NewLIs);
// Non-first ranges are never remat'ed (see comment above).
1809 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1810 Slot, 0, false, false, false,
1811 false, vrm, rc, ReMatIds, loopInfo,
1812 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1813 MBBVRegsMap, NewLIs);
1815 IsFirstRange = false;
1818 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1819 normalizeSpillWeights(NewLIs);
// Normal (non-presplit) path: split only if the interval spans blocks.
1823 bool TrySplit = !intervalIsInOneMBB(li);
1826 bool NeedStackSlot = false;
1827 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1829 const VNInfo *VNI = *i;
1830 unsigned VN = VNI->id;
1831 if (VNI->isUnused())
1832 continue; // Dead val#.
1833 // Is the def for the val# rematerializable?
1834 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1835 ? getInstructionFromIndex(VNI->def) : 0;
1837 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1838 // Remember how to remat the def of this val#.
1839 ReMatOrigDefs[VN] = ReMatDefMI;
1840 // Original def may be modified so we have to make a copy here.
1841 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1842 CloneMIs.push_back(Clone);
1843 ReMatDefs[VN] = Clone;
1845 bool CanDelete = true;
1846 if (VNI->hasPHIKill()) {
1847 // A kill is a phi node, not all of its uses can be rematerialized.
1848 // It must not be deleted.
1850 // Need a stack slot if there is any live range where uses cannot be
1852 NeedStackSlot = true;
1855 ReMatDelete.set(VN);
1857 // Need a stack slot if there is any live range where uses cannot be
1859 NeedStackSlot = true;
1863 // One stack slot per live interval.
1864 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1865 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1866 Slot = vrm.assignVirt2StackSlot(li.reg);
1868 // This case only occurs when the prealloc splitter has already assigned
1869 // a stack slot to this vreg.
1871 Slot = vrm.getStackSlot(li.reg);
1874 // Create new intervals and rewrite defs and uses.
1875 for (LiveInterval::Ranges::const_iterator
1876 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1877 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1878 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1879 bool DefIsReMat = ReMatDefMI != NULL;
1880 bool CanDelete = ReMatDelete[I->valno->id];
1882 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1883 bool isLoad = isLoadSS ||
1884 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1885 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1886 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1887 CanDelete, vrm, rc, ReMatIds, loopInfo,
1888 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1889 MBBVRegsMap, NewLIs);
1892 // Insert spills / restores if we are splitting.
1894 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1895 normalizeSpillWeights(NewLIs);
// Phase: materialize the spill points recorded per block.
1899 SmallPtrSet<LiveInterval*, 4> AddedKill;
1900 SmallVector<unsigned, 2> Ops;
1901 if (NeedStackSlot) {
1902 int Id = SpillMBBs.find_first();
1904 std::vector<SRInfo> &spills = SpillIdxes[Id];
1905 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1906 SlotIndex index = spills[i].index;
1907 unsigned VReg = spills[i].vreg;
1908 LiveInterval &nI = getOrCreateInterval(VReg);
1909 bool isReMat = vrm.isReMaterialized(VReg);
1910 MachineInstr *MI = getInstructionFromIndex(index);
1911 bool CanFold = false;
1912 bool FoundUse = false;
1914 if (spills[i].canFold) {
// Gather the operand indices for VReg to pass to the folder.
1916 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1917 MachineOperand &MO = MI->getOperand(j);
1918 if (!MO.isReg() || MO.getReg() != VReg)
1925 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1926 RestoreMBBs, RestoreIdxes))) {
1927 // MI has two-address uses of the same register. If the use
1928 // isn't the first and only use in the BB, then we can't fold
1929 // it. FIXME: Move this to rewriteInstructionsForSpills.
1936 // Fold the store into the def if possible.
1937 bool Folded = false;
1938 if (CanFold && !Ops.empty()) {
1939 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1942 // Also folded uses, do not issue a load.
1943 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1944 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1946 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1950 // Otherwise tell the spiller to issue a spill.
// A dead def needs no store; otherwise record the spill point and
// whether the stored value dies at the store.
1952 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1953 bool isKill = LR->end == index.getStoreIndex();
1954 if (!MI->registerDefIsDead(nI.reg))
1955 // No need to spill a dead def.
1956 vrm.addSpillPoint(VReg, isKill, MI);
1958 AddedKill.insert(&nI);
1961 Id = SpillMBBs.find_next(Id);
// Phase: materialize the restore points recorded per block.
1965 int Id = RestoreMBBs.find_first();
1967 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1968 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1969 SlotIndex index = restores[i].index;
// Entries invalidated by eraseRestoreInfo carry the sentinel index.
1970 if (index == SlotIndex())
1972 unsigned VReg = restores[i].vreg;
1973 LiveInterval &nI = getOrCreateInterval(VReg);
1974 bool isReMat = vrm.isReMaterialized(VReg);
1975 MachineInstr *MI = getInstructionFromIndex(index);
1976 bool CanFold = false;
1978 if (restores[i].canFold) {
1980 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1981 MachineOperand &MO = MI->getOperand(j);
1982 if (!MO.isReg() || MO.getReg() != VReg)
1986 // If this restore were to be folded, it would have been folded
1995 // Fold the load into the use if possible.
1996 bool Folded = false;
1997 if (CanFold && !Ops.empty()) {
1999 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
// Remat path: try folding the remat'ed def (a load) instead.
2001 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2003 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2004 // If the rematerializable def is a load, also try to fold it.
2005 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2006 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2007 Ops, isLoadSS, LdSlot, VReg);
2009 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2011 // Re-matting an instruction with virtual register use. Add the
2012 // register as an implicit use on the use MI and mark the register
2013 // interval as unspillable.
2014 LiveInterval &ImpLi = getInterval(ImpUse);
2015 ImpLi.markNotSpillable();
2016 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2021 // If folding is not possible / failed, then tell the spiller to issue a
2022 // load / rematerialization for us.
2024 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
2026 vrm.addRestorePoint(VReg, MI);
2028 Id = RestoreMBBs.find_next(Id);
2031 // Finalize intervals: add kills, finalize spill weights, and filter out
2033 std::vector<LiveInterval*> RetNewLIs;
2034 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2035 LiveInterval *LI = NewLIs[i];
// Normalize weight by (approximate) size so short intervals are not
// unfairly favored.
2037 LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
2038 if (!AddedKill.count(LI)) {
2039 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2040 SlotIndex LastUseIdx = LR->end.getBaseIndex();
2041 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2042 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2043 assert(UseIdx != -1);
// Two-address tied uses are redefined, so they are not kills.
2044 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2045 LastUse->getOperand(UseIdx).setIsKill();
2046 vrm.addKillPoint(LI->reg, LastUseIdx);
2049 RetNewLIs.push_back(LI);
2053 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2054 normalizeSpillWeights(RetNewLIs);
2058 /// hasAllocatableSuperReg - Return true if the specified physical register has
2059 /// any super register that's allocatable.
2060 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// Walk Reg's (null-terminated) super-register list; a super register counts
// only if it is marked allocatable AND has a live interval computed for it.
2061 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2062 if (allocatableRegs_[*AS] && hasInterval(*AS))
// NOTE(review): the function's return statements fall on lines elided from
// this excerpt (presumably `return true;` inside the loop and
// `return false;` after it) -- confirm against the full file.
2067 /// getRepresentativeReg - Find the largest super register of the specified
2068 /// physical register.
2069 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2070 // Find the largest super-register that is allocatable.
// Start from Reg itself so the answer degrades gracefully when no suitable
// super register exists.
2071 unsigned BestReg = Reg;
2072 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2073 unsigned SuperReg = *AS;
// A super register is the "top" representative when it has an interval but
// no allocatable super register of its own (i.e. nothing strictly larger).
2074 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
// NOTE(review): the then-branch (presumably `BestReg = SuperReg; break;`),
// the loop's closing brace, and `return BestReg;` are on lines elided from
// this excerpt -- confirm against the full file.
2082 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2083 /// specified interval that conflicts with the specified physical register.
2084 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2085 unsigned PhysReg) const {
2086 unsigned NumConflicts = 0;
// Conflicts are checked against the representative (largest allocatable
// super) register's interval, not PhysReg's own interval directly.
2087 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
// Visit every operand (use or def) of the virtual register li.reg.
2088 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2089 E = mri_->reg_end(); I != E; ++I) {
2090 MachineOperand &O = I.getOperand();
2091 MachineInstr *MI = O.getParent();
// DBG_VALUE instructions carry no real register pressure; skip them.
2092 if (MI->isDebugValue())
// NOTE(review): the `continue;` for the guard above and the conflict
// counting under the liveAt() test (presumably `++NumConflicts;`) are on
// lines elided from this excerpt -- confirm against the full file.
2094 SlotIndex Index = getInstructionIndex(MI);
2095 if (pli.liveAt(Index))
2098 return NumConflicts;
2101 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2102 /// around all defs and uses of the specified interval. Return true if it
2103 /// was able to cut its interval.
2104 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2105 unsigned PhysReg, VirtRegMap &vrm) {
// The representative is the largest allocatable super register; all spilling
// below is expressed in terms of it and its sub-registers.
2106 unsigned SpillReg = getRepresentativeReg(PhysReg);
2108 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2109 // If there are registers which alias PhysReg, but which are not a
2110 // sub-register of the chosen representative super register. Assert
2111 // since we can't handle it yet.
2112 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2113 tri_->isSuperRegister(*AS, SpillReg));
// Collect SpillReg plus every sub-register that has an interval; these are
// the physical intervals that must be cut around li's defs and uses.
2116 SmallVector<unsigned, 4> PRegs;
2117 if (hasInterval(SpillReg))
2118 PRegs.push_back(SpillReg);
// Added de-duplicates sub-registers reachable through multiple paths.
2120 SmallSet<unsigned, 4> Added;
2121 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
2122 if (Added.insert(*AS) && hasInterval(*AS)) {
2123 PRegs.push_back(*AS);
// NOTE(review): the body of this inner sub-sub-register loop is on lines
// elided from this excerpt -- confirm against the full file.
2124 for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
// SeenMIs ensures each instruction is processed once even if it has several
// operands referencing li.reg.
2129 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2130 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2131 E = mri_->reg_end(); I != E; ++I) {
2132 MachineOperand &O = I.getOperand();
2133 MachineInstr *MI = O.getParent();
// Skip debug instructions and instructions already handled. (The matching
// `continue;` is on an elided line.)
2134 if (MI->isDebugValue() || SeenMIs.count(MI))
2137 SlotIndex Index = getInstructionIndex(MI);
2138 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2139 unsigned PReg = PRegs[i];
2140 LiveInterval &pli = getInterval(PReg);
// Only physical intervals live across this instruction need an emergency
// spill. (The `continue;` for this guard is on an elided line.)
2141 if (!pli.liveAt(Index))
2143 vrm.addEmergencySpill(PReg, MI);
// Cut the physical interval over [load-slot, next instruction's base slot)
// so the virtual register can occupy PReg across MI.
2144 SlotIndex StartIdx = Index.getLoadIndex();
2145 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2146 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2147 pli.removeRange(StartIdx, EndIdx);
// If the range to cut straddles live-range boundaries we cannot recover:
// report a fatal allocation failure, with extra help for inline asm, which
// is the common way users over-constrain the allocator.
2151 raw_string_ostream Msg(msg);
2152 Msg << "Ran out of registers during register allocation!";
2153 if (MI->isInlineAsm()) {
2154 Msg << "\nPlease check your inline asm statement for invalid "
2155 << "constraints:\n";
2156 MI->print(Msg, tm_);
2158 report_fatal_error(Msg.str());
// Also cut PReg's sub-register intervals that are live here so no stale
// sub-range keeps the register artificially occupied.
2160 for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
2161 if (!hasInterval(*AS))
2163 LiveInterval &spli = getInterval(*AS);
2164 if (spli.liveAt(Index))
2165 spli.removeRange(Index.getLoadIndex(),
2166 Index.getNextIndex().getBaseIndex());
// NOTE(review): the `else` branch declaring `msg`, several closing braces,
// and the function's final `return Cut;` (or equivalent) are on lines
// elided from this excerpt -- confirm against the full file.
2173 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2174 MachineInstr* startInst) {
2175 LiveInterval& Interval = getOrCreateInterval(reg);
2176 VNInfo* VN = Interval.getNextValue(
2177 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2178 startInst, true, getVNInfoAllocator());
2179 VN->setHasPHIKill(true);
2180 VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
2182 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2183 getMBBEndIdx(startInst->getParent()), VN);
2184 Interval.addRange(LR);