//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

// Hidden options to help with debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);
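// Both default to off: -disable-rematerialization turns off remat entirely
// (see isReMaterializable below), while -fast-spill routes
// addIntervalsForSpills() through the simpler addIntervalsForSpillsFast()
// path that makes no attempt at splitting or rematerialization.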

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;
  r2iMap_.clear();

  // Release VNInfo memory regions after all VNInfo objects are destroyed.
  VNInfoAllocator.DestroyAll();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      if (mii->isDebugValue())
        OS << "    \t" << *mii;
      else
        OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}

bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries.
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
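  // Note: every instruction index is subdivided into load / use / def / store
  // sub-slots; getBaseIndex() snaps an index down to its instruction's first
  // slot, so the loops below can step one whole instruction at a time.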

  // Skip deleted instructions.
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find the last instruction in the range.
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // The range cannot cross basic block boundaries or terminators.
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg.
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)
        continue;

    // Check for operands using reg.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}

/// conflictsWithSubPhysRegRef - Similar to conflictsWithPhysReg except that
/// it checks for sub-register references and can check uses as well.
bool LiveIntervals::conflictsWithSubPhysRegRef(LiveInterval &li,
                                  unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  else
    dbgs() << "%reg" << reg;
}

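/// MultipleDefsBySameMI - Return true if the operand at MOIdx and some later
/// operand of MI define the same virtual register. This happens when a single
/// instruction writes several sub-registers of one vreg, e.g. after
/// REG_SEQUENCE elimination.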
static
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    if (MO.getReg() == Reg && MO.isDef()) {
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
             MO.getSubReg());
      return true;
    }
  }
  return false;
}

/// isPartialRedef - Return true if the specified def at the specified index is
/// partially re-defining the specified live interval. A common case of this is
/// a definition of a sub-register.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  if (!MO.getSubReg() || MO.isEarlyClobber())
    return false;

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  if (OldLR->valno->isDefAccurate()) {
    MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
  }
  return false;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    if (MO.getSubReg())
      mi->addRegisterDefined(interval.reg);

    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
                                          VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
               E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create an interval with a NEW value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
                                      VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two address elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    //
    // It may also be a partial redef like this:
    // 80   %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120  %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
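    //
    // A two-address redef, by contrast, looks like (register numbers here
    // are purely illustrative):
    // 80   %reg1025<def> = MOV32ri 4
    // 120  %reg1025<def> = ADD32rr %reg1025<kill>, %reg1026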
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert((PartReDef || interval.containsOneValue()) &&
             "Unexpected 2-addr liveint!");
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (PartReDef &&
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should reach this point without a killing instruction,
  // or an instruction where we know the register is dead, is if it is
  // live-in to the function and never used. Another possible case is that
  // the implicit use of the physical register has been deleted by the
  // two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend the old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno
    : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for an exact
      // match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

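/// handleLiveInRegister - Create the live range for a register that is live
/// into MBB. When isAlias is true the register is only live in as a
/// sub-register alias; if it is never read or redefined in the block, it is
/// then treated as dead on entry rather than as live through.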
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills; if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // The live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}

/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N in
/// which a variable is live.
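/// Basic blocks are visited in function layout order: live-in registers are
/// handled first, then every operand def of every instruction; uses are
/// recovered from the LiveVariables analysis.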
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // Handle register defs - build intervals.
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_defs that define values which are live out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
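  // Physical register intervals must never be spilled, so they get infinite
  // weight; virtual registers start at zero and accumulate spill weight.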
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->isExtractSubreg()) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
      unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
      if (SrcSubReg == DstSubReg)
        // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
        // reg1034 can still be coalesced to EDX.
        return Reg;
      assert(DstSubReg == 0);
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    }
    return Reg;
  } else if (VNI->getCopy()->isInsertSubreg() ||
             VNI->getCopy()->isSubregToReg())
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition of every val# of the
/// specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
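/// MRInfo is set to a mask of VirtRegMap::isMod / isRef describing how the
/// surviving operands access the register being folded.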
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}

/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// the interval on to-be re-materialized operands of MI) with a new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If the def for this use can't be rematerialized, then try folding.
      // If the def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try to fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because the
      // earlier def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  SlotIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHI())
      continue;

    SlotIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx <= End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

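// rewriteInstructionsForSpills() below builds one RewriteInfo per def / use
// of the register being spilled, sorts the list with RewriteInfoCompare, and
// then walks the entries in slot-index order so per-MBB spill and restore
// points are discovered from first def to last use.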
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                           Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
          continue;
        }
      }

      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      continue;
    }
    assert(!(O.isImplicit() && O.isUse()) &&
           "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would then overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent the
      // interval from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its defs / uses
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update the weight of the spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB,
                                         index.getDefIndex());
        else {
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
          const VNInfo *VNI =
            li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def: it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its defs / uses can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      // Invalidate the entry rather than erasing it so the positions of the
      // other entries are preserved.
      Restores[i].index = SlotIndex();
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    if (O.isDef()) {
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}

float
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
    loopDepth = 200;

  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
  // headroom before overflow.
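  // For example, at loopDepth 2 this gives lc = pow(1 + 100/12, 2) ~= 87, so
  // an instruction that both defines and uses the register contributes about
  // 174 to the interval's weight.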
  float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);

  return (isDef + isUse) * lc;
}

void
LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
    normalizeSpillWeight(*NewLIs[i]);
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
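  // Fast path: no splitting and no rematerialization. Each instruction that
  // touches li.reg either folds the stack slot directly or gets a fresh vreg
  // that is reloaded before its uses and spilled after its defs.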

  std::vector<LiveInterval*> added;

  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.dump();
      dbgs() << '\n';
    });

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // Create a new register for this spill.
      LiveInterval &nI = getOrCreateInterval(NewVReg);
      nI.markNotSpillable();

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
             E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      SlotIndex index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DEBUG({
          dbgs() << "\t\t\t\tadded new interval: ";
          nI.dump();
          dbgs() << '\n';
        });
    }

    RI = mri_->reg_begin(li.reg);
  }

  return added;
}

1744 std::vector<LiveInterval*> LiveIntervals::
1745 addIntervalsForSpills(const LiveInterval &li,
1746 SmallVectorImpl<LiveInterval*> &SpillIs,
1747 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1749 if (EnableFastSpilling)
1750 return addIntervalsForSpillsFast(li, loopInfo, vrm);
1752 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1755 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1756 li.print(dbgs(), tri_);
1760 // Each bit specify whether a spill is required in the MBB.
1761 BitVector SpillMBBs(mf_->getNumBlockIDs());
1762 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1763 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1764 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1765 DenseMap<unsigned,unsigned> MBBVRegsMap;
1766 std::vector<LiveInterval*> NewLIs;
1767 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1769 unsigned NumValNums = li.getNumValNums();
1770 SmallVector<MachineInstr*, 4> ReMatDefs;
1771 ReMatDefs.resize(NumValNums, NULL);
1772 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1773 ReMatOrigDefs.resize(NumValNums, NULL);
1774 SmallVector<int, 4> ReMatIds;
1775 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1776 BitVector ReMatDelete(NumValNums);
1777 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

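  // The general case: decide per val# whether its def can be rematerialized,
  // then rewrite every range, and finally place the actual spill and restore
  // points.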
  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // The val# is killed by a phi node, so not all of its uses can be
        // rematerialized. It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);
    else
      // This case only occurs when the prealloc splitter has already assigned
      // a stack slot to this vreg.
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

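  // From here on we are splitting: walk the spill points recorded per basic
  // block and try to fold each store into the defining instruction before
  // falling back to an explicit spill point.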
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,
                                   VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

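  // Symmetrically, walk the recorded restore points and try to fold each
  // reload (or rematerialized load) into the using instruction.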
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and mark the register
            // interval as unspillable.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.markNotSpillable();
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

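  // Unless a spill above already recorded the kill, the last use of each
  // surviving interval is marked as a kill and registered with the
  // VirtRegMap.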
  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue())
      continue;
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg but are not sub-registers
    // of the chosen representative super register, assert; we can't handle
    // that case yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }

  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue() || SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        report_fatal_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}

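/// addLiveRangeToEndOfBlock - Given a register and an instruction defining
/// it, extend its live interval from the def to the end of the defining
/// instruction's basic block, marking the value as having a PHI kill.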
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
  LiveRange LR(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}