1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
49 // Hidden options for help debugging.
50 static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
// When set, use the fast (but lower quality) spilling path.
53 static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
// Pass statistics, reported with -stats.
56 STATISTIC(numIntervals , "Number of original intervals");
57 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
58 STATISTIC(numSplits , "Number of intervals split");
// Pass identification and registration with the PassManager.
60 char LiveIntervals::ID = 0;
61 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
/// getAnalysisUsage - Declare the analyses this pass requires and preserves.
/// LiveVariables, PHIElimination, ProcessImplicitDefs and SlotIndexes are
/// both required and preserved, so they remain valid after this pass runs.
63 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
65 AU.addRequired<AliasAnalysis>();
66 AU.addPreserved<AliasAnalysis>();
67 AU.addPreserved<LiveVariables>();
68 AU.addRequired<LiveVariables>();
69 AU.addPreservedID(MachineLoopInfoID);
70 AU.addPreservedID(MachineDominatorsID);
73 AU.addPreservedID(PHIEliminationID);
74 AU.addRequiredID(PHIEliminationID);
77 AU.addRequiredID(TwoAddressInstructionPassID);
78 AU.addPreserved<ProcessImplicitDefs>();
79 AU.addRequired<ProcessImplicitDefs>();
80 AU.addPreserved<SlotIndexes>();
// Transitive: users of LiveIntervals implicitly depend on SlotIndexes too.
81 AU.addRequiredTransitive<SlotIndexes>();
// Chain to the base class to pick up standard MachineFunctionPass usage.
82 MachineFunctionPass::getAnalysisUsage(AU);
/// releaseMemory - Drop all per-function state: the LiveInterval objects
/// owned through r2iMap_, the VNInfo allocator regions, and any cloned
/// MachineInstrs produced during splitting.
85 void LiveIntervals::releaseMemory() {
86 // Free the live intervals themselves.
87 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
88 E = r2iMap_.end(); I != E; ++I)
93 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
94 VNInfoAllocator.DestroyAll();
// Delete any machine instrs cloned (e.g. for rematerialization bookkeeping).
95 while (!CloneMIs.empty()) {
96 MachineInstr *MI = CloneMIs.back();
98 mf_->DeleteMachineInstr(MI);
102 /// runOnMachineFunction - Register allocate the whole function
104 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
// Cache pointers to the target description and the sibling analyses so the
// rest of the pass does not have to call getAnalysis<> repeatedly.
106 mri_ = &mf_->getRegInfo();
107 tm_ = &fn.getTarget();
108 tri_ = tm_->getRegisterInfo();
109 tii_ = tm_->getInstrInfo();
110 aa_ = &getAnalysis<AliasAnalysis>();
111 lv_ = &getAnalysis<LiveVariables>();
112 indexes_ = &getAnalysis<SlotIndexes>();
113 allocatableRegs_ = tri_->getAllocatableSet(fn);
// Account the freshly computed intervals in the -stats counters.
117 numIntervals += getNumIntervals();
123 /// print - Implement the dump method.
/// Prints every computed live interval to OS, using tri_ for register names.
124 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
125 OS << "********** INTERVALS **********\n";
126 for (const_iterator I = begin(), E = end(); I != E; ++I) {
127 I->second->print(OS, tri_);
/// printInstrs - Print every (non-debug) machine instruction of the current
/// function to OS, each prefixed with its slot index.
134 void LiveIntervals::printInstrs(raw_ostream &OS) const {
135 OS << "********** MACHINEINSTRS **********\n";
137 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
138 mbbi != mbbe; ++mbbi) {
139 OS << "BB#" << mbbi->getNumber()
140 << ":\t\t# derived from " << mbbi->getName() << "\n";
141 for (MachineBasicBlock::iterator mii = mbbi->begin(),
142 mie = mbbi->end(); mii != mie; ++mii) {
// DBG_VALUE instructions have no slot index, so they are skipped here.
143 if (mii->isDebugValue())
146 OS << getInstructionIndex(mii) << '\t' << *mii;
/// dumpInstrs - Debug helper; presumably forwards to printInstrs(dbgs()) —
/// the body is elided in this dump, verify against upstream.
151 void LiveIntervals::dumpInstrs() const {
/// conflictsWithPhysReg - Return true if any instruction covered by li's
/// single range has an operand that (after virtual->phys mapping through
/// vrm) overlaps the physical register 'reg'.  Copies to/from li.reg are
/// tolerated.  Only single-range intervals within one MBB are handled.
155 bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
156 VirtRegMap &vrm, unsigned reg) {
157 // We don't handle fancy stuff crossing basic block boundaries
158 if (li.ranges.size() != 1)
160 const LiveRange &range = li.ranges.front();
161 SlotIndex idx = range.start.getBaseIndex();
162 SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
164 // Skip deleted instructions
165 MachineInstr *firstMI = getInstructionFromIndex(idx);
166 while (!firstMI && idx != end) {
167 idx = idx.getNextIndex();
168 firstMI = getInstructionFromIndex(idx);
173 // Find last instruction in range
174 SlotIndex lastIdx = end.getPrevIndex();
175 MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
176 while (!lastMI && lastIdx != idx) {
177 lastIdx = lastIdx.getPrevIndex();
178 lastMI = getInstructionFromIndex(lastIdx);
183 // Range cannot cross basic block boundaries or terminators
184 MachineBasicBlock *MBB = firstMI->getParent();
185 if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
188 MachineBasicBlock::const_iterator E = lastMI;
190 for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
191 const MachineInstr &MI = *I;
193 // Allow copies to and from li.reg
194 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
195 if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
196 if (SrcReg == li.reg || DstReg == li.reg)
199 // Check for operands using reg
200 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
201 const MachineOperand& mop = MI.getOperand(i);
204 unsigned PhysReg = mop.getReg();
205 if (PhysReg == 0 || PhysReg == li.reg)
// Map an already-assigned virtual register to its physical register.
207 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
208 if (!vrm.hasPhys(PhysReg))
210 PhysReg = vrm.getPhys(PhysReg);
// Overlap includes aliasing sub/super-registers, not just equality.
212 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
217 // No conflicts found.
221 /// conflictsWithSubPhysRegRef - Similar to conflictsWithPhysRegRef except
222 /// it checks for sub-register reference and it can check use as well.
/// Walks every instruction inside li's ranges (skipping deleted instructions
/// and already-joined copies) looking for an operand that is a sub-register
/// of Reg.  Uses are only considered when CheckUse is set.
223 bool LiveIntervals::conflictsWithSubPhysRegRef(LiveInterval &li,
224 unsigned Reg, bool CheckUse,
225 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
226 for (LiveInterval::Ranges::const_iterator
227 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
228 for (SlotIndex index = I->start.getBaseIndex(),
229 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
231 index = index.getNextIndex()) {
232 MachineInstr *MI = getInstructionFromIndex(index);
234 continue; // skip deleted instructions
// Copies queued for coalescing are about to disappear; ignore them.
236 if (JoinedCopies.count(MI))
238 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
239 MachineOperand& MO = MI->getOperand(i);
242 if (MO.isUse() && !CheckUse)
244 unsigned PhysReg = MO.getReg();
245 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
247 if (tri_->isSubRegister(Reg, PhysReg))
/// printRegName - Debug helper: print a physical register by name, or a
/// virtual register as "%reg<N>".
257 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
258 if (TargetRegisterInfo::isPhysicalRegister(reg))
259 dbgs() << tri_->getName(reg);
261 dbgs() << "%reg" << reg;
/// MultipleDefsBySameMI - Return true if MI has another def of the same
/// register after operand MOIdx.  The asserts check the two defs write
/// different sub-registers (the expected REG_SEQUENCE-lowering shape).
266 bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
267 unsigned Reg = MI.getOperand(MOIdx).getReg();
268 for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
269 const MachineOperand &MO = MI.getOperand(i);
272 if (MO.getReg() == Reg && MO.isDef()) {
273 assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
274 MI.getOperand(MOIdx).getSubReg() &&
282 /// isPartialRedef - Return true if the specified def at the specific index is
283 /// partially re-defining the specified live interval. A common case of this is
284 /// a definition of the sub-register.
285 bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
286 LiveInterval &interval) {
// Only a sub-register def can be a partial redef; earlyclobbers are excluded.
287 if (!MO.getSubReg() || MO.isEarlyClobber())
290 SlotIndex RedefIndex = MIIdx.getDefIndex();
// Look up the value live immediately before this def.
291 const LiveRange *OldLR =
292 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
293 if (OldLR->valno->isDefAccurate()) {
294 MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
// It's a redef only if the previous def also wrote (part of) this register.
295 return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
/// handleVirtualRegisterDef - Extend 'interval' for the virtual-register def
/// at operand MO of instruction mi (slot MIIdx) in block mbb.  The first def
/// of a vreg creates value number 0 and its ranges (intra-block kill,
/// live-through blocks, or phi-join); later defs handle two-address /
/// partial redefs and phi-join blocks.
300 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
301 MachineBasicBlock::iterator mi,
305 LiveInterval &interval) {
307 dbgs() << "\t\tregister: ";
308 printRegName(interval.reg, tri_);
311 // Virtual registers may be defined multiple times (due to phi
312 // elimination and 2-addr elimination). Much of what we do only has to be
313 // done once for the vreg. We use an empty interval to detect the first
314 // time we see a vreg.
315 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
316 if (interval.empty()) {
317 // Get the Idx of the defining instructions.
318 SlotIndex defIndex = MIIdx.getDefIndex();
319 // Earlyclobbers move back one, so that they overlap the live range
321 if (MO.isEarlyClobber())
322 defIndex = MIIdx.getUseIndex();
324 // Make sure the first definition is not a partial redefinition. Add an
325 // <imp-def> of the full register.
327 mi->addRegisterDefined(interval.reg);
329 MachineInstr *CopyMI = NULL;
330 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
331 if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
332 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg)) {
335 // Some of the REG_SEQUENCE lowering in TwoAddressInstrPass creates
336 // implicit defs without really knowing. It shows up as INSERT_SUBREG
337 // using an undefined register.
338 if (mi->isInsertSubreg())
339 mi->getOperand(1).setIsUndef();
342 VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
344 assert(ValNo->id == 0 && "First value in interval is not 0?");
346 // Loop over all of the blocks that the vreg is defined in. There are
347 // two cases we have to handle here. The most common case is a vreg
348 // whose lifetime is contained within a basic block. In this case there
349 // will be a single kill, in MBB, which comes after the definition.
350 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
351 // FIXME: what about dead vars?
// If the def itself is the kill, the value is dead at its def slot.
353 if (vi.Kills[0] != mi)
354 killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
356 killIdx = defIndex.getStoreIndex();
358 // If the kill happens after the definition, we have an intra-block
360 if (killIdx > defIndex) {
361 assert(vi.AliveBlocks.empty() &&
362 "Shouldn't be alive across any blocks!");
363 LiveRange LR(defIndex, killIdx, ValNo);
364 interval.addRange(LR);
365 DEBUG(dbgs() << " +" << LR << "\n");
366 ValNo->addKill(killIdx);
371 // The other case we handle is when a virtual register lives to the end
372 // of the defining block, potentially live across some blocks, then is
373 // live into some number of blocks, but gets killed. Start by adding a
374 // range that goes from this definition to the end of the defining block.
375 LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
376 DEBUG(dbgs() << " +" << NewLR);
377 interval.addRange(NewLR);
379 bool PHIJoin = lv_->isPHIJoin(interval.reg);
382 // A phi join register is killed at the end of the MBB and revived as a new
383 // valno in the killing blocks.
384 assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
385 DEBUG(dbgs() << " phi-join");
386 ValNo->addKill(indexes_->getTerminatorGap(mbb));
387 ValNo->setHasPHIKill(true);
389 // Iterate over all of the blocks that the variable is completely
390 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
392 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
393 E = vi.AliveBlocks.end(); I != E; ++I) {
394 MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
395 LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
396 interval.addRange(LR);
397 DEBUG(dbgs() << " +" << LR);
401 // Finally, this virtual register is live from the start of any killing
402 // block to the 'use' slot of the killing instruction.
403 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
404 MachineInstr *Kill = vi.Kills[i];
405 SlotIndex Start = getMBBStartIdx(Kill->getParent());
406 SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();
408 // Create interval with one of a NEW value number. Note that this value
409 // number isn't actually defined by an instruction, weird huh? :)
411 ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
413 ValNo->setIsPHIDef(true);
415 LiveRange LR(Start, killIdx, ValNo);
416 interval.addRange(LR);
417 ValNo->addKill(killIdx);
418 DEBUG(dbgs() << " +" << LR);
422 if (MultipleDefsBySameMI(*mi, MOIdx))
423 // Multiple defs of the same virtual register by the same instruction.
424 // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
425 // This is likely due to elimination of REG_SEQUENCE instructions. Return
426 // here since there is nothing to do.
429 // If this is the second time we see a virtual register definition, it
430 // must be due to phi elimination or two addr elimination. If this is
431 // the result of two address elimination, then the vreg is one of the
432 // def-and-use register operand.
434 // It may also be partial redef like this:
435 // 80 %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
436 // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
437 bool PartReDef = isPartialRedef(MIIdx, MO, interval);
438 if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
439 // If this is a two-address definition, then we have already processed
440 // the live range. The only problem is that we didn't realize there
441 // are actually two values in the live interval. Because of this we
442 // need to take the LiveRegion that defines this register and split it
444 SlotIndex RedefIndex = MIIdx.getDefIndex();
445 if (MO.isEarlyClobber())
446 RedefIndex = MIIdx.getUseIndex();
448 const LiveRange *OldLR =
449 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
450 VNInfo *OldValNo = OldLR->valno;
451 SlotIndex DefIndex = OldValNo->def.getDefIndex();
453 // Delete the previous value, which should be short and continuous,
454 // because the 2-addr copy must be in the same MBB as the redef.
455 interval.removeRange(DefIndex, RedefIndex);
457 // The new value number (#1) is defined by the instruction we claimed
459 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
460 false, // update at *
462 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
464 // Value#0 is now defined by the 2-addr instruction.
465 OldValNo->def = RedefIndex;
466 OldValNo->setCopy(0);
468 // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
469 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
471 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
472 OldValNo->setCopy(&*mi);
474 // Add the new live interval which replaces the range for the input copy.
475 LiveRange LR(DefIndex, RedefIndex, ValNo);
476 DEBUG(dbgs() << " replace range with " << LR);
477 interval.addRange(LR);
478 ValNo->addKill(RedefIndex);
480 // If this redefinition is dead, we need to add a dummy unit live
481 // range covering the def slot.
483 interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
487 dbgs() << " RESULT: ";
488 interval.print(dbgs(), tri_);
490 } else if (lv_->isPHIJoin(interval.reg)) {
491 // In the case of PHI elimination, each variable definition is only
492 // live until the end of the block. We've already taken care of the
493 // rest of the live range.
495 SlotIndex defIndex = MIIdx.getDefIndex();
496 if (MO.isEarlyClobber())
497 defIndex = MIIdx.getUseIndex();
500 MachineInstr *CopyMI = NULL;
501 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
502 if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
503 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
505 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
507 SlotIndex killIndex = getMBBEndIdx(mbb);
508 LiveRange LR(defIndex, killIndex, ValNo);
509 interval.addRange(LR);
510 ValNo->addKill(indexes_->getTerminatorGap(mbb));
511 ValNo->setHasPHIKill(true);
512 DEBUG(dbgs() << " phi-join +" << LR);
// Any other repeated def of a vreg is a verifier-level error.
514 llvm_unreachable("Multiply defined register");
518 DEBUG(dbgs() << '\n');
/// handlePhysicalRegisterDef - Compute the live range for a physical-register
/// def at MIIdx: scan forward within the defining block until the register is
/// killed, redefined (two-address or plain), or falls off the end (dead).
/// CopyMI, when non-null, records the copy instruction defining the value.
521 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
522 MachineBasicBlock::iterator mi,
525 LiveInterval &interval,
526 MachineInstr *CopyMI) {
527 // A physical register cannot be live across basic block, so its
528 // lifetime must end somewhere in its defining basic block.
530 dbgs() << "\t\tregister: ";
531 printRegName(interval.reg, tri_);
534 SlotIndex baseIndex = MIIdx;
535 SlotIndex start = baseIndex.getDefIndex();
536 // Earlyclobbers move back one.
537 if (MO.isEarlyClobber())
538 start = MIIdx.getUseIndex();
539 SlotIndex end = start;
541 // If it is not used after definition, it is considered dead at
542 // the instruction defining it. Hence its interval is:
543 // [defSlot(def), defSlot(def)+1)
544 // For earlyclobbers, the defSlot was pushed back one; the extra
545 // advance below compensates.
547 DEBUG(dbgs() << " dead");
548 end = start.getStoreIndex();
552 // If it is not dead on definition, it must be killed by a
553 // subsequent instruction. Hence its interval is:
554 // [defSlot(def), useSlot(kill)+1)
555 baseIndex = baseIndex.getNextIndex();
556 while (++mi != MBB->end()) {
// DBG_VALUE instructions have no slot index; skip them.
558 if (mi->isDebugValue())
560 if (getInstructionFromIndex(baseIndex) == 0)
561 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
563 if (mi->killsRegister(interval.reg, tri_)) {
564 DEBUG(dbgs() << " killed");
565 end = baseIndex.getDefIndex();
568 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
570 if (mi->isRegTiedToUseOperand(DefIdx)) {
571 // Two-address instruction.
572 end = baseIndex.getDefIndex();
574 // Another instruction redefines the register before it is ever read.
575 // Then the register is essentially dead at the instruction that
576 // defines it. Hence its interval is:
577 // [defSlot(def), defSlot(def)+1)
578 DEBUG(dbgs() << " dead");
579 end = start.getStoreIndex();
585 baseIndex = baseIndex.getNextIndex();
588 // The only case we should have a dead physreg here without a killing or
589 // instruction where we know it's dead is if it is live-in to the function
590 // and never used. Another possible case is the implicit use of the
591 // physical register has been deleted by two-address pass.
592 end = start.getStoreIndex();
595 assert(start < end && "did not find end of interval?");
597 // Already exists? Extend old live interval.
598 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
599 bool Extend = OldLR != interval.end();
600 VNInfo *ValNo = Extend
601 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
602 if (MO.isEarlyClobber() && Extend)
603 ValNo->setHasRedefByEC(true);
604 LiveRange LR(start, end, ValNo);
605 interval.addRange(LR);
606 LR.valno->addKill(end);
607 DEBUG(dbgs() << " +" << LR << '\n');
/// handleRegisterDef - Dispatch a register def to the virtual- or
/// physical-register handler.  A physreg def also defines each of its
/// sub-registers, unless the instruction defines the sub-register
/// explicitly itself.
610 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
611 MachineBasicBlock::iterator MI,
615 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
616 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
617 getOrCreateInterval(MO.getReg()));
618 else if (allocatableRegs_[MO.getReg()]) {
619 MachineInstr *CopyMI = NULL;
620 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
621 if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
622 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
624 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
625 getOrCreateInterval(MO.getReg()), CopyMI);
626 // Def of a register also defines its sub-registers.
627 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
628 // If MI also modifies the sub-register explicitly, avoid processing it
629 // more than once. Do not pass in TRI here so it checks for exact match.
630 if (!MI->definesRegister(*AS))
631 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
632 getOrCreateInterval(*AS), 0);
/// handleLiveInRegister - Compute the range of a register that is live-in to
/// MBB: scan from the top of the block for its kill or redef, and mark the
/// resulting value number as a PHI-def (no defining instruction).
636 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
638 LiveInterval &interval, bool isAlias) {
640 dbgs() << "\t\tlivein register: ";
641 printRegName(interval.reg, tri_);
644 // Look for kills, if it reaches a def before it's killed, then it shouldn't
645 // be considered a livein.
646 MachineBasicBlock::iterator mi = MBB->begin();
647 MachineBasicBlock::iterator E = MBB->end();
648 // Skip over DBG_VALUE at the start of the MBB.
649 if (mi != E && mi->isDebugValue()) {
650 while (++mi != E && mi->isDebugValue())
653 // MBB is empty except for DBG_VALUE's.
657 SlotIndex baseIndex = MIIdx;
658 SlotIndex start = baseIndex;
659 if (getInstructionFromIndex(baseIndex) == 0)
660 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
662 SlotIndex end = baseIndex;
663 bool SeenDefUse = false;
666 if (mi->killsRegister(interval.reg, tri_)) {
667 DEBUG(dbgs() << " killed");
668 end = baseIndex.getDefIndex();
671 } else if (mi->definesRegister(interval.reg, tri_)) {
672 // Another instruction redefines the register before it is ever read.
673 // Then the register is essentially dead at the instruction that defines
674 // it. Hence its interval is:
675 // [defSlot(def), defSlot(def)+1)
676 DEBUG(dbgs() << " dead");
677 end = start.getStoreIndex();
682 while (++mi != E && mi->isDebugValue())
683 // Skip over DBG_VALUE.
686 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
689 // Live-in register might not be used at all.
692 DEBUG(dbgs() << " dead");
693 end = MIIdx.getStoreIndex();
695 DEBUG(dbgs() << " live through");
// The live-in value has no defining instruction: create a PHI-def valno
// anchored at the block start.
701 interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
702 0, false, VNInfoAllocator);
703 vni->setIsPHIDef(true);
704 LiveRange LR(start, end, vni);
706 interval.addRange(LR);
707 LR.valno->addKill(end);
708 DEBUG(dbgs() << " +" << LR << '\n');
711 /// computeIntervals - computes the live intervals for virtual
712 /// registers. for some ordering of the machine instructions [1,N] a
713 /// live interval is an interval [i, j) where 1 <= i <= j < N for
714 /// which a variable is live
715 void LiveIntervals::computeIntervals() {
716 DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
717 << "********** Function: "
718 << ((Value*)mf_->getFunction())->getName() << '\n');
720 SmallVector<unsigned, 8> UndefUses;
721 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
723 MachineBasicBlock *MBB = MBBI;
727 // Track the index of the current machine instr.
728 SlotIndex MIIndex = getMBBStartIdx(MBB);
729 DEBUG(dbgs() << "BB#" << MBB->getNumber()
730 << ":\t\t# derived from " << MBB->getName() << "\n");
732 // Create intervals for live-ins to this BB first.
733 for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
734 LE = MBB->livein_end(); LI != LE; ++LI) {
735 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
736 // Multiple live-ins can alias the same register.
737 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
738 if (!hasInterval(*AS))
739 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
743 // Skip over empty initial indices.
744 if (getInstructionFromIndex(MIIndex) == 0)
745 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
747 for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
749 DEBUG(dbgs() << MIIndex << "\t" << *MI);
750 if (MI->isDebugValue())
// Walk operands in reverse so implicit defs are seen before explicit ones.
754 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
755 MachineOperand &MO = MI->getOperand(i);
756 if (!MO.isReg() || !MO.getReg())
759 // handle register defs - build intervals
761 handleRegisterDef(MBB, MI, MIIndex, MO, i);
762 else if (MO.isUndef())
763 UndefUses.push_back(MO.getReg());
766 // Move to the next instr slot.
767 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
771 // Create empty intervals for registers defined by implicit_def's (except
772 // for those implicit_def that define values which are liveout of their
774 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
775 unsigned UndefReg = UndefUses[i];
776 (void)getOrCreateInterval(UndefReg);
/// createInterval - Allocate a new LiveInterval for 'reg'.  Physical
/// registers get HUGE_VALF spill weight so the allocator never spills them.
780 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
781 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
782 return new LiveInterval(reg, Weight);
785 /// dupInterval - Duplicate a live interval. The caller is responsible for
786 /// managing the allocated memory.
787 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
788 LiveInterval *NewLI = createInterval(li->reg);
// Deep-copy ranges and value numbers using this pass's VNInfo allocator.
789 NewLI->Copy(*li, mri_, getVNInfoAllocator());
793 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
794 /// copy field and returns the source register that defines it.
795 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
799 if (VNI->getCopy()->isExtractSubreg()) {
800 // If it's extracting out of a physical register, return the sub-register.
801 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
802 if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
803 unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
804 unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
805 if (SrcSubReg == DstSubReg)
806 // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
807 // reg1034 can still be coalesced to EDX.
809 assert(DstSubReg == 0);
810 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
813 } else if (VNI->getCopy()->isInsertSubreg() ||
814 VNI->getCopy()->isSubregToReg())
// For INSERT_SUBREG / SUBREG_TO_REG the inserted value is operand 2.
815 return VNI->getCopy()->getOperand(2).getReg();
817 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
818 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
820 llvm_unreachable("Unrecognized copy instruction!");
824 //===----------------------------------------------------------------------===//
825 // Register allocator hooks.
828 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
829 /// allow one) virtual register operand, then its uses are implicitly using
830 /// the register. Returns the virtual register.
831 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
832 MachineInstr *MI) const {
834 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
835 MachineOperand &MO = MI->getOperand(i);
836 if (!MO.isReg() || !MO.isUse())
838 unsigned Reg = MO.getReg();
839 if (Reg == 0 || Reg == li.reg)
// Reserved (unallocatable) physreg uses (e.g. a PIC base) are fine.
842 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
843 !allocatableRegs_[Reg])
845 // FIXME: For now, only remat MI with at most one register operand.
847 "Can't rematerialize instruction with multiple register operand!");
856 /// isValNoAvailableAt - Return true if the val# of the specified interval
857 /// which reaches the given instruction also reaches the specified use index.
858 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
859 SlotIndex UseIdx) const {
860 SlotIndex Index = getInstructionIndex(MI);
861 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
862 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
// Same value number live at both points => the value is available.
863 return UI != li.end() && UI->valno == ValNo;
866 /// isReMaterializable - Returns true if the definition MI of the specified
867 /// val# of the specified interval is re-materializable.
868 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
869 const VNInfo *ValNo, MachineInstr *MI,
870 SmallVectorImpl<LiveInterval*> &SpillIs,
// Delegate the basic legality check to the target.
875 if (!tii_->isTriviallyReMaterializable(MI, aa_))
878 // Target-specific code can mark an instruction as being rematerializable
879 // if it has one virtual reg use, though it had better be something like
880 // a PIC base register which is likely to be live everywhere.
881 unsigned ImpUse = getReMatImplicitUse(li, MI);
883 const LiveInterval &ImpLi = getInterval(ImpUse);
884 for (MachineRegisterInfo::use_nodbg_iterator
885 ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
887 MachineInstr *UseMI = &*ri;
888 SlotIndex UseIdx = getInstructionIndex(UseMI);
// Only uses reached by this val# matter.
889 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
891 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
895 // If a register operand of the re-materialized instruction is going to
896 // be spilled next, then it's not legal to re-materialize this instruction.
897 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
898 if (ImpUse == SpillIs[i]->reg)
904 /// isReMaterializable - Returns true if the definition MI of the specified
905 /// val# of the specified interval is re-materializable.
/// Convenience overload that supplies empty SpillIs / dummy isLoad outputs.
906 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
907 const VNInfo *ValNo, MachineInstr *MI) {
908 SmallVector<LiveInterval*, 4> Dummy1;
910 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
913 /// isReMaterializable - Returns true if every definition of MI of every
914 /// val# of the specified interval is re-materializable.
915 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
916 SmallVectorImpl<LiveInterval*> &SpillIs,
919 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
921 const VNInfo *VNI = *i;
923 continue; // Dead val#.
924 // Is the def for the val# rematerializable?
// Without an accurate def slot we cannot find the defining instruction.
925 if (!VNI->isDefAccurate())
927 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
928 bool DefIsLoad = false;
930 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
937 /// FilterFoldedOps - Filter out two-address use operands. Return
938 /// true if it finds any issue with the operands that ought to prevent
/// folding; the surviving operand indexes are appended to FoldOps and the
/// combined mod/ref classification accumulates into MRInfo.
940 static bool FilterFoldedOps(MachineInstr *MI,
941 SmallVector<unsigned, 2> &Ops,
943 SmallVector<unsigned, 2> &FoldOps) {
945 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
946 unsigned OpIdx = Ops[i];
947 MachineOperand &MO = MI->getOperand(OpIdx);
948 // FIXME: fold subreg use.
952 MRInfo |= (unsigned)VirtRegMap::isMod;
954 // Filter out two-address use operand(s).
955 if (MI->isRegTiedToDefOperand(OpIdx)) {
956 MRInfo = VirtRegMap::isModRef;
959 MRInfo |= (unsigned)VirtRegMap::isRef;
961 FoldOps.push_back(OpIdx);
967 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
968 /// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
971 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
972 VirtRegMap &vrm, MachineInstr *DefMI,
974 SmallVector<unsigned, 2> &Ops,
975 bool isSS, int Slot, unsigned Reg) {
976 // If it is an implicit def instruction, just delete it.
977 if (MI->isImplicitDef()) {
978 RemoveMachineInstrFromMaps(MI);
979 vrm.RemoveMachineInstrFromMaps(MI);
980 MI->eraseFromParent();
985 // Filter the list of operand indexes that are to be folded. Abort if
986 // any operand will prevent folding.
988 SmallVector<unsigned, 2> FoldOps;
989 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
992 // The only time it's safe to fold into a two address instruction is when
993 // it's folding reload and spill from / into a spill stack slot.
994 if (DefMI && (MRInfo & VirtRegMap::isMod))
997 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
998 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1000 // Remember this instruction uses the spill slot.
1001 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1003 // Attempt to fold the memory reference into the instruction. If
1004 // we can do this, we don't need to insert spill code.
1005 MachineBasicBlock &MBB = *MI->getParent();
1006 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1007 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Migrate all VirtRegMap bookkeeping from the old instruction to the new.
1008 vrm.transferSpillPts(MI, fmi);
1009 vrm.transferRestorePts(MI, fmi);
1010 vrm.transferEmergencySpills(MI, fmi);
1011 ReplaceMachineInstrInMaps(MI, fmi);
// Splice the folded instruction in place of the original.
1012 MI = MBB.insert(MBB.erase(MI), fmi);
1019 /// canFoldMemoryOperand - Returns true if the specified load / store
1020 /// folding is possible.
1021 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1022 SmallVector<unsigned, 2> &Ops,
1024 // Filter the list of operand indexes that are to be folded. Abort if
1025 // any operand will prevent folding.
1026 unsigned MRInfo = 0;
1027 SmallVector<unsigned, 2> FoldOps;
1028 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1031 // It's only legal to remat for a use, not a def.
1032 if (ReMat && (MRInfo & VirtRegMap::isMod))
// Delegate the final feasibility check to the target.
1035 return tii_->canFoldMemoryOperand(MI, FoldOps);
/// intervalIsInOneMBB - Return true if every live range of the interval is
/// covered by a single machine basic block.
/// NOTE(review): the return statements are elided in this listing.
1038 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1039 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
// Block covering the first range; all later ranges must match it.
1041 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
1046 for (++itr; itr != li.ranges.end(); ++itr) {
1047 MachineBasicBlock *mbb2 =
1048 indexes_->getMBBCoveringRange(itr->start, itr->end);
1057 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1058 /// interval on to-be re-materialized operands of MI) with new register.
1059 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1060 MachineInstr *MI, unsigned NewVReg,
1062 // There is an implicit use. That means one of the other operand is
1063 // being remat'ed and the remat'ed instruction has li.reg as an
1064 // use operand. Make sure we rewrite that as well.
1065 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1066 MachineOperand &MO = MI->getOperand(i);
// Only virtual registers can have been rematerialized.
1069 unsigned Reg = MO.getReg();
1070 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1072 if (!vrm.isReMaterialized(Reg))
// Find the use of li.reg inside the remat'ed def and redirect it to the
// newly created virtual register.
1074 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1075 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1077 UseMO->setReg(NewVReg);
1081 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1082 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
/// Rewrites one instruction's operands that reference li.reg onto a fresh
/// virtual register (possibly folding a load/store), updates VirtRegMap
/// bookkeeping, and extends/creates the new spill interval. Returns whether
/// all rewritten operands could be folded.
/// NOTE(review): listing elided — the MI parameter, the RestartInstruction
/// label, and several branches are not visible here.
1083 bool LiveIntervals::
1084 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1085 bool TrySplit, SlotIndex index, SlotIndex end,
1087 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1088 unsigned Slot, int LdSlot,
1089 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1091 const TargetRegisterClass* rc,
1092 SmallVector<int, 4> &ReMatIds,
1093 const MachineLoopInfo *loopInfo,
1094 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1095 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1096 std::vector<LiveInterval*> &NewLIs) {
1097 bool CanFold = false;
1099 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1100 MachineOperand& mop = MI->getOperand(i);
// Skip operands that are not this virtual register.
1103 unsigned Reg = mop.getReg();
1104 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1109 bool TryFold = !DefIsReMat;
1110 bool FoldSS = true; // Default behavior unless it's a remat.
1111 int FoldSlot = Slot;
1113 // If this is the rematerializable definition MI itself and
1114 // all of its uses are rematerialized, simply delete it.
1115 if (MI == ReMatOrigDefMI && CanDelete) {
1116 DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
1118 RemoveMachineInstrFromMaps(MI);
1119 vrm.RemoveMachineInstrFromMaps(MI);
1120 MI->eraseFromParent();
1124 // If def for this use can't be rematerialized, then try folding.
1125 // If def is rematerializable and it's a load, also try folding.
1126 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1128 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1134 // Scan all of the operands of this instruction rewriting operands
1135 // to use NewVReg instead of li.reg as appropriate. We do this for
1138 // 1. If the instr reads the same spilled vreg multiple times, we
1139 // want to reuse the NewVReg.
1140 // 2. If the instr is a two-addr instruction, we are required to
1141 // keep the src/dst regs pinned.
1143 // Keep track of whether we replace a use and/or def so that we can
1144 // create the spill interval with the appropriate range.
1145 SmallVector<unsigned, 2> Ops;
1146 tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);
1148 // Create a new virtual register for the spill interval.
1149 // Create the new register now so we can map the fold instruction
1150 // to the new register so when it is unfolded we get the correct
1152 bool CreatedNewVReg = false;
1154 NewVReg = mri_->createVirtualRegister(rc);
1156 CreatedNewVReg = true;
1158 // The new virtual register should get the same allocation hints as the
1160 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1161 if (Hint.first || Hint.second)
1162 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1168 // Do not fold load / store here if we are splitting. We'll find an
1169 // optimal point to insert a load / store later.
1171 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1172 Ops, FoldSS, FoldSlot, NewVReg)) {
1173 // Folding the load/store can completely change the instruction in
1174 // unpredictable ways, rescan it from the beginning.
1177 // We need to give the new vreg the same stack slot as the
1178 // spilled interval.
1179 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
// If folding removed the instruction from the maps there is nothing
// further to rewrite; otherwise restart the operand scan.
1185 if (isNotInMIMap(MI))
1187 goto RestartInstruction;
1190 // We'll try to fold it later if it's profitable.
1191 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand (and any implicit companions) to the new vreg.
1195 mop.setReg(NewVReg);
1196 if (mop.isImplicit())
1197 rewriteImplicitOps(li, MI, NewVReg, vrm);
1199 // Reuse NewVReg for other reads.
1200 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1201 MachineOperand &mopj = MI->getOperand(Ops[j]);
1202 mopj.setReg(NewVReg);
1203 if (mopj.isImplicit())
1204 rewriteImplicitOps(li, MI, NewVReg, vrm);
1207 if (CreatedNewVReg) {
// Record remat info / stack slot assignments for the new register.
1209 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1210 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1211 // Each valnum may have its own remat id.
1212 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1214 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1216 if (!CanDelete || (HasUse && HasDef)) {
1217 // If this is a two-addr instruction then its use operands are
1218 // rematerializable but its def is not. It should be assigned a
1220 vrm.assignVirt2StackSlot(NewVReg, Slot);
1223 vrm.assignVirt2StackSlot(NewVReg, Slot);
1225 } else if (HasUse && HasDef &&
1226 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1227 // If this interval hasn't been assigned a stack slot (because earlier
1228 // def is a deleted remat def), do it now.
1229 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1230 vrm.assignVirt2StackSlot(NewVReg, Slot);
1233 // Re-matting an instruction with virtual register use. Add the
1234 // register as an implicit use on the use MI.
1235 if (DefIsReMat && ImpUse)
1236 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1238 // Create a new register interval for this spill / remat.
1239 LiveInterval &nI = getOrCreateInterval(NewVReg);
1240 if (CreatedNewVReg) {
1241 NewLIs.push_back(&nI);
1242 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1244 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Extend the new interval to cover this instruction's use and/or def.
1248 if (CreatedNewVReg) {
1249 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1250 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1251 DEBUG(dbgs() << " +" << LR);
1254 // Extend the split live interval to this def / use.
1255 SlotIndex End = index.getDefIndex();
1256 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1257 nI.getValNumInfo(nI.getNumValNums()-1));
1258 DEBUG(dbgs() << " +" << LR);
1263 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1264 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1265 DEBUG(dbgs() << " +" << LR);
1270 dbgs() << "\t\t\t\tAdded new interval: ";
1271 nI.print(dbgs(), tri_);
/// anyKillInMBBAfterIdx - Return true if the value number has a (non-PHI)
/// kill inside MBB that comes after Idx.
/// NOTE(review): the continue/return lines are elided in this listing.
1277 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1279 MachineBasicBlock *MBB,
1280 SlotIndex Idx) const {
1281 SlotIndex End = getMBBEndIdx(MBB);
1282 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
// PHI kills do not correspond to a real instruction; skip them.
1283 if (VNI->kills[j].isPHI())
1286 SlotIndex KillIdx = VNI->kills[j];
// Kill must lie strictly after Idx but still within this block.
1287 if (KillIdx > Idx && KillIdx <= End)
1293 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1294 /// during spilling.
// NOTE(review): member declarations (Index, MI) are elided in this listing.
1296 struct RewriteInfo {
1299 RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
// Strict-weak ordering by slot index, used to sort RewriteInfo vectors.
1302 struct RewriteInfoCompare {
1303 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1304 return LHS.Index < RHS.Index;
/// rewriteInstructionsForSpills - Rewrite all defs and uses of li.reg inside
/// the live range I, collecting per-block spill and restore points into
/// SpillIdxes / RestoreIdxes so that addIntervalsForSpills can place the
/// actual spill / reload code afterwards.
/// NOTE(review): this listing is elided; several branches and loop-control
/// lines are not visible here.
1309 void LiveIntervals::
1310 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1311 LiveInterval::Ranges::const_iterator &I,
1312 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1313 unsigned Slot, int LdSlot,
1314 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1316 const TargetRegisterClass* rc,
1317 SmallVector<int, 4> &ReMatIds,
1318 const MachineLoopInfo *loopInfo,
1319 BitVector &SpillMBBs,
1320 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1321 BitVector &RestoreMBBs,
1322 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1323 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1324 std::vector<LiveInterval*> &NewLIs) {
1325 bool AllCanFold = true;
1326 unsigned NewVReg = 0;
1327 SlotIndex start = I->start.getBaseIndex();
1328 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1330 // First collect all the def / use in this live range that will be rewritten.
1331 // Make sure they are sorted according to instruction index.
1332 std::vector<RewriteInfo> RewriteMIs;
1333 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1334 re = mri_->reg_end(); ri != re; ) {
1335 MachineInstr *MI = &*ri;
1336 MachineOperand &O = ri.getOperand();
1338 if (MI->isDebugValue()) {
1339 // Modify DBG_VALUE now that the value is in a spill slot.
1340 if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
1341 uint64_t Offset = MI->getOperand(1).getImm();
1342 const MDNode *MDPtr = MI->getOperand(2).getMetadata();
1343 DebugLoc DL = MI->getDebugLoc();
1344 int FI = isLoadSS ? LdSlot : (int)Slot;
1345 if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
1346 Offset, MDPtr, DL)) {
1347 DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
1348 ReplaceMachineInstrInMaps(MI, NewDV);
1349 MachineBasicBlock *MBB = MI->getParent();
1350 MBB->insert(MBB->erase(MI), NewDV);
// If the DBG_VALUE could not be rewritten against the frame index,
// drop the debug info entirely rather than keep it stale.
1355 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1356 RemoveMachineInstrFromMaps(MI);
1357 vrm.RemoveMachineInstrFromMaps(MI);
1358 MI->eraseFromParent();
1361 assert(!(O.isImplicit() && O.isUse()) &&
1362 "Spilling register that's used as implicit use?");
1363 SlotIndex index = getInstructionIndex(MI);
// Only instructions within this live range are rewritten.
1364 if (index < start || index >= end)
1368 // Must be defined by an implicit def. It should not be spilled. Note,
1369 // this is for correctness reason. e.g.
1370 // 8   %reg1024<def> = IMPLICIT_DEF
1371 // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1372 // The live range [12, 14) are not part of the r1024 live interval since
1373 // it's defined by an implicit def. It will not conflicts with live
1374 // interval of r1025. Now suppose both registers are spilled, you can
1375 // easily see a situation where both registers are reloaded before
1376 // the INSERT_SUBREG and both target registers that would overlap.
1378 RewriteMIs.push_back(RewriteInfo(index, MI));
1380 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1382 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1383 // Now rewrite the defs and uses.
1384 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1385 RewriteInfo &rwi = RewriteMIs[i];
1387 SlotIndex index = rwi.Index;
1388 MachineInstr *MI = rwi.MI;
1389 // If MI def and/or use the same register multiple times, then there
1390 // are multiple entries.
1391 while (i != e && RewriteMIs[i].MI == MI) {
1392 assert(RewriteMIs[i].Index == index);
1395 MachineBasicBlock *MBB = MI->getParent();
1397 if (ImpUse && MI != ReMatDefMI) {
1398 // Re-matting an instruction with virtual register use. Prevent interval
1399 // from being spilled.
1400 getInterval(ImpUse).markNotSpillable();
1403 unsigned MBBId = MBB->getNumber();
1404 unsigned ThisVReg = 0;
// Reuse the vreg already chosen for this block, if any.
1406 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1407 if (NVI != MBBVRegsMap.end()) {
1408 ThisVReg = NVI->second;
1415 // It's better to start a new interval to avoid artifically
1416 // extend the new interval.
1417 if (MI->readsWritesVirtualRegister(li.reg) ==
1418 std::make_pair(false,true)) {
// Pure def (no read): drop the per-block mapping so a fresh vreg is used.
1419 MBBVRegsMap.erase(MBB->getNumber());
1425 bool IsNew = ThisVReg == 0;
1427 // This ends the previous live interval. If all of its def / use
1428 // can be folded, give it a low spill weight.
1429 if (NewVReg && TrySplit && AllCanFold) {
1430 LiveInterval &nI = getOrCreateInterval(NewVReg);
1437 bool HasDef = false;
1438 bool HasUse = false;
1439 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1440 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1441 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1442 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1443 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1444 if (!HasDef && !HasUse)
1447 AllCanFold &= CanFold;
1449 // Update weight of spill interval.
1450 LiveInterval &nI = getOrCreateInterval(NewVReg);
1452 // The spill weight is now infinity as it cannot be spilled again.
1453 nI.markNotSpillable();
1457 // Keep track of the last def and first use in each MBB.
1459 if (MI != ReMatOrigDefMI || !CanDelete) {
1460 bool HasKill = false;
1462 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1464 // If this is a two-address code, then this index starts a new VNInfo.
1465 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1467 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1469 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1470 SpillIdxes.find(MBBId);
1472 if (SII == SpillIdxes.end()) {
1473 std::vector<SRInfo> S;
1474 S.push_back(SRInfo(index, NewVReg, true));
1475 SpillIdxes.insert(std::make_pair(MBBId, S));
1476 } else if (SII->second.back().vreg != NewVReg) {
1477 SII->second.push_back(SRInfo(index, NewVReg, true));
1478 } else if (index > SII->second.back().index) {
1479 // If there is an earlier def and this is a two-address
1480 // instruction, then it's not possible to fold the store (which
1481 // would also fold the load).
1482 SRInfo &Info = SII->second.back();
1484 Info.canFold = !HasUse;
1486 SpillMBBs.set(MBBId);
1487 } else if (SII != SpillIdxes.end() &&
1488 SII->second.back().vreg == NewVReg &&
1489 index > SII->second.back().index) {
1490 // There is an earlier def that's not killed (must be two-address).
1491 // The spill is no longer needed.
1492 SII->second.pop_back();
1493 if (SII->second.empty()) {
1494 SpillIdxes.erase(MBBId);
1495 SpillMBBs.reset(MBBId);
// Use handling: record / adjust restore points for this block.
1502 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1503 SpillIdxes.find(MBBId);
1504 if (SII != SpillIdxes.end() &&
1505 SII->second.back().vreg == NewVReg &&
1506 index > SII->second.back().index)
1507 // Use(s) following the last def, it's not safe to fold the spill.
1508 SII->second.back().canFold = false;
1509 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1510 RestoreIdxes.find(MBBId);
1511 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1512 // If we are splitting live intervals, only fold if it's the first
1513 // use and there isn't another use later in the MBB.
1514 RII->second.back().canFold = false;
1516 // Only need a reload if there isn't an earlier def / use.
1517 if (RII == RestoreIdxes.end()) {
1518 std::vector<SRInfo> Infos;
1519 Infos.push_back(SRInfo(index, NewVReg, true));
1520 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1522 RII->second.push_back(SRInfo(index, NewVReg, true));
1524 RestoreMBBs.set(MBBId);
1528 // Update spill weight.
1529 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1530 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1533 if (NewVReg && TrySplit && AllCanFold) {
1534 // If all of its def / use can be folded, give it a low spill weight.
1535 LiveInterval &nI = getOrCreateInterval(NewVReg);
/// alsoFoldARestore - Return true if block Id contains a foldable restore of
/// register vr recorded at exactly this index.
/// NOTE(review): the return statements are elided in this listing.
1540 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1541 unsigned vr, BitVector &RestoreMBBs,
1542 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restore points recorded for this block at all.
1543 if (!RestoreMBBs[Id])
1545 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1546 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1547 if (Restores[i].index == index &&
1548 Restores[i].vreg == vr &&
1549 Restores[i].canFold)
1554 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1555 unsigned vr, BitVector &RestoreMBBs,
1556 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1557 if (!RestoreMBBs[Id])
1559 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1560 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1561 if (Restores[i].index == index && Restores[i].vreg)
1562 Restores[i].index = SlotIndex();
1565 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1566 /// spilled and create empty intervals for their uses.
// NOTE(review): listing elided — the return type line, iterator advance, and
// several branches are not visible here.
1568 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1569 const TargetRegisterClass* rc,
1570 std::vector<LiveInterval*> &NewLIs) {
1571 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1572 re = mri_->reg_end(); ri != re; ) {
1573 MachineOperand &O = ri.getOperand();
1574 MachineInstr *MI = &*ri;
1576 if (MI->isDebugValue()) {
1577 // Remove debug info for now.
1579 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
// A def of li.reg reaching this point must be an IMPLICIT_DEF; every other
// def was rewritten by the spill code.
1583 assert(MI->isImplicitDef() &&
1584 "Register def was not rewritten?");
1585 RemoveMachineInstrFromMaps(MI);
1586 vrm.RemoveMachineInstrFromMaps(MI);
1587 MI->eraseFromParent();
1589 // This must be an use of an implicit_def so it's not part of the live
1590 // interval. Create a new empty live interval for it.
1591 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1592 unsigned NewVReg = mri_->createVirtualRegister(rc);
1594 vrm.setIsImplicitlyDefined(NewVReg);
1595 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Redirect every operand of this instruction that still names li.reg.
1596 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1597 MachineOperand &MO = MI->getOperand(i);
1598 if (MO.isReg() && MO.getReg() == li.reg) {
// getSpillWeight - Compute a spill weight contribution for one def and/or
// use at the given loop depth.
// NOTE(review): the return type line and the clamp assignment under the
// depth check are elided in this listing.
1608 LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1609 // Limit the loop depth ridiculousness.
1610 if (loopDepth > 200)
1613 // The loop depth is used to roughly estimate the number of times the
1614 // instruction is executed. Something like 10^d is simple, but will quickly
1615 // overflow a float. This expression behaves like 10^d for small d, but is
1616 // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1617 // headroom before overflow.
1618 float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
// isDef/isUse each contribute one unit, scaled by the loop-depth factor.
1620 return (isDef + isUse) * lc;
// normalizeSpillWeights - Apply normalizeSpillWeight to every interval
// created by the spiller. (Return type line elided in this listing.)
1624 LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1625 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1626 normalizeSpillWeight(*NewLIs[i]);
/// addIntervalsForSpillsFast - Simple per-use spiller: assign one stack slot
/// to the interval, then for every instruction touching li.reg either fold
/// the memory operand or rewrite it onto a fresh unspillable vreg with a
/// tiny interval, recording spill/restore points in the VirtRegMap.
/// NOTE(review): listing elided — several branches, the use/def distinction
/// around the two LiveRange constructions, and the return are not visible.
1629 std::vector<LiveInterval*> LiveIntervals::
1630 addIntervalsForSpillsFast(const LiveInterval &li,
1631 const MachineLoopInfo *loopInfo,
1633 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1635 std::vector<LiveInterval*> added;
1637 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1640 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1645 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1647 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1648 while (RI != mri_->reg_end()) {
1649 MachineInstr* MI = &*RI;
1651 SmallVector<unsigned, 2> Indices;
1652 bool HasUse, HasDef;
1653 tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(li.reg, &Indices);
// First try to fold the stack slot directly into the instruction.
1655 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1656 Indices, true, slot, li.reg)) {
1657 unsigned NewVReg = mri_->createVirtualRegister(rc);
1659 vrm.assignVirt2StackSlot(NewVReg, slot);
1661 // create a new register for this spill
1662 LiveInterval &nI = getOrCreateInterval(NewVReg);
1663 nI.markNotSpillable();
1665 // Rewrite register operands to use the new vreg.
1666 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1667 E = Indices.end(); I != E; ++I) {
1668 MI->getOperand(*I).setReg(NewVReg);
// The reload value dies at this instruction; mark the use as a kill.
1670 if (MI->getOperand(*I).isUse())
1671 MI->getOperand(*I).setIsKill(true);
1674 // Fill in the new live interval.
1675 SlotIndex index = getInstructionIndex(MI);
1677 LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1678 nI.getNextValue(SlotIndex(), 0, false,
1679 getVNInfoAllocator()));
1680 DEBUG(dbgs() << " +" << LR);
1682 vrm.addRestorePoint(NewVReg, MI);
1685 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1686 nI.getNextValue(SlotIndex(), 0, false,
1687 getVNInfoAllocator()));
1688 DEBUG(dbgs() << " +" << LR);
1690 vrm.addSpillPoint(NewVReg, true, MI);
1693 added.push_back(&nI);
1696 dbgs() << "\t\t\t\tadded new interval: ";
// Folding / rewriting invalidates the iterator; restart the scan.
1703 RI = mri_->reg_begin(li.reg);
/// addIntervalsForSpills - The main spilling driver: decides per value
/// number whether to rematerialize or spill, rewrites all defs/uses via
/// rewriteInstructionsForSpills, then emits/folds the recorded spill and
/// restore points and finalizes the new intervals' kills and weights.
/// NOTE(review): this listing is elided; numerous branches, loop-control
/// lines, and the returns are not visible here.
1709 std::vector<LiveInterval*> LiveIntervals::
1710 addIntervalsForSpills(const LiveInterval &li,
1711 SmallVectorImpl<LiveInterval*> &SpillIs,
1712 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1714 if (EnableFastSpilling)
1715 return addIntervalsForSpillsFast(li, loopInfo, vrm);
1717 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1720 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1721 li.print(dbgs(), tri_);
1725 // Each bit specify whether a spill is required in the MBB.
1726 BitVector SpillMBBs(mf_->getNumBlockIDs());
1727 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1728 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1729 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1730 DenseMap<unsigned,unsigned> MBBVRegsMap;
1731 std::vector<LiveInterval*> NewLIs;
1732 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-value-number rematerialization state, indexed by VNInfo id.
1734 unsigned NumValNums = li.getNumValNums();
1735 SmallVector<MachineInstr*, 4> ReMatDefs;
1736 ReMatDefs.resize(NumValNums, NULL);
1737 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1738 ReMatOrigDefs.resize(NumValNums, NULL);
1739 SmallVector<int, 4> ReMatIds;
1740 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1741 BitVector ReMatDelete(NumValNums);
1742 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1744 // Spilling a split live interval. It cannot be split any further. Also,
1745 // it's also guaranteed to be a single val# / range interval.
1746 if (vrm.getPreSplitReg(li.reg)) {
1747 vrm.setIsSplitFromReg(li.reg, 0);
1748 // Unset the split kill marker on the last use.
1749 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1750 if (KillIdx != SlotIndex()) {
1751 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1752 assert(KillMI && "Last use disappeared?");
1753 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1754 assert(KillOp != -1 && "Last use disappeared?");
1755 KillMI->getOperand(KillOp).setIsKill(false);
1757 vrm.removeKillPoint(li.reg);
1758 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1759 Slot = vrm.getStackSlot(li.reg);
1760 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1761 MachineInstr *ReMatDefMI = DefIsReMat ?
1762 vrm.getReMaterializedMI(li.reg) : NULL;
1764 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1765 bool isLoad = isLoadSS ||
1766 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1767 bool IsFirstRange = true;
1768 for (LiveInterval::Ranges::const_iterator
1769 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1770 // If this is a split live interval with multiple ranges, it means there
1771 // are two-address instructions that re-defined the value. Only the
1772 // first def can be rematerialized!
1774 // Note ReMatOrigDefMI has already been deleted.
1775 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1776 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1777 false, vrm, rc, ReMatIds, loopInfo,
1778 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1779 MBBVRegsMap, NewLIs);
// Subsequent ranges of a split interval: no remat is possible.
1781 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1782 Slot, 0, false, false, false,
1783 false, vrm, rc, ReMatIds, loopInfo,
1784 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1785 MBBVRegsMap, NewLIs);
1787 IsFirstRange = false;
1790 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1791 normalizeSpillWeights(NewLIs);
// Non-presplit path: try splitting when the interval spans multiple blocks.
1795 bool TrySplit = !intervalIsInOneMBB(li);
1798 bool NeedStackSlot = false;
1799 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1801 const VNInfo *VNI = *i;
1802 unsigned VN = VNI->id;
1803 if (VNI->isUnused())
1804 continue; // Dead val#.
1805 // Is the def for the val# rematerializable?
1806 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1807 ? getInstructionFromIndex(VNI->def) : 0;
1809 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1810 // Remember how to remat the def of this val#.
1811 ReMatOrigDefs[VN] = ReMatDefMI;
1812 // Original def may be modified so we have to make a copy here.
1813 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1814 CloneMIs.push_back(Clone);
1815 ReMatDefs[VN] = Clone;
1817 bool CanDelete = true;
1818 if (VNI->hasPHIKill()) {
1819 // A kill is a phi node, not all of its uses can be rematerialized.
1820 // It must not be deleted.
1822 // Need a stack slot if there is any live range where uses cannot be
1824 NeedStackSlot = true;
// Fully rematerializable val#: the original def may be deleted.
1827 ReMatDelete.set(VN);
1829 // Need a stack slot if there is any live range where uses cannot be
1831 NeedStackSlot = true;
1835 // One stack slot per live interval.
1836 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1837 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1838 Slot = vrm.assignVirt2StackSlot(li.reg);
1840 // This case only occurs when the prealloc splitter has already assigned
1841 // a stack slot to this vreg.
1843 Slot = vrm.getStackSlot(li.reg);
1846 // Create new intervals and rewrite defs and uses.
1847 for (LiveInterval::Ranges::const_iterator
1848 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1849 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1850 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1851 bool DefIsReMat = ReMatDefMI != NULL;
1852 bool CanDelete = ReMatDelete[I->valno->id];
1854 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1855 bool isLoad = isLoadSS ||
1856 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1857 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1858 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1859 CanDelete, vrm, rc, ReMatIds, loopInfo,
1860 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1861 MBBVRegsMap, NewLIs);
1864 // Insert spills / restores if we are splitting.
1866 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1867 normalizeSpillWeights(NewLIs);
// Emit (or fold) one spill per recorded spill point.
1871 SmallPtrSet<LiveInterval*, 4> AddedKill;
1872 SmallVector<unsigned, 2> Ops;
1873 if (NeedStackSlot) {
1874 int Id = SpillMBBs.find_first();
1876 std::vector<SRInfo> &spills = SpillIdxes[Id];
1877 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1878 SlotIndex index = spills[i].index;
1879 unsigned VReg = spills[i].vreg;
1880 LiveInterval &nI = getOrCreateInterval(VReg);
1881 bool isReMat = vrm.isReMaterialized(VReg);
1882 MachineInstr *MI = getInstructionFromIndex(index);
1883 bool CanFold = false;
1884 bool FoundUse = false;
1886 if (spills[i].canFold) {
1888 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1889 MachineOperand &MO = MI->getOperand(j);
1890 if (!MO.isReg() || MO.getReg() != VReg)
1897 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1898 RestoreMBBs, RestoreIdxes))) {
1899 // MI has two-address uses of the same register. If the use
1900 // isn't the first and only use in the BB, then we can't fold
1901 // it. FIXME: Move this to rewriteInstructionsForSpills.
1908 // Fold the store into the def if possible.
1909 bool Folded = false;
1910 if (CanFold && !Ops.empty()) {
1911 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1914 // Also folded uses, do not issue a load.
1915 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1916 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1918 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1922 // Otherwise tell the spiller to issue a spill.
1924 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1925 bool isKill = LR->end == index.getStoreIndex();
1926 if (!MI->registerDefIsDead(nI.reg))
1927 // No need to spill a dead def.
1928 vrm.addSpillPoint(VReg, isKill, MI);
1930 AddedKill.insert(&nI);
1933 Id = SpillMBBs.find_next(Id);
// Emit (or fold) one reload / remat per recorded restore point.
1937 int Id = RestoreMBBs.find_first();
1939 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1940 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1941 SlotIndex index = restores[i].index;
// An invalid index marks an entry erased by eraseRestoreInfo; skip it.
1942 if (index == SlotIndex())
1944 unsigned VReg = restores[i].vreg;
1945 LiveInterval &nI = getOrCreateInterval(VReg);
1946 bool isReMat = vrm.isReMaterialized(VReg);
1947 MachineInstr *MI = getInstructionFromIndex(index);
1948 bool CanFold = false;
1950 if (restores[i].canFold) {
1952 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1953 MachineOperand &MO = MI->getOperand(j);
1954 if (!MO.isReg() || MO.getReg() != VReg)
1958 // If this restore were to be folded, it would have been folded
1967 // Fold the load into the use if possible.
1968 bool Folded = false;
1969 if (CanFold && !Ops.empty()) {
1971 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1973 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1975 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1976 // If the rematerializable def is a load, also try to fold it.
1977 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1978 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1979 Ops, isLoadSS, LdSlot, VReg);
1981 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1983 // Re-matting an instruction with virtual register use. Add the
1984 // register as an implicit use on the use MI and mark the register
1985 // interval as unspillable.
1986 LiveInterval &ImpLi = getInterval(ImpUse);
1987 ImpLi.markNotSpillable();
1988 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1993 // If folding is not possible / failed, then tell the spiller to issue a
1994 // load / rematerialization for us.
1996 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1998 vrm.addRestorePoint(VReg, MI);
2000 Id = RestoreMBBs.find_next(Id);
2003 // Finalize intervals: add kills, finalize spill weights, and filter out
2005 std::vector<LiveInterval*> RetNewLIs;
2006 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2007 LiveInterval *LI = NewLIs[i];
2009 LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
2010 if (!AddedKill.count(LI)) {
2011 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2012 SlotIndex LastUseIdx = LR->end.getBaseIndex();
2013 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2014 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2015 assert(UseIdx != -1);
2016 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2017 LastUse->getOperand(UseIdx).setIsKill();
2018 vrm.addKillPoint(LI->reg, LastUseIdx);
2021 RetNewLIs.push_back(LI);
2025 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2026 normalizeSpillWeights(RetNewLIs);
2030 /// hasAllocatableSuperReg - Return true if the specified physical register has
2031 /// any super register that's allocatable.
// NOTE(review): the return statements are elided in this listing.
2032 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2033 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
// A super-register qualifies only if it is allocatable AND tracked here.
2034 if (allocatableRegs_[*AS] && hasInterval(*AS))
2039 /// getRepresentativeReg - Find the largest super register of the specified
2040 /// physical register.
2041 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2042 // Find the largest super-register that is allocatable.
// Default to Reg itself; it is returned unchanged when no qualifying
// super-register is found.
2043 unsigned BestReg = Reg;
2044 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2045 unsigned SuperReg = *AS;
// "Largest" here means: SuperReg itself has no allocatable super-register
// above it (so nothing bigger would do), yet it has a live interval of its
// own. Such a SuperReg becomes the representative.
2046 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2054 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2055 /// specified interval that conflicts with the specified physical register.
2056 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2057 unsigned PhysReg) const {
2058 unsigned NumConflicts = 0;
// Conflicts are checked against the interval of the representative
// (largest allocatable) super-register of PhysReg, not PhysReg directly.
2059 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
// Walk every operand (both defs and uses) that mentions li's virtual reg.
2060 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2061 E = mri_->reg_end(); I != E; ++I) {
2062 MachineOperand &O = I.getOperand();
2063 MachineInstr *MI = O.getParent();
// DBG_VALUE instructions carry no register pressure; presumably skipped
// here (the branch body is not visible in this excerpt).
2064 if (MI->isDebugValue())
2066 SlotIndex Index = getInstructionIndex(MI);
// If the physreg interval is live at this def/use, it is a conflict and
// (per the function's contract above) contributes to the count.
2067 if (pli.liveAt(Index))
2070 return NumConflicts;
2073 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2074 /// around all defs and uses of the specified interval. Return true if it
2075 /// was able to cut its interval.
2076 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2077 unsigned PhysReg, VirtRegMap &vrm) {
// Operate on the representative (largest allocatable) super-register so
// that all overlapping sub-registers are handled together.
2078 unsigned SpillReg = getRepresentativeReg(PhysReg);
2080 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2081 // If there are registers which alias PhysReg, but which are not a
2082 // sub-register of the chosen representative super register. Assert
2083 // since we can't handle it yet.
2084 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2085 tri_->isSuperRegister(*AS, SpillReg));
// Collect SpillReg plus every sub-register of it that has a live interval;
// these are the physreg intervals we must cut around li's defs/uses.
2088 SmallVector<unsigned, 4> PRegs;
2089 if (hasInterval(SpillReg))
2090 PRegs.push_back(SpillReg);
// Added dedups sub-registers: insert() returns false for one already seen.
2092 SmallSet<unsigned, 4> Added;
2093 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
2094 if (Added.insert(*AS) && hasInterval(*AS)) {
2095 PRegs.push_back(*AS);
// Mark the sub-sub-registers too (loop body elided in this excerpt;
// presumably they are inserted into Added so they are not re-added).
2096 for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
// SeenMIs guards against processing an instruction twice when it has
// multiple operands referencing li.reg.
2101 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2102 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2103 E = mri_->reg_end(); I != E; ++I) {
2104 MachineOperand &O = I.getOperand();
2105 MachineInstr *MI = O.getParent();
// Skip DBG_VALUEs and already-visited instructions (branch body elided;
// presumably a continue).
2106 if (MI->isDebugValue() || SeenMIs.count(MI))
2109 SlotIndex Index = getInstructionIndex(MI);
2110 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2111 unsigned PReg = PRegs[i];
2112 LiveInterval &pli = getInterval(PReg);
// Only physregs live across this instruction need an emergency spill.
2113 if (!pli.liveAt(Index))
// Tell the rewriter to spill/restore PReg around MI so the slot can be
// reused here.
2115 vrm.addEmergencySpill(PReg, MI);
// Carve [load-slot of MI, base index of the next instruction) out of the
// physreg interval, i.e. exactly the footprint of this instruction.
2116 SlotIndex StartIdx = Index.getLoadIndex();
2117 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
// The hole can only be punched if it lies within a single live range;
// otherwise the interval cannot be cut and allocation has truly failed.
2118 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2119 pli.removeRange(StartIdx, EndIdx);
// Failure path: build a diagnostic and abort compilation.
2123 raw_string_ostream Msg(msg);
2124 Msg << "Ran out of registers during register allocation!";
// Inline asm with bad constraints is the common way to get here, so call
// it out and dump the offending instruction for the user.
2125 if (MI->isInlineAsm()) {
2126 Msg << "\nPlease check your inline asm statement for invalid "
2127 << "constraints:\n";
2128 MI->print(Msg, tm_);
2130 report_fatal_error(Msg.str());
// After cutting PReg's interval, also clear any sub-register intervals
// that are live at this point so they stay consistent with the parent.
2132 for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
2133 if (!hasInterval(*AS))
2135 LiveInterval &spli = getInterval(*AS);
2136 if (spli.liveAt(Index))
2137 spli.removeRange(Index.getLoadIndex(),
2138 Index.getNextIndex().getBaseIndex());
2145 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2146 MachineInstr* startInst) {
2147 LiveInterval& Interval = getOrCreateInterval(reg);
2148 VNInfo* VN = Interval.getNextValue(
2149 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2150 startInst, true, getVNInfoAllocator());
2151 VN->setHasPHIKill(true);
2152 VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
2154 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2155 getMBBEndIdx(startInst->getParent()), VN);
2156 Interval.addRange(LR);