//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.DestroyAll();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      if (mii->isDebugValue())
        OS << "    " << *mii;
      else
        OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}

bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
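  // Reader's note (an assumption about the SlotIndexes model; see
  // SlotIndexes.h for the authoritative definition): each instruction index
  // is subdivided into load/use/def/store slots, which is why both endpoints
  // are normalized to base indexes before walking the range one instruction
  // at a time.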

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}

/// conflictsWithSubPhysRegRef - Similar to conflictsWithPhysReg except that
/// it checks for sub-register references and it can also check uses.
bool LiveIntervals::conflictsWithSubPhysRegRef(LiveInterval &li,
                                          unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  else
    dbgs() << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
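    // Reader's note: with per-instruction load/use/def/store slots, placing
    // an earlyclobber def on the use slot makes it overlap the operands read
    // at the same instruction; e.g. a def at index 8 moves from slot "8d" to
    // "8u" (the slot names here are purely illustrative).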

    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
           E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create interval with a NEW value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
                                      VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else {
      assert(lv_->isPHIJoin(interval.reg) && "Multiply defined register");
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    }
  }

  DEBUG(dbgs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (mi->registerDefIsDead(interval.reg, tri_)) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing
  // instruction, or an instruction where we know it's dead, is if it is
  // live-in to the function and never used. Another possible case is that
  // the implicit use of the physical register has been deleted by the
  // two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno
    : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills; if it reaches a def before it's killed, then it shouldn't
  // be considered a live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = getMBBEndIdx(MBB);
    }
  }

  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}

/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j), where 1 <= i <= j < N, in
/// which a variable is live.
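/// For example, with instructions at indexes 4, 8 and 12, a register defined
/// at 4 and last read at 12 would get the live interval [4, 12). (The numbers
/// are illustrative; the actual indexes come from SlotIndexes.)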
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
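  // Physical registers get an effectively infinite weight so the allocator
  // never tries to spill them; virtual registers start at zero and accumulate
  // weight from their uses (see getSpillWeight below).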
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->isExtractSubreg()) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
      unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
      if (SrcSubReg == DstSubReg)
        // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
        // reg1034 can still be coalesced to EDX.
        return Reg;
      assert(DstSubReg == 0);
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    }
    return Reg;
  } else if (VNI->getCopy()->isInsertSubreg() ||
             VNI->getCopy()->isSubregToReg())
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}

/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// the function returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  SlotIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHI())
      continue;

    SlotIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx <= End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                           Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
          continue;
        }
      }

      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      continue;
    }
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // If this instruction only defines the register (no use in this MBB),
        // it's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                                index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                                Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                                ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB,
                                         index.getDefIndex());
        else {
          // If this is two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                                     DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                                     DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg)
      Restores[i].index = SlotIndex();
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    if (O.isDef()) {
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}

float
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
    loopDepth = 200;

  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
  // headroom before overflow.
  float lc = powf(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
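  // Sample values (rounded): d=0 -> lc=1, d=1 -> lc~10.1, d=2 -> lc~87,
  // d=3 -> lc~657; close to 10^d at first, then growing more slowly.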

  return (isDef + isUse) * lc;
}

void
LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
    normalizeSpillWeight(*NewLIs[i]);
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.dump();
      dbgs() << '\n';
    });

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // Create a new register interval for this spill.
      LiveInterval &nI = getOrCreateInterval(NewVReg);
      nI.markNotSpillable();

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
             E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      SlotIndex index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DEBUG({
          dbgs() << "\t\t\t\tadded new interval: ";
          nI.dump();
          dbgs() << '\n';
        });
    }

    RI = mri_->reg_begin(li.reg);
  }

  return added;
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);
    else
      // This case only occurs when the prealloc splitter has already assigned
      // a stack slot to this vreg.
      Slot = vrm.getStackSlot(li.reg);
  }
  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                                 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                 CanDelete, vrm, rc, ReMatIds, loopInfo,
                                 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                 MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }
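
  // From here on we are splitting across basic blocks. SpillMBBs/SpillIdxes
  // and RestoreMBBs/RestoreIdxes, filled in by rewriteInstructionsForSpills,
  // record per block where a store to or reload from the stack slot is still
  // required; each candidate is first checked for folding into a neighboring
  // instruction.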
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
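
  // Each machine instruction owns four consecutive slot indexes in this
  // numbering (load, use, def and store slots), so e.g. removing the range
  // [index.getLoadIndex(), index.getDefIndex()) from an interval below
  // deletes exactly the value reloaded just before the instruction.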
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (MO.isImplicit() ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
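
  // Process restore points the same way: try to fold each reload into its
  // use (or, when the def was rematerialized from a load, fold that load);
  // whatever cannot be folded becomes an explicit restore point.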
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and mark the
              // register interval as unspillable.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.markNotSpillable();
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }
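
  // Dividing below by SlotIndex::NUM times the approximate instruction count
  // turns each new interval's accumulated use weight into a per-instruction
  // density, so longer, sparsely used intervals stay the preferred spill
  // candidates.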
  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}
/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}
/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
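
// For instance, on x86 the representative of AL is typically EAX (or RAX),
// so the emergency spill code below frees the whole aliasing group at once.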
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue())
      continue;
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg but are not sub-registers
    // of the chosen representative super register, assert; we can't handle
    // that case yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));
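
  // This is an emergency spill: the allocator has run out of registers, so
  // SpillReg must be freed around every def and use of li. A hole is punched
  // in each conflicting physical interval at the offending instruction; if
  // the conflict does not fall within a single live range there is nowhere
  // safe to reload, and allocation fails with a fatal error.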
  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }
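
  // PRegs now holds the physical intervals that actually carry liveness:
  // the representative register itself if it has an interval, otherwise its
  // outermost live sub-registers.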
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue() || SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        report_fatal_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }

  return Cut;
}
/// addLiveRangeToEndOfBlock - Create a live range for the given register,
/// starting at the specified defining instruction and extending to the end
/// of its basic block.
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
  LiveRange LR(
     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
     getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}