//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;
  r2iMap_.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
/// runOnMachineFunction - Compute live intervals for the whole function.
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();
  numIntervals += getNumIntervals();
  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      if (mii->isDebugValue())
        OS << SlotIndex::getEmptyKey() << '\t' << *mii;
      else
        OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
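  // Note: start and end are rounded out here to whole-instruction (base)
  // indices, so the scans below can step through the range one instruction
  // at a time.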
  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}
/// conflictsWithSubPhysRegRef - Similar to conflictsWithPhysReg, except
/// it checks for sub-register references and it can check uses as well.
bool LiveIntervals::conflictsWithSubPhysRegRef(LiveInterval &li,
                                  unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions
      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  else
    dbgs() << "%reg" << reg;
}
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // defined by the instruction.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
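    // (Illustrative: starting the range at the use slot makes an
    // early-clobber def interfere with the instruction's own register uses,
    // which is exactly the constraint early-clobber imposes.)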
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
                                          VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create a live range with a new value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
                                      VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(dbgs() << " +" << LR);
    }
  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRange that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);
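      // Illustrative example (hypothetical vreg numbers):
      //   %reg1025 = MOV %reg1024      ; originally defines value #0
      //   %reg1025 = ADD %reg1025, 1   ; two-address redefinition
      // After this rewrite, value #0 belongs to the ADD's def, and the new
      // value #1 covers the MOV's def up to the redefinition.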
      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else {
      assert(lv_->isPHIJoin(interval.reg) && "Multiply defined register");
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    }
  }

  DEBUG(dbgs() << '\n');
}
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
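  // (Rough illustration: a dead def occupies only the tiny slice from its
  // def slot to its store slot -- a one-slot "unit" live range.)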
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing or
  // defining instruction where we know it's dead is if it is live-in to the
  // function and never used. Another possible case is the implicit use of
  // the physical register has been deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
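    // Illustrative: on x86, a def of EAX also clobbers the sub-registers
    // AX, AH and AL, so each of them gets a range here too (unless the
    // instruction redefines the sub-register explicitly, as checked below).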
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  MachineBasicBlock::iterator E = MBB->end();
  while (mi != E) {
    while (mi != E && mi->isDebugValue())
      ++mi;
    if (mi == E)
      break;

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    if (mi != E && !mi->isDebugValue()) {
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
      ++mi;
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = getMBBEndIdx(MBB);
    }
  }

  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}
/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j), where 1 <= i <= j < N, for
/// which a variable is live.
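///
/// Note on indexing (rough sketch): SlotIndexes gives each instruction a
/// group of slots -- load, use, def and store -- which is why the ranges in
/// this file are built from getLoadIndex(), getUseIndex(), getDefIndex()
/// and getStoreIndex() rather than from a single per-instruction number.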
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << MBB->getName() << ":\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks.
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
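  // Physical registers get an effectively infinite weight so the allocator
  // never tries to spill them; virtual registers start at weight zero and
  // accumulate weight via getSpillWeight() below.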
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}
/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->isExtractSubreg()) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
      unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
      if (SrcSubReg == DstSubReg)
        // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
        // reg1034 can still be coalesced to EDX.
        return Reg;
      assert(DstSubReg == 0);
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    }
    return Reg;
  } else if (VNI->getCopy()->isInsertSubreg() ||
             VNI->getCopy()->isSubregToReg())
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(RegOp == 0 &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}
/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
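/// (E.g., on x86 a reload from a spill slot feeding an add can typically be
/// folded into a single add-from-memory instruction; whether that is legal
/// here is delegated to the target via TargetInstrInfo below.)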
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);
    if (mbb2 != mbb)
      return false;
  }

  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
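  // Illustrative (hypothetical vregs): if %reg1025's def was rematerialized
  // and the remat'ed instruction reads li.reg (say %reg1024), that embedded
  // use must be updated to NewVReg too, or the clone would still refer to
  // the spilled register.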
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                            bool TrySplit, SlotIndex index, SlotIndex end,
                            MachineInstr *MI,
                            MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                            unsigned Slot, int LdSlot,
                            bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                            VirtRegMap &vrm,
                            const TargetRegisterClass* rc,
                            SmallVector<int, 4> &ReMatIds,
                            const MachineLoopInfo *loopInfo,
                            unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                            DenseMap<unsigned,unsigned> &MBBVRegsMap,
                            std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old register.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  SlotIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHI())
      continue;

    SlotIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx <= End)
      return true;
  }
  return false;
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                             LiveInterval::Ranges::const_iterator &I,
                             MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                             unsigned Slot, int LdSlot,
                             bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                             VirtRegMap &vrm,
                             const TargetRegisterClass* rc,
                             SmallVector<int, 4> &ReMatIds,
                             const MachineLoopInfo *loopInfo,
                             BitVector &SpillMBBs,
                             DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                             BitVector &RestoreMBBs,
                             DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                             DenseMap<unsigned,unsigned> &MBBVRegsMap,
                             std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reason. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                                               index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                               CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                                               ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else
          RII->second.push_back(SRInfo(index, NewVReg, true));
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                                     DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}
void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                                     DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = SlotIndex();
}
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
          MO.setIsKill(false);
        }
      }
    }
  }
}
float
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
    loopDepth = 200;

  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
  // headroom before overflow.
  float lc = powf(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
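  // Hand-computed examples: loopDepth 1 gives lc ~= 10.1, depth 2 ~= 87,
  // depth 3 ~= 657 -- close to 10^depth for shallow nests, as intended.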
  return (isDef + isUse) * lc;
}
void
LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
    normalizeSpillWeight(*NewLIs[i]);
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.dump();
      dbgs() << '\n';
    });

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // create a new register for this spill
      LiveInterval &nI = getOrCreateInterval(NewVReg);
      nI.markNotSpillable();

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
             E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      SlotIndex index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DEBUG({
          dbgs() << "\t\t\t\tadded new interval: ";
          nI.dump();
          dbgs() << '\n';
        });
    }

    RI = mri_->reg_begin(li.reg);
  }

  return added;
}
1654 std::vector<LiveInterval*> LiveIntervals::
1655 addIntervalsForSpills(const LiveInterval &li,
1656 SmallVectorImpl<LiveInterval*> &SpillIs,
1657 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1659 if (EnableFastSpilling)
1660 return addIntervalsForSpillsFast(li, loopInfo, vrm);
1662 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1665 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1666 li.print(dbgs(), tri_);
1670 // Each bit specify whether a spill is required in the MBB.
1671 BitVector SpillMBBs(mf_->getNumBlockIDs());
1672 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1673 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1674 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1675 DenseMap<unsigned,unsigned> MBBVRegsMap;
1676 std::vector<LiveInterval*> NewLIs;
1677 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1679 unsigned NumValNums = li.getNumValNums();
1680 SmallVector<MachineInstr*, 4> ReMatDefs;
1681 ReMatDefs.resize(NumValNums, NULL);
1682 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1683 ReMatOrigDefs.resize(NumValNums, NULL);
1684 SmallVector<int, 4> ReMatIds;
1685 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1686 BitVector ReMatDelete(NumValNums);
1687 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1689 // Spilling a split live interval. It cannot be split any further. Also,
1690 // it's also guaranteed to be a single val# / range interval.
1691 if (vrm.getPreSplitReg(li.reg)) {
1692 vrm.setIsSplitFromReg(li.reg, 0);
1693 // Unset the split kill marker on the last use.
1694 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1695 if (KillIdx != SlotIndex()) {
1696 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1697 assert(KillMI && "Last use disappeared?");
1698 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1699 assert(KillOp != -1 && "Last use disappeared?");
1700 KillMI->getOperand(KillOp).setIsKill(false);
1702 vrm.removeKillPoint(li.reg);
1703 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1704 Slot = vrm.getStackSlot(li.reg);
1705 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1706 MachineInstr *ReMatDefMI = DefIsReMat ?
1707 vrm.getReMaterializedMI(li.reg) : NULL;
1709 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1710 bool isLoad = isLoadSS ||
1711 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1712 bool IsFirstRange = true;
1713 for (LiveInterval::Ranges::const_iterator
1714 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1715 // If this is a split live interval with multiple ranges, it means there
1716 // are two-address instructions that re-defined the value. Only the
1717 // first def can be rematerialized!
1719 // Note ReMatOrigDefMI has already been deleted.
1720 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1721 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1722 false, vrm, rc, ReMatIds, loopInfo,
1723 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1724 MBBVRegsMap, NewLIs);
1726 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1727 Slot, 0, false, false, false,
1728 false, vrm, rc, ReMatIds, loopInfo,
1729 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1730 MBBVRegsMap, NewLIs);
1732 IsFirstRange = false;
1735 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1736 normalizeSpillWeights(NewLIs);
1740 bool TrySplit = !intervalIsInOneMBB(li);
1743 bool NeedStackSlot = false;
1744 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1746 const VNInfo *VNI = *i;
1747 unsigned VN = VNI->id;
1748 if (VNI->isUnused())
1749 continue; // Dead val#.
1750 // Is the def for the val# rematerializable?
1751 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1752 ? getInstructionFromIndex(VNI->def) : 0;
1754 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1755 // Remember how to remat the def of this val#.
1756 ReMatOrigDefs[VN] = ReMatDefMI;
1757 // Original def may be modified so we have to make a copy here.
1758 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1759 CloneMIs.push_back(Clone);
1760 ReMatDefs[VN] = Clone;
1762 bool CanDelete = true;
1763 if (VNI->hasPHIKill()) {
1764 // A kill is a phi node, not all of its uses can be rematerialized.
1765 // It must not be deleted.
1767 // Need a stack slot if there is any live range where uses cannot be
1769 NeedStackSlot = true;
1772 ReMatDelete.set(VN);
1774 // Need a stack slot if there is any live range where uses cannot be
1776 NeedStackSlot = true;
1780 // One stack slot per live interval.
1781 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1782 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1783 Slot = vrm.assignVirt2StackSlot(li.reg);
1785 // This case only occurs when the prealloc splitter has already assigned
1786 // a stack slot to this vreg.
1788 Slot = vrm.getStackSlot(li.reg);
  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores only if we are splitting; otherwise we are done.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }
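
        // For example (illustrative only; exact opcodes vary by target), on
        // x86 folding the store into a two-address def rewrites
        //   %reg = ADD32ri %reg, 8   followed by a store to <fi#n>
        // into the single memory-form instruction
        //   ADD32mi <fi#n>, 8
        // which is why the def->store portion of the new interval (and, when
        // uses were folded too, the load->def portion) is removed above.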

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);

          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and mark the register
            // interval as unspillable.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.markNotSpillable();
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}
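
// A sketch of how a register allocator might drive the code above; the names
// 'lis', 'vrm', 'cur' and 'unhandled' and the surrounding loop are
// hypothetical, not part of this file:
//
//   SmallVector<LiveInterval*, 8> SpillIs; // other intervals spilled with cur
//   std::vector<LiveInterval*> Added =
//     lis->addIntervalsForSpills(*cur, SpillIs, loopInfo, *vrm);
//   for (unsigned i = 0, e = Added.size(); i != e; ++i)
//     unhandled.push(Added[i]); // re-queue the shorter replacement intervals
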
/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
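
// For instance, on x86 a query for a sub-register such as AX would walk up to
// the widest super-register (EAX, or RAX in 64-bit mode) that has an interval
// but no allocatable super-register of its own. This is a descriptive example;
// the exact result depends on which registers currently have intervals.
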
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}
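
// A sketch of a hypothetical caller: when forced to reuse a live physical
// register, an allocator can rank candidates by conflict count. 'lis', 'cur'
// and 'Candidates' are illustrative names, not part of this file:
//
//   unsigned Best = Candidates[0], BestCount = ~0u;
//   for (unsigned i = 0, e = Candidates.size(); i != e; ++i) {
//     unsigned N = lis->getNumConflictsWithPhysReg(*cur, Candidates[i]);
//     if (N < BestCount) { BestCount = N; Best = Candidates[i]; }
//   }
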
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg,
                                                  VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there is a register which aliases PhysReg but is not a sub-register
    // of the chosen representative super register, assert, since we can't
    // handle that case yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }

  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        llvm_report_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}
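
// A sketch of a hypothetical caller: an allocator that must hand a live
// physical register to a virtual register can first carve the physreg's
// interval around the vreg's defs and uses. 'lis', 'vrm', 'cur' and
// 'assignPhysReg' are illustrative names, not part of this file:
//
//   if (lis->spillPhysRegAroundRegDefsUses(*cur, PhysReg, *vrm))
//     assignPhysReg(*cur, PhysReg);
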
/// addLiveRangeToEndOfBlock - Given a register and the instruction that
/// defines it, add a live range extending from that instruction to the end
/// of its basic block.
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
  LiveRange LR(
     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
     getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}
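
// A sketch of a hypothetical caller: a pass that inserts a copy 'CopyMI'
// defining virtual register 'NewVReg' near the bottom of a block can extend
// NewVReg's liveness to the end of that block with:
//
//   LiveRange LR = lis->addLiveRangeToEndOfBlock(NewVReg, CopyMI);
//   // LR now covers [CopyMI's def slot, end of CopyMI's MBB).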