1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
49 // Hidden options for help debugging.
// -disable-rematerialization: globally turn off remat of trivially
// rematerializable defs (debugging aid, hidden from -help).
50 static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
// -fast-spill: use the simpler/faster spilling path instead of the
// splitting-based one (debugging aid, hidden from -help).
53 static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
// Pass statistics, reported with -stats.
56 STATISTIC(numIntervals , "Number of original intervals");
57 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
58 STATISTIC(numSplits , "Number of intervals split");
// Pass identification and registration with the legacy pass manager.
60 char LiveIntervals::ID = 0;
61 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// getAnalysisUsage - Declare the analyses this pass requires (alias analysis,
// live variables, slot indexes, and the phi-elimination / two-address /
// implicit-def lowering passes that must run first) and the analyses it
// preserves, then defer to the MachineFunctionPass base implementation.
// NOTE(review): this numbered listing has dropped some original lines
// (e.g. the closing brace); the code below is kept byte-identical.
63 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
65 AU.addRequired<AliasAnalysis>();
66 AU.addPreserved<AliasAnalysis>();
67 AU.addPreserved<LiveVariables>();
68 AU.addRequired<LiveVariables>();
69 AU.addPreservedID(MachineLoopInfoID);
70 AU.addPreservedID(MachineDominatorsID);
71 AU.addPreservedID(PHIEliminationID);
85 void LiveIntervals::releaseMemory() {
86 // Free the live intervals themselves.
87 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
88 E = r2iMap_.end(); I != E; ++I)
93 // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
94 VNInfoAllocator.Reset();
95 while (!CloneMIs.empty()) {
96 MachineInstr *MI = CloneMIs.back();
98 mf_->DeleteMachineInstr(MI);
102 /// runOnMachineFunction - Register allocate the whole function
104 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
106 mri_ = &mf_->getRegInfo();
107 tm_ = &fn.getTarget();
108 tri_ = tm_->getRegisterInfo();
109 tii_ = tm_->getInstrInfo();
110 aa_ = &getAnalysis<AliasAnalysis>();
111 lv_ = &getAnalysis<LiveVariables>();
112 indexes_ = &getAnalysis<SlotIndexes>();
113 allocatableRegs_ = tri_->getAllocatableSet(fn);
117 numIntervals += getNumIntervals();
123 /// print - Implement the dump method.
124 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
125 OS << "********** INTERVALS **********\n";
126 for (const_iterator I = begin(), E = end(); I != E; ++I) {
127 I->second->print(OS, tri_);
134 void LiveIntervals::printInstrs(raw_ostream &OS) const {
135 OS << "********** MACHINEINSTRS **********\n";
137 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
138 mbbi != mbbe; ++mbbi) {
139 OS << "BB#" << mbbi->getNumber()
140 << ":\t\t# derived from " << mbbi->getName() << "\n";
141 for (MachineBasicBlock::iterator mii = mbbi->begin(),
142 mie = mbbi->end(); mii != mie; ++mii) {
143 if (mii->isDebugValue())
144 OS << SlotIndex::getEmptyKey() << '\t' << *mii;
146 OS << getInstructionIndex(mii) << '\t' << *mii;
151 void LiveIntervals::dumpInstrs() const {
155 bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
156 VirtRegMap &vrm, unsigned reg) {
157 // We don't handle fancy stuff crossing basic block boundaries
158 if (li.ranges.size() != 1)
160 const LiveRange &range = li.ranges.front();
161 SlotIndex idx = range.start.getBaseIndex();
162 SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
164 // Skip deleted instructions
165 MachineInstr *firstMI = getInstructionFromIndex(idx);
166 while (!firstMI && idx != end) {
167 idx = idx.getNextIndex();
168 firstMI = getInstructionFromIndex(idx);
173 // Find last instruction in range
174 SlotIndex lastIdx = end.getPrevIndex();
175 MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
176 while (!lastMI && lastIdx != idx) {
177 lastIdx = lastIdx.getPrevIndex();
178 lastMI = getInstructionFromIndex(lastIdx);
183 // Range cannot cross basic block boundaries or terminators
184 MachineBasicBlock *MBB = firstMI->getParent();
185 if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
188 MachineBasicBlock::const_iterator E = lastMI;
190 for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
191 const MachineInstr &MI = *I;
193 // Allow copies to and from li.reg
194 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
195 if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
196 if (SrcReg == li.reg || DstReg == li.reg)
199 // Check for operands using reg
200 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
201 const MachineOperand& mop = MI.getOperand(i);
204 unsigned PhysReg = mop.getReg();
205 if (PhysReg == 0 || PhysReg == li.reg)
207 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
208 if (!vrm.hasPhys(PhysReg))
210 PhysReg = vrm.getPhys(PhysReg);
212 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
217 // No conflicts found.
221 /// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegRef except
222 /// it can check use as well.
223 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
224 unsigned Reg, bool CheckUse,
225 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
226 for (LiveInterval::Ranges::const_iterator
227 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
228 for (SlotIndex index = I->start.getBaseIndex(),
229 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
231 index = index.getNextIndex()) {
232 MachineInstr *MI = getInstructionFromIndex(index);
234 continue; // skip deleted instructions
236 if (JoinedCopies.count(MI))
238 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
239 MachineOperand& MO = MI->getOperand(i);
242 if (MO.isUse() && !CheckUse)
244 unsigned PhysReg = MO.getReg();
245 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
247 if (tri_->isSubRegister(Reg, PhysReg))
257 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
258 if (TargetRegisterInfo::isPhysicalRegister(reg))
259 dbgs() << tri_->getName(reg);
261 dbgs() << "%reg" << reg;
265 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
266 MachineBasicBlock::iterator mi,
270 LiveInterval &interval) {
272 dbgs() << "\t\tregister: ";
273 printRegName(interval.reg, tri_);
276 // Virtual registers may be defined multiple times (due to phi
277 // elimination and 2-addr elimination). Much of what we do only has to be
278 // done once for the vreg. We use an empty interval to detect the first
279 // time we see a vreg.
280 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
281 if (interval.empty()) {
282 // Get the Idx of the defining instructions.
283 SlotIndex defIndex = MIIdx.getDefIndex();
284 // Earlyclobbers move back one, so that they overlap the live range
286 if (MO.isEarlyClobber())
287 defIndex = MIIdx.getUseIndex();
289 MachineInstr *CopyMI = NULL;
290 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
291 if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
292 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
294 // Earlyclobbers move back one.
295 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
297 assert(ValNo->id == 0 && "First value in interval is not 0?");
299 // Loop over all of the blocks that the vreg is defined in. There are
300 // two cases we have to handle here. The most common case is a vreg
301 // whose lifetime is contained within a basic block. In this case there
302 // will be a single kill, in MBB, which comes after the definition.
303 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
304 // FIXME: what about dead vars?
306 if (vi.Kills[0] != mi)
307 killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
309 killIdx = defIndex.getStoreIndex();
311 // If the kill happens after the definition, we have an intra-block
313 if (killIdx > defIndex) {
314 assert(vi.AliveBlocks.empty() &&
315 "Shouldn't be alive across any blocks!");
316 LiveRange LR(defIndex, killIdx, ValNo);
317 interval.addRange(LR);
318 DEBUG(dbgs() << " +" << LR << "\n");
319 ValNo->addKill(killIdx);
324 // The other case we handle is when a virtual register lives to the end
325 // of the defining block, potentially live across some blocks, then is
326 // live into some number of blocks, but gets killed. Start by adding a
327 // range that goes from this definition to the end of the defining block.
328 LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
329 DEBUG(dbgs() << " +" << NewLR);
330 interval.addRange(NewLR);
332 bool PHIJoin = lv_->isPHIJoin(interval.reg);
335 // A phi join register is killed at the end of the MBB and revived as a new
336 // valno in the killing blocks.
337 assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
338 DEBUG(dbgs() << " phi-join");
339 ValNo->addKill(indexes_->getTerminatorGap(mbb));
340 ValNo->setHasPHIKill(true);
342 // Iterate over all of the blocks that the variable is completely
343 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
345 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
346 E = vi.AliveBlocks.end(); I != E; ++I) {
347 MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
348 LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
349 interval.addRange(LR);
350 DEBUG(dbgs() << " +" << LR);
354 // Finally, this virtual register is live from the start of any killing
355 // block to the 'use' slot of the killing instruction.
356 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
357 MachineInstr *Kill = vi.Kills[i];
358 SlotIndex Start = getMBBStartIdx(Kill->getParent());
359 SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();
361 // Create interval with one of a NEW value number. Note that this value
362 // number isn't actually defined by an instruction, weird huh? :)
364 ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
366 ValNo->setIsPHIDef(true);
368 LiveRange LR(Start, killIdx, ValNo);
369 interval.addRange(LR);
370 ValNo->addKill(killIdx);
371 DEBUG(dbgs() << " +" << LR);
375 // If this is the second time we see a virtual register definition, it
376 // must be due to phi elimination or two addr elimination. If this is
377 // the result of two address elimination, then the vreg is one of the
378 // def-and-use register operand.
379 if (mi->isRegTiedToUseOperand(MOIdx)) {
380 // If this is a two-address definition, then we have already processed
381 // the live range. The only problem is that we didn't realize there
382 // are actually two values in the live interval. Because of this we
383 // need to take the LiveRegion that defines this register and split it
385 assert(interval.containsOneValue());
386 SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
387 SlotIndex RedefIndex = MIIdx.getDefIndex();
388 if (MO.isEarlyClobber())
389 RedefIndex = MIIdx.getUseIndex();
391 const LiveRange *OldLR =
392 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
393 VNInfo *OldValNo = OldLR->valno;
395 // Delete the initial value, which should be short and continuous,
396 // because the 2-addr copy must be in the same MBB as the redef.
397 interval.removeRange(DefIndex, RedefIndex);
399 // Two-address vregs should always only be redefined once. This means
400 // that at this point, there should be exactly one value number in it.
401 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
403 // The new value number (#1) is defined by the instruction we claimed
405 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
406 false, // update at *
408 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
410 // Value#0 is now defined by the 2-addr instruction.
411 OldValNo->def = RedefIndex;
412 OldValNo->setCopy(0);
414 // Add the new live interval which replaces the range for the input copy.
415 LiveRange LR(DefIndex, RedefIndex, ValNo);
416 DEBUG(dbgs() << " replace range with " << LR);
417 interval.addRange(LR);
418 ValNo->addKill(RedefIndex);
420 // If this redefinition is dead, we need to add a dummy unit live
421 // range covering the def slot.
423 interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
427 dbgs() << " RESULT: ";
428 interval.print(dbgs(), tri_);
431 assert(lv_->isPHIJoin(interval.reg) && "Multiply defined register");
432 // In the case of PHI elimination, each variable definition is only
433 // live until the end of the block. We've already taken care of the
434 // rest of the live range.
436 SlotIndex defIndex = MIIdx.getDefIndex();
437 if (MO.isEarlyClobber())
438 defIndex = MIIdx.getUseIndex();
441 MachineInstr *CopyMI = NULL;
442 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
443 if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
444 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
446 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
448 SlotIndex killIndex = getMBBEndIdx(mbb);
449 LiveRange LR(defIndex, killIndex, ValNo);
450 interval.addRange(LR);
451 ValNo->addKill(indexes_->getTerminatorGap(mbb));
452 ValNo->setHasPHIKill(true);
453 DEBUG(dbgs() << " phi-join +" << LR);
457 DEBUG(dbgs() << '\n');
460 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
461 MachineBasicBlock::iterator mi,
464 LiveInterval &interval,
465 MachineInstr *CopyMI) {
466 // A physical register cannot be live across basic block, so its
467 // lifetime must end somewhere in its defining basic block.
469 dbgs() << "\t\tregister: ";
470 printRegName(interval.reg, tri_);
473 SlotIndex baseIndex = MIIdx;
474 SlotIndex start = baseIndex.getDefIndex();
475 // Earlyclobbers move back one.
476 if (MO.isEarlyClobber())
477 start = MIIdx.getUseIndex();
478 SlotIndex end = start;
480 // If it is not used after definition, it is considered dead at
481 // the instruction defining it. Hence its interval is:
482 // [defSlot(def), defSlot(def)+1)
483 // For earlyclobbers, the defSlot was pushed back one; the extra
484 // advance below compensates.
486 DEBUG(dbgs() << " dead");
487 end = start.getStoreIndex();
491 // If it is not dead on definition, it must be killed by a
492 // subsequent instruction. Hence its interval is:
493 // [defSlot(def), useSlot(kill)+1)
494 baseIndex = baseIndex.getNextIndex();
495 while (++mi != MBB->end()) {
497 if (mi->isDebugValue())
499 if (getInstructionFromIndex(baseIndex) == 0)
500 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
502 if (mi->killsRegister(interval.reg, tri_)) {
503 DEBUG(dbgs() << " killed");
504 end = baseIndex.getDefIndex();
507 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
509 if (mi->isRegTiedToUseOperand(DefIdx)) {
510 // Two-address instruction.
511 end = baseIndex.getDefIndex();
513 // Another instruction redefines the register before it is ever read.
514 // Then the register is essentially dead at the instruction that
515 // defines it. Hence its interval is:
516 // [defSlot(def), defSlot(def)+1)
517 DEBUG(dbgs() << " dead");
518 end = start.getStoreIndex();
524 baseIndex = baseIndex.getNextIndex();
527 // The only case we should have a dead physreg here without a killing or
528 // instruction where we know it's dead is if it is live-in to the function
529 // and never used. Another possible case is the implicit use of the
530 // physical register has been deleted by two-address pass.
531 end = start.getStoreIndex();
534 assert(start < end && "did not find end of interval?");
536 // Already exists? Extend old live interval.
537 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
538 bool Extend = OldLR != interval.end();
539 VNInfo *ValNo = Extend
540 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
541 if (MO.isEarlyClobber() && Extend)
542 ValNo->setHasRedefByEC(true);
543 LiveRange LR(start, end, ValNo);
544 interval.addRange(LR);
545 LR.valno->addKill(end);
546 DEBUG(dbgs() << " +" << LR << '\n');
549 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
550 MachineBasicBlock::iterator MI,
554 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
555 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
556 getOrCreateInterval(MO.getReg()));
557 else if (allocatableRegs_[MO.getReg()]) {
558 MachineInstr *CopyMI = NULL;
559 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
560 if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
561 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
563 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
564 getOrCreateInterval(MO.getReg()), CopyMI);
565 // Def of a register also defines its sub-registers.
566 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
567 // If MI also modifies the sub-register explicitly, avoid processing it
568 // more than once. Do not pass in TRI here so it checks for exact match.
569 if (!MI->modifiesRegister(*AS))
570 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
571 getOrCreateInterval(*AS), 0);
575 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
577 LiveInterval &interval, bool isAlias) {
579 dbgs() << "\t\tlivein register: ";
580 printRegName(interval.reg, tri_);
583 // Look for kills, if it reaches a def before it's killed, then it shouldn't
584 // be considered a livein.
585 MachineBasicBlock::iterator mi = MBB->begin();
586 SlotIndex baseIndex = MIIdx;
587 SlotIndex start = baseIndex;
588 if (getInstructionFromIndex(baseIndex) == 0)
589 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
591 SlotIndex end = baseIndex;
592 bool SeenDefUse = false;
594 MachineBasicBlock::iterator E = MBB->end();
596 if (mi->isDebugValue()) {
598 if (mi != E && !mi->isDebugValue()) {
599 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
603 if (mi->killsRegister(interval.reg, tri_)) {
604 DEBUG(dbgs() << " killed");
605 end = baseIndex.getDefIndex();
608 } else if (mi->modifiesRegister(interval.reg, tri_)) {
609 // Another instruction redefines the register before it is ever read.
610 // Then the register is essentially dead at the instruction that defines
611 // it. Hence its interval is:
612 // [defSlot(def), defSlot(def)+1)
613 DEBUG(dbgs() << " dead");
614 end = start.getStoreIndex();
620 if (mi != E && !mi->isDebugValue()) {
621 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
625 // Live-in register might not be used at all.
628 DEBUG(dbgs() << " dead");
629 end = MIIdx.getStoreIndex();
631 DEBUG(dbgs() << " live through");
637 interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
638 0, false, VNInfoAllocator);
639 vni->setIsPHIDef(true);
640 LiveRange LR(start, end, vni);
642 interval.addRange(LR);
643 LR.valno->addKill(end);
644 DEBUG(dbgs() << " +" << LR << '\n');
647 /// computeIntervals - computes the live intervals for virtual
648 /// registers. for some ordering of the machine instructions [1,N] a
649 /// live interval is an interval [i, j) where 1 <= i <= j < N for
650 /// which a variable is live
651 void LiveIntervals::computeIntervals() {
652 DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
653 << "********** Function: "
654 << ((Value*)mf_->getFunction())->getName() << '\n');
656 SmallVector<unsigned, 8> UndefUses;
657 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
659 MachineBasicBlock *MBB = MBBI;
663 // Track the index of the current machine instr.
664 SlotIndex MIIndex = getMBBStartIdx(MBB);
665 DEBUG(dbgs() << MBB->getName() << ":\n");
667 // Create intervals for live-ins to this BB first.
668 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
669 LE = MBB->livein_end(); LI != LE; ++LI) {
670 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
671 // Multiple live-ins can alias the same register.
672 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
673 if (!hasInterval(*AS))
674 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
678 // Skip over empty initial indices.
679 if (getInstructionFromIndex(MIIndex) == 0)
680 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
682 for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
684 DEBUG(dbgs() << MIIndex << "\t" << *MI);
685 if (MI->isDebugValue())
689 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
690 MachineOperand &MO = MI->getOperand(i);
691 if (!MO.isReg() || !MO.getReg())
694 // handle register defs - build intervals
696 handleRegisterDef(MBB, MI, MIIndex, MO, i);
697 else if (MO.isUndef())
698 UndefUses.push_back(MO.getReg());
701 // Move to the next instr slot.
702 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
706 // Create empty intervals for registers defined by implicit_def's (except
707 // for those implicit_def that define values which are liveout of their
709 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
710 unsigned UndefReg = UndefUses[i];
711 (void)getOrCreateInterval(UndefReg);
715 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
716 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
717 return new LiveInterval(reg, Weight);
720 /// dupInterval - Duplicate a live interval. The caller is responsible for
721 /// managing the allocated memory.
722 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
723 LiveInterval *NewLI = createInterval(li->reg);
724 NewLI->Copy(*li, mri_, getVNInfoAllocator());
728 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
729 /// copy field and returns the source register that defines it.
730 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
734 if (VNI->getCopy()->isExtractSubreg()) {
735 // If it's extracting out of a physical register, return the sub-register.
736 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
737 if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
738 unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
739 unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
740 if (SrcSubReg == DstSubReg)
741 // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
742 // reg1034 can still be coalesced to EDX.
744 assert(DstSubReg == 0);
745 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
748 } else if (VNI->getCopy()->isInsertSubreg() ||
749 VNI->getCopy()->isSubregToReg())
750 return VNI->getCopy()->getOperand(2).getReg();
752 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
753 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
755 llvm_unreachable("Unrecognized copy instruction!");
759 //===----------------------------------------------------------------------===//
760 // Register allocator hooks.
763 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
764 /// allow one) virtual register operand, then its uses are implicitly using
765 /// the register. Returns the virtual register.
766 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
767 MachineInstr *MI) const {
769 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
770 MachineOperand &MO = MI->getOperand(i);
771 if (!MO.isReg() || !MO.isUse())
773 unsigned Reg = MO.getReg();
774 if (Reg == 0 || Reg == li.reg)
777 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
778 !allocatableRegs_[Reg])
780 // FIXME: For now, only remat MI with at most one register operand.
782 "Can't rematerialize instruction with multiple register operand!");
791 /// isValNoAvailableAt - Return true if the val# of the specified interval
792 /// which reaches the given instruction also reaches the specified use index.
793 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
794 SlotIndex UseIdx) const {
795 SlotIndex Index = getInstructionIndex(MI);
796 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
797 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
798 return UI != li.end() && UI->valno == ValNo;
801 /// isReMaterializable - Returns true if the definition MI of the specified
802 /// val# of the specified interval is re-materializable.
803 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
804 const VNInfo *ValNo, MachineInstr *MI,
805 SmallVectorImpl<LiveInterval*> &SpillIs,
810 if (!tii_->isTriviallyReMaterializable(MI, aa_))
813 // Target-specific code can mark an instruction as being rematerializable
814 // if it has one virtual reg use, though it had better be something like
815 // a PIC base register which is likely to be live everywhere.
816 unsigned ImpUse = getReMatImplicitUse(li, MI);
818 const LiveInterval &ImpLi = getInterval(ImpUse);
819 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
820 re = mri_->use_end(); ri != re; ++ri) {
821 MachineInstr *UseMI = &*ri;
822 SlotIndex UseIdx = getInstructionIndex(UseMI);
823 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
825 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
829 // If a register operand of the re-materialized instruction is going to
830 // be spilled next, then it's not legal to re-materialize this instruction.
831 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
832 if (ImpUse == SpillIs[i]->reg)
838 /// isReMaterializable - Returns true if the definition MI of the specified
839 /// val# of the specified interval is re-materializable.
840 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
841 const VNInfo *ValNo, MachineInstr *MI) {
842 SmallVector<LiveInterval*, 4> Dummy1;
844 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
847 /// isReMaterializable - Returns true if every definition of MI of every
848 /// val# of the specified interval is re-materializable.
849 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
850 SmallVectorImpl<LiveInterval*> &SpillIs,
853 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
855 const VNInfo *VNI = *i;
857 continue; // Dead val#.
858 // Is the def for the val# rematerializable?
859 if (!VNI->isDefAccurate())
861 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
862 bool DefIsLoad = false;
864 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
871 /// FilterFoldedOps - Filter out two-address use operands. Return
872 /// true if it finds any issue with the operands that ought to prevent
874 static bool FilterFoldedOps(MachineInstr *MI,
875 SmallVector<unsigned, 2> &Ops,
877 SmallVector<unsigned, 2> &FoldOps) {
879 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
880 unsigned OpIdx = Ops[i];
881 MachineOperand &MO = MI->getOperand(OpIdx);
882 // FIXME: fold subreg use.
886 MRInfo |= (unsigned)VirtRegMap::isMod;
888 // Filter out two-address use operand(s).
889 if (MI->isRegTiedToDefOperand(OpIdx)) {
890 MRInfo = VirtRegMap::isModRef;
893 MRInfo |= (unsigned)VirtRegMap::isRef;
895 FoldOps.push_back(OpIdx);
901 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
902 /// slot / to reg or any rematerialized load into ith operand of specified
903 /// MI. If it is successul, MI is updated with the newly created MI and
905 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
906 VirtRegMap &vrm, MachineInstr *DefMI,
908 SmallVector<unsigned, 2> &Ops,
909 bool isSS, int Slot, unsigned Reg) {
910 // If it is an implicit def instruction, just delete it.
911 if (MI->isImplicitDef()) {
912 RemoveMachineInstrFromMaps(MI);
913 vrm.RemoveMachineInstrFromMaps(MI);
914 MI->eraseFromParent();
919 // Filter the list of operand indexes that are to be folded. Abort if
920 // any operand will prevent folding.
922 SmallVector<unsigned, 2> FoldOps;
923 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
926 // The only time it's safe to fold into a two address instruction is when
927 // it's folding reload and spill from / into a spill stack slot.
928 if (DefMI && (MRInfo & VirtRegMap::isMod))
931 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
932 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
934 // Remember this instruction uses the spill slot.
935 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
937 // Attempt to fold the memory reference into the instruction. If
938 // we can do this, we don't need to insert spill code.
939 MachineBasicBlock &MBB = *MI->getParent();
940 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
941 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
942 vrm.transferSpillPts(MI, fmi);
943 vrm.transferRestorePts(MI, fmi);
944 vrm.transferEmergencySpills(MI, fmi);
945 ReplaceMachineInstrInMaps(MI, fmi);
946 MI = MBB.insert(MBB.erase(MI), fmi);
953 /// canFoldMemoryOperand - Returns true if the specified load / store
954 /// folding is possible.
955 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
956 SmallVector<unsigned, 2> &Ops,
958 // Filter the list of operand indexes that are to be folded. Abort if
959 // any operand will prevent folding.
961 SmallVector<unsigned, 2> FoldOps;
962 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
965 // It's only legal to remat for a use, not a def.
966 if (ReMat && (MRInfo & VirtRegMap::isMod))
969 return tii_->canFoldMemoryOperand(MI, FoldOps);
972 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
973 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
975 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
980 for (++itr; itr != li.ranges.end(); ++itr) {
981 MachineBasicBlock *mbb2 =
982 indexes_->getMBBCoveringRange(itr->start, itr->end);
991 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
992 /// interval on to-be re-materialized operands of MI) with new register.
993 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
994 MachineInstr *MI, unsigned NewVReg,
996 // There is an implicit use. That means one of the other operand is
997 // being remat'ed and the remat'ed instruction has li.reg as an
998 // use operand. Make sure we rewrite that as well.
999 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1000 MachineOperand &MO = MI->getOperand(i);
1003 unsigned Reg = MO.getReg();
1004 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1006 if (!vrm.isReMaterialized(Reg))
1008 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1009 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1011 UseMO->setReg(NewVReg);
1015 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1016 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// Returns whether every rewritten operand of this instruction could also be
// folded into a memory operand (the caller accumulates this into AllCanFold).
// NOTE(review): many lines (the MachineInstr *MI / VirtRegMap parameters,
// several `continue`s, `else` branches, and closing braces) are elided from
// this chunk; the structure below is therefore incomplete as shown.
1017 bool LiveIntervals::
1018 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1019 bool TrySplit, SlotIndex index, SlotIndex end,
1021 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1022 unsigned Slot, int LdSlot,
1023 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1025 const TargetRegisterClass* rc,
1026 SmallVector<int, 4> &ReMatIds,
1027 const MachineLoopInfo *loopInfo,
1028 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1029 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1030 std::vector<LiveInterval*> &NewLIs) {
1031 bool CanFold = false;
// Walk every operand, looking for references to the spilled register.
1033 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1034 MachineOperand& mop = MI->getOperand(i);
1037 unsigned Reg = mop.getReg();
1038 unsigned RegI = Reg;
// Skip null registers and physical registers.
1039 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1044 bool TryFold = !DefIsReMat;
1045 bool FoldSS = true; // Default behavior unless it's a remat.
1046 int FoldSlot = Slot;
1048 // If this is the rematerializable definition MI itself and
1049 // all of its uses are rematerialized, simply delete it.
1050 if (MI == ReMatOrigDefMI && CanDelete) {
1051 DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
// Remove the instruction from the slot-index and virt-reg maps before erase.
1053 RemoveMachineInstrFromMaps(MI);
1054 vrm.RemoveMachineInstrFromMaps(MI);
1055 MI->eraseFromParent();
1059 // If def for this use can't be rematerialized, then try folding.
1060 // If def is rematerializable and it's a load, also try folding.
1061 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1063 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1069 // Scan all of the operands of this instruction rewriting operands
1070 // to use NewVReg instead of li.reg as appropriate. We do this for
1073 // 1. If the instr reads the same spilled vreg multiple times, we
1074 // want to reuse the NewVReg.
1075 // 2. If the instr is a two-addr instruction, we are required to
1076 // keep the src/dst regs pinned.
1078 // Keep track of whether we replace a use and/or def so that we can
1079 // create the spill interval with the appropriate range.
1081 HasUse = mop.isUse();
1082 HasDef = mop.isDef();
1083 SmallVector<unsigned, 2> Ops;
// Collect indexes of all later operands that reference the same register.
1085 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1086 const MachineOperand &MOj = MI->getOperand(j);
1089 unsigned RegJ = MOj.getReg();
1090 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1094 if (!MOj.isUndef()) {
1095 HasUse |= MOj.isUse();
1096 HasDef |= MOj.isDef();
1101 // Create a new virtual register for the spill interval.
1102 // Create the new register now so we can map the fold instruction
1103 // to the new register so when it is unfolded we get the correct
1105 bool CreatedNewVReg = false;
1107 NewVReg = mri_->createVirtualRegister(rc);
1109 CreatedNewVReg = true;
1111 // The new virtual register should get the same allocation hints as the
1113 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1114 if (Hint.first || Hint.second)
1115 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1121 // Do not fold load / store here if we are splitting. We'll find an
1122 // optimal point to insert a load / store later.
1124 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1125 Ops, FoldSS, FoldSlot, NewVReg)) {
1126 // Folding the load/store can completely change the instruction in
1127 // unpredictable ways, rescan it from the beginning.
1130 // We need to give the new vreg the same stack slot as the
1131 // spilled interval.
1132 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
// If the folded instruction is no longer in the index maps it was deleted.
1138 if (isNotInMIMap(MI))
1140 goto RestartInstruction;
1143 // We'll try to fold it later if it's profitable.
1144 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand (and implicit remat uses) to the new register.
1148 mop.setReg(NewVReg);
1149 if (mop.isImplicit())
1150 rewriteImplicitOps(li, MI, NewVReg, vrm);
1152 // Reuse NewVReg for other reads.
1153 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1154 MachineOperand &mopj = MI->getOperand(Ops[j]);
1155 mopj.setReg(NewVReg);
1156 if (mopj.isImplicit())
1157 rewriteImplicitOps(li, MI, NewVReg, vrm);
1160 if (CreatedNewVReg) {
1162 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1163 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1164 // Each valnum may have its own remat id.
1165 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
// Share the previously-assigned remat id with the new register.
1167 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1169 if (!CanDelete || (HasUse && HasDef)) {
1170 // If this is a two-addr instruction then its use operands are
1171 // rematerializable but its def is not. It should be assigned a
1173 vrm.assignVirt2StackSlot(NewVReg, Slot);
1176 vrm.assignVirt2StackSlot(NewVReg, Slot);
1178 } else if (HasUse && HasDef &&
1179 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1180 // If this interval hasn't been assigned a stack slot (because earlier
1181 // def is a deleted remat def), do it now.
1182 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1183 vrm.assignVirt2StackSlot(NewVReg, Slot);
1186 // Re-matting an instruction with virtual register use. Add the
1187 // register as an implicit use on the use MI.
1188 if (DefIsReMat && ImpUse)
1189 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1191 // Create a new register interval for this spill / remat.
1192 LiveInterval &nI = getOrCreateInterval(NewVReg);
1193 if (CreatedNewVReg) {
1194 NewLIs.push_back(&nI);
1195 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
// Record that the new register was split from the original interval.
1197 vrm.setIsSplitFromReg(NewVReg, li.reg);
1201 if (CreatedNewVReg) {
// Fresh interval: live from the reload slot up to the (re)definition slot.
1202 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1203 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1204 DEBUG(dbgs() << " +" << LR);
1207 // Extend the split live interval to this def / use.
1208 SlotIndex End = index.getDefIndex();
1209 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1210 nI.getValNumInfo(nI.getNumValNums()-1));
1211 DEBUG(dbgs() << " +" << LR);
// A def additionally extends the new interval to the store slot after it.
1216 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1217 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1218 DEBUG(dbgs() << " +" << LR);
1223 dbgs() << "\t\t\t\tAdded new interval: ";
1224 nI.print(dbgs(), tri_);
/// anyKillInMBBAfterIdx - Return true if the value numbered by the VNInfo
/// parameter (its declaration line is elided from this chunk) has a kill
/// strictly after Idx but still inside MBB. PHI kills are ignored.
1230 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1232 MachineBasicBlock *MBB,
1233 SlotIndex Idx) const {
1234 SlotIndex End = getMBBEndIdx(MBB);
// Scan this val#'s kill slots for one inside (Idx, End].
1235 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1236 if (VNI->kills[j].isPHI())
1239 SlotIndex KillIdx = VNI->kills[j];
1240 if (KillIdx > Idx && KillIdx <= End)
1246 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1247 /// during spilling.
1249 struct RewriteInfo {
// Fields Index / MI / HasUse / HasDef are declared on lines elided from
// this chunk; the constructor below initializes all four.
1254 RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
1255 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// Comparator ordering RewriteInfo records by slot index, used with std::sort.
1258 struct RewriteInfoCompare {
1259 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1260 return LHS.Index < RHS.Index;
/// rewriteInstructionsForSpills - Rewrite all defs / uses of li.reg inside
/// the live range [I->start, I->end), creating spill registers via
/// rewriteInstructionForSpills and recording per-block spill / restore
/// points in SpillIdxes / RestoreIdxes (flagged in SpillMBBs / RestoreMBBs).
// NOTE(review): the VirtRegMap parameter and many interior lines (continue
// statements, else branches, closing braces) are elided from this chunk.
1265 void LiveIntervals::
1266 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1267 LiveInterval::Ranges::const_iterator &I,
1268 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1269 unsigned Slot, int LdSlot,
1270 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1272 const TargetRegisterClass* rc,
1273 SmallVector<int, 4> &ReMatIds,
1274 const MachineLoopInfo *loopInfo,
1275 BitVector &SpillMBBs,
1276 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1277 BitVector &RestoreMBBs,
1278 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1279 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1280 std::vector<LiveInterval*> &NewLIs) {
1281 bool AllCanFold = true;
1282 unsigned NewVReg = 0;
// Normalize the range endpoints to base (instruction) indexes.
1283 SlotIndex start = I->start.getBaseIndex();
1284 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1286 // First collect all the def / use in this live range that will be rewritten.
1287 // Make sure they are sorted according to instruction index.
1288 std::vector<RewriteInfo> RewriteMIs;
1289 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1290 re = mri_->reg_end(); ri != re; ) {
1291 MachineInstr *MI = &*ri;
1292 MachineOperand &O = ri.getOperand();
1294 if (MI->isDebugValue()) {
1295 // Remove debug info for now.
1297 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1300 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1301 SlotIndex index = getInstructionIndex(MI);
// Skip instructions outside the live range being rewritten.
1302 if (index < start || index >= end)
1306 // Must be defined by an implicit def. It should not be spilled. Note,
1307 // this is for correctness reason. e.g.
1308 // 8 %reg1024<def> = IMPLICIT_DEF
1309 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1310 // The live range [12, 14) are not part of the r1024 live interval since
1311 // it's defined by an implicit def. It will not conflicts with live
1312 // interval of r1025. Now suppose both registers are spilled, you can
1313 // easily see a situation where both registers are reloaded before
1314 // the INSERT_SUBREG and both target registers that would overlap.
1316 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1318 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
// When the def is remat'ed, it may itself read another virtual register.
1320 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1321 // Now rewrite the defs and uses.
1322 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1323 RewriteInfo &rwi = RewriteMIs[i];
1325 SlotIndex index = rwi.Index;
1326 bool MIHasUse = rwi.HasUse;
1327 bool MIHasDef = rwi.HasDef;
1328 MachineInstr *MI = rwi.MI;
1329 // If MI def and/or use the same register multiple times, then there
1330 // are multiple entries.
1331 unsigned NumUses = MIHasUse;
// Coalesce consecutive records for the same instruction.
1332 while (i != e && RewriteMIs[i].MI == MI) {
1333 assert(RewriteMIs[i].Index == index);
1334 bool isUse = RewriteMIs[i].HasUse;
1335 if (isUse) ++NumUses;
1337 MIHasDef |= RewriteMIs[i].HasDef;
1340 MachineBasicBlock *MBB = MI->getParent();
1342 if (ImpUse && MI != ReMatDefMI) {
1343 // Re-matting an instruction with virtual register use. Prevent interval
1344 // from being spilled.
1345 getInterval(ImpUse).markNotSpillable();
// Reuse the spill register already created for this block, if any.
1348 unsigned MBBId = MBB->getNumber();
1349 unsigned ThisVReg = 0;
1351 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1352 if (NVI != MBBVRegsMap.end()) {
1353 ThisVReg = NVI->second;
1360 // It's better to start a new interval to avoid artifically
1361 // extend the new interval.
1362 if (MIHasDef && !MIHasUse) {
1363 MBBVRegsMap.erase(MBB->getNumber());
1369 bool IsNew = ThisVReg == 0;
1371 // This ends the previous live interval. If all of its def / use
1372 // can be folded, give it a low spill weight.
1373 if (NewVReg && TrySplit && AllCanFold) {
1374 LiveInterval &nI = getOrCreateInterval(NewVReg);
// Rewrite this instruction's operands; HasDef/HasUse report what was seen.
1381 bool HasDef = false;
1382 bool HasUse = false;
1383 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1384 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1385 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1386 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1387 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1388 if (!HasDef && !HasUse)
1391 AllCanFold &= CanFold;
1393 // Update weight of spill interval.
1394 LiveInterval &nI = getOrCreateInterval(NewVReg);
1396 // The spill weight is now infinity as it cannot be spilled again.
1397 nI.markNotSpillable();
1401 // Keep track of the last def and first use in each MBB.
1403 if (MI != ReMatOrigDefMI || !CanDelete) {
1404 bool HasKill = false;
1406 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1408 // If this is a two-address code, then this index starts a new VNInfo.
1409 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1411 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1413 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1414 SpillIdxes.find(MBBId);
// Record (or update) the spill point for this block.
1416 if (SII == SpillIdxes.end()) {
1417 std::vector<SRInfo> S;
1418 S.push_back(SRInfo(index, NewVReg, true));
1419 SpillIdxes.insert(std::make_pair(MBBId, S));
1420 } else if (SII->second.back().vreg != NewVReg) {
1421 SII->second.push_back(SRInfo(index, NewVReg, true));
1422 } else if (index > SII->second.back().index) {
1423 // If there is an earlier def and this is a two-address
1424 // instruction, then it's not possible to fold the store (which
1425 // would also fold the load).
1426 SRInfo &Info = SII->second.back();
1428 Info.canFold = !HasUse;
1430 SpillMBBs.set(MBBId);
1431 } else if (SII != SpillIdxes.end() &&
1432 SII->second.back().vreg == NewVReg &&
1433 index > SII->second.back().index) {
1434 // There is an earlier def that's not killed (must be two-address).
1435 // The spill is no longer needed.
1436 SII->second.pop_back();
1437 if (SII->second.empty()) {
1438 SpillIdxes.erase(MBBId);
1439 SpillMBBs.reset(MBBId);
// Handle uses: mark earlier spills/restores unfoldable where needed and
// record the restore point for this block.
1446 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1447 SpillIdxes.find(MBBId);
1448 if (SII != SpillIdxes.end() &&
1449 SII->second.back().vreg == NewVReg &&
1450 index > SII->second.back().index)
1451 // Use(s) following the last def, it's not safe to fold the spill.
1452 SII->second.back().canFold = false;
1453 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1454 RestoreIdxes.find(MBBId);
1455 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1456 // If we are splitting live intervals, only fold if it's the first
1457 // use and there isn't another use later in the MBB.
1458 RII->second.back().canFold = false;
1460 // Only need a reload if there isn't an earlier def / use.
1461 if (RII == RestoreIdxes.end()) {
1462 std::vector<SRInfo> Infos;
1463 Infos.push_back(SRInfo(index, NewVReg, true));
1464 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1466 RII->second.push_back(SRInfo(index, NewVReg, true));
1468 RestoreMBBs.set(MBBId);
1472 // Update spill weight.
1473 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1474 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1477 if (NewVReg && TrySplit && AllCanFold) {
1478 // If all of its def / use can be folded, give it a low spill weight.
1479 LiveInterval &nI = getOrCreateInterval(NewVReg);
/// alsoFoldARestore - Return true if the restore recorded for (index, vr) in
/// block Id exists and is still marked foldable (the `return` lines are on
/// lines elided from this chunk).
1484 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1485 unsigned vr, BitVector &RestoreMBBs,
1486 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores were recorded for this block at all.
1487 if (!RestoreMBBs[Id])
1489 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
// Look for a matching, still-foldable restore record.
1490 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1491 if (Restores[i].index == index &&
1492 Restores[i].vreg == vr &&
1493 Restores[i].canFold)
/// eraseRestoreInfo - Invalidate the restore recorded for (index, vr) in
/// block Id by resetting its index to the null SlotIndex.
1498 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1499 unsigned vr, BitVector &RestoreMBBs,
1500 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// Nothing recorded for this block.
1501 if (!RestoreMBBs[Id])
1503 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1504 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
// NOTE(review): the guard only tests `Restores[i].vreg` for non-zero rather
// than comparing it against the `vr` parameter — verify this is intentional
// and not a dropped `== vr`.
1505 if (Restores[i].index == index && Restores[i].vreg)
1506 Restores[i].index = SlotIndex();
1509 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1510 /// spilled and create empty intervals for their uses.
// NOTE(review): the return type and several interior lines (iterator
// advancement, branch bodies, closing braces) are elided from this chunk.
1512 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1513 const TargetRegisterClass* rc,
1514 std::vector<LiveInterval*> &NewLIs) {
// Walk every remaining def/use of the spilled register.
1515 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1516 re = mri_->reg_end(); ri != re; ) {
1517 MachineOperand &O = ri.getOperand();
1518 MachineInstr *MI = &*ri;
// Any def still referencing li.reg at this point must be an IMPLICIT_DEF;
// remove it from the maps and delete it.
1521 assert(MI->isImplicitDef() &&
1522 "Register def was not rewritten?");
1523 RemoveMachineInstrFromMaps(MI);
1524 vrm.RemoveMachineInstrFromMaps(MI);
1525 MI->eraseFromParent();
1527 // This must be an use of an implicit_def so it's not part of the live
1528 // interval. Create a new empty live interval for it.
1529 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1530 unsigned NewVReg = mri_->createVirtualRegister(rc);
1532 vrm.setIsImplicitlyDefined(NewVReg);
1533 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Rewrite every operand of this user that still references li.reg.
1534 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1535 MachineOperand &MO = MI->getOperand(i);
1536 if (MO.isReg() && MO.getReg() == li.reg) {
// getSpillWeight - Compute the spill-weight contribution of one def and/or
// use occurring at the given loop depth. (The return type and the clamp
// assignment under the depth check are on lines elided from this chunk.)
1546 LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1547 // Limit the loop depth ridiculousness.
1548 if (loopDepth > 200)
1551 // The loop depth is used to roughly estimate the number of times the
1552 // instruction is executed. Something like 10^d is simple, but will quickly
1553 // overflow a float. This expression behaves like 10^d for small d, but is
1554 // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1555 // headroom before overflow.
1556 float lc = powf(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
// A def and a use each contribute one unit, scaled by the depth factor.
1558 return (isDef + isUse) * lc;
// normalizeSpillWeights - Apply normalizeSpillWeight to each interval
// produced by spilling. (The return type is on a line elided here.)
1562 LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1563 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1564 normalizeSpillWeight(*NewLIs[i]);
// addIntervalsForSpillsFast - Simple spilling used when -fast-spill is set:
// assign one stack slot, then for each instruction touching li.reg either
// fold the memory access or rewrite its operands onto a fresh, unspillable
// virtual register with a restore before uses and a spill after defs.
// NOTE(review): the VirtRegMap parameter and several interior lines (branch
// headers, closing braces, final return) are elided from this chunk.
1567 std::vector<LiveInterval*> LiveIntervals::
1568 addIntervalsForSpillsFast(const LiveInterval &li,
1569 const MachineLoopInfo *loopInfo,
// One stack slot serves the whole original interval.
1571 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1573 std::vector<LiveInterval*> added;
1575 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1578 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1583 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1585 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1586 while (RI != mri_->reg_end()) {
1587 MachineInstr* MI = &*RI;
// Gather all operand indexes of MI that reference the spilled register.
1589 SmallVector<unsigned, 2> Indices;
1590 bool HasUse = false;
1591 bool HasDef = false;
1593 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1594 MachineOperand& mop = MI->getOperand(i);
1595 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1597 HasUse |= MI->getOperand(i).isUse();
1598 HasDef |= MI->getOperand(i).isDef();
1600 Indices.push_back(i);
// Prefer folding the stack access directly into the instruction.
1603 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1604 Indices, true, slot, li.reg)) {
1605 unsigned NewVReg = mri_->createVirtualRegister(rc);
1607 vrm.assignVirt2StackSlot(NewVReg, slot);
1609 // create a new register for this spill
1610 LiveInterval &nI = getOrCreateInterval(NewVReg);
1611 nI.markNotSpillable();
1613 // Rewrite register operands to use the new vreg.
1614 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1615 E = Indices.end(); I != E; ++I) {
1616 MI->getOperand(*I).setReg(NewVReg);
// The new register only lives across this instruction, so uses are kills.
1618 if (MI->getOperand(*I).isUse())
1619 MI->getOperand(*I).setIsKill(true);
1622 // Fill in the new live interval.
1623 SlotIndex index = getInstructionIndex(MI);
// Live-in portion: from the reload slot to the use slot.
1625 LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1626 nI.getNextValue(SlotIndex(), 0, false,
1627 getVNInfoAllocator()));
1628 DEBUG(dbgs() << " +" << LR);
1630 vrm.addRestorePoint(NewVReg, MI);
// Live-out portion: from the def slot to the store slot.
1633 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1634 nI.getNextValue(SlotIndex(), 0, false,
1635 getVNInfoAllocator()));
1636 DEBUG(dbgs() << " +" << LR);
1638 vrm.addSpillPoint(NewVReg, true, MI);
1641 added.push_back(&nI);
1644 dbgs() << "\t\t\t\tadded new interval: ";
// Restart iteration: operand rewriting invalidates the use iterator.
1651 RI = mri_->reg_begin(li.reg);
// addIntervalsForSpills - Main spilling driver: decides remat vs. stack
// reload per value number, rewrites all defs/uses of li via
// rewriteInstructionsForSpills, then folds or records spill / restore
// points and finalizes kill flags and spill weights on the new intervals.
// NOTE(review): many interior lines (DEBUG braces, else branches, continue
// statements, loop closers, final return) are elided from this chunk.
1657 std::vector<LiveInterval*> LiveIntervals::
1658 addIntervalsForSpills(const LiveInterval &li,
1659 SmallVectorImpl<LiveInterval*> &SpillIs,
1660 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
// The -fast-spill flag routes everything through the simple spiller.
1662 if (EnableFastSpilling)
1663 return addIntervalsForSpillsFast(li, loopInfo, vrm);
1665 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1668 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1669 li.print(dbgs(), tri_);
1673 // Each bit specify whether a spill is required in the MBB.
1674 BitVector SpillMBBs(mf_->getNumBlockIDs());
1675 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1676 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1677 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1678 DenseMap<unsigned,unsigned> MBBVRegsMap;
1679 std::vector<LiveInterval*> NewLIs;
1680 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-value-number rematerialization bookkeeping.
1682 unsigned NumValNums = li.getNumValNums();
1683 SmallVector<MachineInstr*, 4> ReMatDefs;
1684 ReMatDefs.resize(NumValNums, NULL);
1685 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1686 ReMatOrigDefs.resize(NumValNums, NULL);
1687 SmallVector<int, 4> ReMatIds;
1688 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1689 BitVector ReMatDelete(NumValNums);
1690 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1692 // Spilling a split live interval. It cannot be split any further. Also,
1693 // it's also guaranteed to be a single val# / range interval.
1694 if (vrm.getPreSplitReg(li.reg)) {
1695 vrm.setIsSplitFromReg(li.reg, 0);
1696 // Unset the split kill marker on the last use.
1697 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1698 if (KillIdx != SlotIndex()) {
1699 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1700 assert(KillMI && "Last use disappeared?");
1701 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1702 assert(KillOp != -1 && "Last use disappeared?");
1703 KillMI->getOperand(KillOp).setIsKill(false);
1705 vrm.removeKillPoint(li.reg);
1706 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1707 Slot = vrm.getStackSlot(li.reg);
1708 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1709 MachineInstr *ReMatDefMI = DefIsReMat ?
1710 vrm.getReMaterializedMI(li.reg) : NULL;
1712 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1713 bool isLoad = isLoadSS ||
1714 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1715 bool IsFirstRange = true;
1716 for (LiveInterval::Ranges::const_iterator
1717 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1718 // If this is a split live interval with multiple ranges, it means there
1719 // are two-address instructions that re-defined the value. Only the
1720 // first def can be rematerialized!
1722 // Note ReMatOrigDefMI has already been deleted.
1723 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1724 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1725 false, vrm, rc, ReMatIds, loopInfo,
1726 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1727 MBBVRegsMap, NewLIs);
// Non-first ranges of a split interval are never rematerialized.
1729 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1730 Slot, 0, false, false, false,
1731 false, vrm, rc, ReMatIds, loopInfo,
1732 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1733 MBBVRegsMap, NewLIs);
1735 IsFirstRange = false;
1738 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1739 normalizeSpillWeights(NewLIs);
// Splitting is only worthwhile when the interval spans multiple blocks.
1743 bool TrySplit = !intervalIsInOneMBB(li);
1746 bool NeedStackSlot = false;
// First pass over value numbers: decide which defs can be rematerialized.
1747 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1749 const VNInfo *VNI = *i;
1750 unsigned VN = VNI->id;
1751 if (VNI->isUnused())
1752 continue; // Dead val#.
1753 // Is the def for the val# rematerializable?
1754 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1755 ? getInstructionFromIndex(VNI->def) : 0;
1757 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1758 // Remember how to remat the def of this val#.
1759 ReMatOrigDefs[VN] = ReMatDefMI;
1760 // Original def may be modified so we have to make a copy here.
1761 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1762 CloneMIs.push_back(Clone);
1763 ReMatDefs[VN] = Clone;
1765 bool CanDelete = true;
1766 if (VNI->hasPHIKill()) {
1767 // A kill is a phi node, not all of its uses can be rematerialized.
1768 // It must not be deleted.
1770 // Need a stack slot if there is any live range where uses cannot be
1772 NeedStackSlot = true;
1775 ReMatDelete.set(VN);
1777 // Need a stack slot if there is any live range where uses cannot be
1779 NeedStackSlot = true;
1783 // One stack slot per live interval.
1784 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1785 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1786 Slot = vrm.assignVirt2StackSlot(li.reg);
1788 // This case only occurs when the prealloc splitter has already assigned
1789 // a stack slot to this vreg.
1791 Slot = vrm.getStackSlot(li.reg);
1794 // Create new intervals and rewrite defs and uses.
1795 for (LiveInterval::Ranges::const_iterator
1796 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1797 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1798 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1799 bool DefIsReMat = ReMatDefMI != NULL;
1800 bool CanDelete = ReMatDelete[I->valno->id];
1802 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1803 bool isLoad = isLoadSS ||
1804 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1805 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1806 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1807 CanDelete, vrm, rc, ReMatIds, loopInfo,
1808 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1809 MBBVRegsMap, NewLIs);
1812 // Insert spills / restores if we are splitting.
1814 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1815 normalizeSpillWeights(NewLIs);
// Process the recorded spill points, folding stores where possible.
1819 SmallPtrSet<LiveInterval*, 4> AddedKill;
1820 SmallVector<unsigned, 2> Ops;
1821 if (NeedStackSlot) {
1822 int Id = SpillMBBs.find_first();
1824 std::vector<SRInfo> &spills = SpillIdxes[Id];
1825 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1826 SlotIndex index = spills[i].index;
1827 unsigned VReg = spills[i].vreg;
1828 LiveInterval &nI = getOrCreateInterval(VReg);
1829 bool isReMat = vrm.isReMaterialized(VReg);
1830 MachineInstr *MI = getInstructionFromIndex(index);
1831 bool CanFold = false;
1832 bool FoundUse = false;
1834 if (spills[i].canFold) {
1836 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1837 MachineOperand &MO = MI->getOperand(j);
1838 if (!MO.isReg() || MO.getReg() != VReg)
1845 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1846 RestoreMBBs, RestoreIdxes))) {
1847 // MI has two-address uses of the same register. If the use
1848 // isn't the first and only use in the BB, then we can't fold
1849 // it. FIXME: Move this to rewriteInstructionsForSpills.
1856 // Fold the store into the def if possible.
1857 bool Folded = false;
1858 if (CanFold && !Ops.empty()) {
1859 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1862 // Also folded uses, do not issue a load.
1863 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1864 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1866 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1870 // Otherwise tell the spiller to issue a spill.
1872 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1873 bool isKill = LR->end == index.getStoreIndex();
1874 if (!MI->registerDefIsDead(nI.reg))
1875 // No need to spill a dead def.
1876 vrm.addSpillPoint(VReg, isKill, MI);
1878 AddedKill.insert(&nI);
1881 Id = SpillMBBs.find_next(Id);
// Process the recorded restore points, folding loads / remats if possible.
1885 int Id = RestoreMBBs.find_first();
1887 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1888 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1889 SlotIndex index = restores[i].index;
// A null index means this restore was erased by eraseRestoreInfo.
1890 if (index == SlotIndex())
1892 unsigned VReg = restores[i].vreg;
1893 LiveInterval &nI = getOrCreateInterval(VReg);
1894 bool isReMat = vrm.isReMaterialized(VReg);
1895 MachineInstr *MI = getInstructionFromIndex(index);
1896 bool CanFold = false;
1898 if (restores[i].canFold) {
1900 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1901 MachineOperand &MO = MI->getOperand(j);
1902 if (!MO.isReg() || MO.getReg() != VReg)
1906 // If this restore were to be folded, it would have been folded
1915 // Fold the load into the use if possible.
1916 bool Folded = false;
1917 if (CanFold && !Ops.empty()) {
1919 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
// Rematerialized def: try folding the remat'ed instruction instead.
1921 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1923 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1924 // If the rematerializable def is a load, also try to fold it.
1925 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1926 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1927 Ops, isLoadSS, LdSlot, VReg);
1929 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1931 // Re-matting an instruction with virtual register use. Add the
1932 // register as an implicit use on the use MI and mark the register
1933 // interval as unspillable.
1934 LiveInterval &ImpLi = getInterval(ImpUse);
1935 ImpLi.markNotSpillable();
1936 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1941 // If folding is not possible / failed, then tell the spiller to issue a
1942 // load / rematerialization for us.
1944 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1946 vrm.addRestorePoint(VReg, MI);
1948 Id = RestoreMBBs.find_next(Id);
1951 // Finalize intervals: add kills, finalize spill weights, and filter out
1953 std::vector<LiveInterval*> RetNewLIs;
1954 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1955 LiveInterval *LI = NewLIs[i];
1957 LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
// Mark the last use of each new interval as a kill, unless a spill point
// already handled it (AddedKill) or the use is tied to a def.
1958 if (!AddedKill.count(LI)) {
1959 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1960 SlotIndex LastUseIdx = LR->end.getBaseIndex();
1961 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1962 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1963 assert(UseIdx != -1);
1964 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
1965 LastUse->getOperand(UseIdx).setIsKill();
1966 vrm.addKillPoint(LI->reg, LastUseIdx);
1969 RetNewLIs.push_back(LI);
1973 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1974 normalizeSpillWeights(RetNewLIs);
1978 /// hasAllocatableSuperReg - Return true if the specified physical register has
1979 /// any super register that's allocatable.
// (The return statements are on lines elided from this chunk.)
1980 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// Walk the target's super-register list for Reg, looking for one that is
// allocatable and has a live interval.
1981 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1982 if (allocatableRegs_[*AS] && hasInterval(*AS))
1987 /// getRepresentativeReg - Find the largest super register of the specified
1988 /// physical register.
1989 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1990 // Find the largest super-register that is allocatable.
1991 unsigned BestReg = Reg;
1992 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1993 unsigned SuperReg = *AS;
// A super-register that itself has no allocatable super register is the top
// of the chain (the body updating BestReg and the return are elided here).
1994 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2002 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2003 /// specified interval that conflicts with the specified physical register.
2004 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2005 unsigned PhysReg) const {
2006 unsigned NumConflicts = 0;
// Conflicts are measured against the representative (largest) physreg's
// live interval.
2007 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
// Visit every operand referencing li.reg.
2008 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2009 E = mri_->reg_end(); I != E; ++I) {
2010 MachineOperand &O = I.getOperand();
2011 MachineInstr *MI = O.getParent();
2012 SlotIndex Index = getInstructionIndex(MI);
// Count the operand when the physreg is live at this instruction (the
// increment line is elided from this chunk).
2013 if (pli.liveAt(Index))
2016 return NumConflicts;
2019 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2020 /// around all defs and uses of the specified interval. Return true if it
2021 /// was able to cut its interval.
// NOTE(review): several interior lines (continue statements, the Cut flag,
// braces, final return) are elided from this chunk.
2022 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2023 unsigned PhysReg, VirtRegMap &vrm) {
2024 unsigned SpillReg = getRepresentativeReg(PhysReg);
2026 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2027 // If there are registers which alias PhysReg, but which are not a
2028 // sub-register of the chosen representative super register. Assert
2029 // since we can't handle it yet.
2030 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2031 tri_->isSuperRegister(*AS, SpillReg));
// Collect the representative register plus all of its sub-registers that
// have live intervals; these are the intervals to cut around li's def/uses.
2034 SmallVector<unsigned, 4> PRegs;
2035 if (hasInterval(SpillReg))
2036 PRegs.push_back(SpillReg);
2038 SmallSet<unsigned, 4> Added;
2039 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
2040 if (Added.insert(*AS) && hasInterval(*AS)) {
2041 PRegs.push_back(*AS);
2042 for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
// Visit each instruction touching li.reg once.
2047 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2048 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2049 E = mri_->reg_end(); I != E; ++I) {
2050 MachineOperand &O = I.getOperand();
2051 MachineInstr *MI = O.getParent();
2052 if (SeenMIs.count(MI))
2055 SlotIndex Index = getInstructionIndex(MI);
2056 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2057 unsigned PReg = PRegs[i];
2058 LiveInterval &pli = getInterval(PReg);
2059 if (!pli.liveAt(Index))
// The physreg is live here: request an emergency spill around MI and carve
// this instruction's slot range out of the physreg interval.
2061 vrm.addEmergencySpill(PReg, MI);
2062 SlotIndex StartIdx = Index.getLoadIndex();
2063 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2064 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2065 pli.removeRange(StartIdx, EndIdx);
// Cannot carve out the range: allocation has genuinely failed.
2069 raw_string_ostream Msg(msg);
2070 Msg << "Ran out of registers during register allocation!";
2071 if (MI->isInlineAsm()) {
2072 Msg << "\nPlease check your inline asm statement for invalid "
2073 << "constraints:\n";
2074 MI->print(Msg, tm_);
2076 llvm_report_error(Msg.str());
// Also trim any live sub-register intervals at this point.
2078 for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
2079 if (!hasInterval(*AS))
2081 LiveInterval &spli = getInterval(*AS);
2082 if (spli.liveAt(Index))
2083 spli.removeRange(Index.getLoadIndex(),
2084 Index.getNextIndex().getBaseIndex());
2091 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2092 MachineInstr* startInst) {
2093 LiveInterval& Interval = getOrCreateInterval(reg);
2094 VNInfo* VN = Interval.getNextValue(
2095 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2096 startInst, true, getVNInfoAllocator());
2097 VN->setHasPHIKill(true);
2098 VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
2100 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2101 getMBBEndIdx(startInst->getParent()), VN);
2102 Interval.addRange(LR);