//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
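//
// Illustration (the slot indices, vreg names, and opcodes below are
// hypothetical, purely for exposition): given the instructions
//
//   4    %reg1024 = MOV 1
//   8    %reg1025 = ADD %reg1026, %reg1024
//
// the live interval computed for %reg1024 is the half-open range [4, 8),
// from its definition up to (but not including) the slot past its last use.
//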
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

static cl::opt<bool> EarlyCoalescing("early-coalescing", cl::init(false));

static cl::opt<int> CoalescingLimit("early-coalescing-limit",
                                    cl::init(-1), cl::Hidden);
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");
STATISTIC(numCoalescing, "Number of early coalesces performed");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();
  phiJoinCopies.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
/// runOnMachineFunction - Register allocate the whole function
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();
  performEarlyCoalescing();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(errs());
}
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index = index.getNextIndex();
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check uses as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index = index.getNextIndex();
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    errs() << tri_->getName(reg);
  else
    errs() << "%reg" << reg;
}
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });
  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
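  // (Schematic example with hypothetical vregs: phi elimination can leave
  //    bb0:  %reg1027 = COPY %reg1025
  //    bb1:  %reg1027 = COPY %reg1026
  // so the same vreg is defined in several places; only the first def we
  // encounter performs the full interval setup.)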
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");
    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(errs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }
    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb).getNextIndex().getLoadIndex(),
                    ValNo);
    DEBUG(errs() << " +" << NewLR);
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(
          getMBBStartIdx(mf_->getBlockNumbered(*I)),
          getMBBEndIdx(mf_->getBlockNumbered(*I)).getNextIndex().getLoadIndex(),
          ValNo);
      interval.addRange(LR);
      DEBUG(errs() << " +" << LR);
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex killIdx =
        getInstructionIndex(Kill).getDefIndex();
      LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(errs() << " +" << LR);
    }
  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
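    // (Schematic example with hypothetical indices: for the two-address pair
    //    4    %reg1025 = COPY %reg1024
    //    8    %reg1025 = ADD %reg1025, %reg1026
    // the redefinition at 8 means %reg1025's single range really holds two
    // values, split at index 8.)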
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);
      if (MO.isEarlyClobber())
        OldValNo->setHasRedefByEC(true);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(errs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          errs() << " RESULT: ";
          interval.print(errs(), tri_);
        });
    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        phiJoinCopies.push_back(Killer);
        SlotIndex Start = getMBBStartIdx(Killer->getParent());
        SlotIndex End = getInstructionIndex(Killer).getDefIndex();
        DEBUG({
            errs() << " Removing [" << Start << "," << End << "] from: ";
            interval.print(errs(), tri_);
          });
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "Newly discovered PHI interval has >1 ranges.");
        MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
        VNI->addKill(indexes_->getTerminatorGap(killMBB));
        VNI->setHasPHIKill(true);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });

        // Replace the interval with one of a NEW value number. Note that
        // this value number isn't actually defined by an instruction, weird
        // huh? :)
        LiveRange LR(Start, End,
                     interval.getNextValue(SlotIndex(getMBBStartIdx(mbb), true),
                                           0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DEBUG(errs() << " replace range with " << LR);
        interval.addRange(LR);
        LR.valno->addKill(End);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });
      }
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb).getNextIndex().getLoadIndex();
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(errs() << " +" << LR);
    }
  }

  DEBUG(errs() << '\n');
}
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
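  // (Illustration, hypothetical indices: a def of EAX at index 4 whose last
  // read in this block is at index 12 gets a range like [4def, 12use); the
  // range is never extended into a successor block.)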
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });
  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (mi->registerDefIsDead(interval.reg, tri_)) {
    DEBUG(errs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }
  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
          assert(!mi->getOperand(DefIdx).isEarlyClobber() &&
                 "Two address instruction is an early clobber?");
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(errs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }
  // The only case we should have a dead physreg here without a killing
  // instruction is when it is live-in to the function and never used.
  // Another possible case is that the implicit use of the physical
  // register has been deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      errs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills; if it reaches a def before it's killed, then it
  // shouldn't be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(errs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    ++mi;
    if (mi != MBB->end()) {
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(errs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(errs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}
bool LiveIntervals::
isSafeAndProfitableToCoalesce(LiveInterval &DstInt,
                              LiveInterval &SrcInt,
                              SmallVector<MachineInstr*,16> &IdentCopies,
                              SmallVector<MachineInstr*,16> &OtherCopies) {
  unsigned NumIdent = 0;
  for (MachineRegisterInfo::def_iterator ri = mri_->def_begin(SrcInt.reg),
         re = mri_->def_end(); ri != re; ++ri) {
    MachineInstr *MI = &*ri;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (!tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      return false;
    if (SrcReg != DstInt.reg) {
      // Non-identity copy - we cannot handle overlapping intervals.
      if (DstInt.liveAt(getInstructionIndex(MI)))
        return false;
      OtherCopies.push_back(MI);
    } else {
      IdentCopies.push_back(MI);
      ++NumIdent;
    }
  }

  return IdentCopies.size() > OtherCopies.size();
}
void LiveIntervals::performEarlyCoalescing() {
  if (!EarlyCoalescing)
    return;

  /// Perform early coalescing: eliminate copies which feed into phi joins
  /// and whose sources are defined by the phi joins.
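  /// (Schematic example with hypothetical vregs: if both predecessors copy
  /// into the phi join, e.g. "%reg1027 = COPY %reg1025" in bb0 and
  /// "%reg1027 = COPY %reg1026" in bb1, and the copy sources are themselves
  /// defined by the join, those copies become identity moves once coalesced
  /// and can simply be deleted.)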
  for (unsigned i = 0, e = phiJoinCopies.size(); i != e; ++i) {
    MachineInstr *Join = phiJoinCopies[i];
    if (CoalescingLimit != -1 && (int)numCoalescing == CoalescingLimit)
      break;

    unsigned PHISrc, PHIDst, SrcSubReg, DstSubReg;
    bool isMove= tii_->isMoveInstr(*Join, PHISrc, PHIDst, SrcSubReg, DstSubReg);
    assert(isMove && "PHI join instruction must be a move!");
    (void)isMove; // Silence unused-variable warnings when asserts are off.

    LiveInterval &DstInt = getInterval(PHIDst);
    LiveInterval &SrcInt = getInterval(PHISrc);
    SmallVector<MachineInstr*, 16> IdentCopies;
    SmallVector<MachineInstr*, 16> OtherCopies;
    if (!isSafeAndProfitableToCoalesce(DstInt, SrcInt,
                                       IdentCopies, OtherCopies))
      continue;

    DEBUG(errs() << "PHI Join: " << *Join);
    assert(DstInt.containsOneValue() && "PHI join should have just one val#!");
    assert(std::distance(mri_->use_begin(PHISrc), mri_->use_end()) == 1 &&
           "PHI join src should not be used elsewhere");
    VNInfo *VNI = DstInt.getValNumInfo(0);

    // Change the non-identity copies to directly target the phi destination.
    for (unsigned i = 0, e = OtherCopies.size(); i != e; ++i) {
      MachineInstr *PHICopy = OtherCopies[i];
      SlotIndex MIIndex = getInstructionIndex(PHICopy);
      DEBUG(errs() << "Moving: " << MIIndex << ' ' << *PHICopy);
      SlotIndex DefIndex = MIIndex.getDefIndex();
      LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
      SlotIndex StartIndex = SLR->start;
      SlotIndex EndIndex = SLR->end;

      // Delete val# defined by the now identity copy and add the range from
      // beginning of the mbb to the end of the range.
      SrcInt.removeValNo(SLR->valno);
      DEBUG(errs() << "  added range [" << StartIndex << ','
            << EndIndex << "] to reg" << DstInt.reg << '\n');
      assert(!DstInt.liveAt(StartIndex) && "Cannot coalesce when dst live!");
      VNInfo *NewVNI = DstInt.getNextValue(DefIndex, PHICopy, true,
                                           VNInfoAllocator);
      NewVNI->setHasPHIKill(true);
      DstInt.addRange(LiveRange(StartIndex, EndIndex, NewVNI));
      for (unsigned j = 0, ee = PHICopy->getNumOperands(); j != ee; ++j) {
        MachineOperand &MO = PHICopy->getOperand(j);
        if (!MO.isReg() || MO.getReg() != PHISrc)
          continue;
        MO.setReg(PHIDst);
      }
    }

    // Now let's eliminate all the would-be identity copies.
    for (unsigned i = 0, e = IdentCopies.size(); i != e; ++i) {
      MachineInstr *PHICopy = IdentCopies[i];
      DEBUG(errs() << "Coalescing: " << *PHICopy);

      SlotIndex MIIndex = getInstructionIndex(PHICopy);
      SlotIndex DefIndex = MIIndex.getDefIndex();
      LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
      SlotIndex StartIndex = SLR->start;
      SlotIndex EndIndex = SLR->end;

      // Delete val# defined by the now identity copy and add the range from
      // beginning of the mbb to the end of the range.
      SrcInt.removeValNo(SLR->valno);
      RemoveMachineInstrFromMaps(PHICopy);
      PHICopy->eraseFromParent();
      DEBUG(errs() << "  added range [" << StartIndex << ','
            << EndIndex << "] to reg" << DstInt.reg << '\n');
      DstInt.addRange(LiveRange(StartIndex, EndIndex, VNI));
    }

    // Remove the phi join and update the phi block liveness.
    SlotIndex MIIndex = getInstructionIndex(Join);
    SlotIndex UseIndex = MIIndex.getUseIndex();
    SlotIndex DefIndex = MIIndex.getDefIndex();
    LiveRange *SLR = SrcInt.getLiveRangeContaining(UseIndex);
    LiveRange *DLR = DstInt.getLiveRangeContaining(DefIndex);
    DLR->valno->setCopy(0);
    DLR->valno->setIsDefAccurate(false);
    DstInt.addRange(LiveRange(SLR->start, SLR->end, DLR->valno));
    SrcInt.removeRange(SLR->start, SLR->end);
    assert(SrcInt.empty());
    removeInterval(PHISrc);
    RemoveMachineInstrFromMaps(Join);
    Join->eraseFromParent();

    ++numCoalescing;
  }
}
/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1, N], a
/// live interval is an interval [i, j), where 1 <= i <= j < N, in
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (; MI != miEnd; ++MI) {
      DEBUG(errs() << MIIndex << "\t" << *MI);

      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // Handle register defs - build intervals.
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_defs that define values which are live out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}
/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
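/// (Illustration with hypothetical operands: for a copy recorded as
/// "%reg1026 = EXTRACT_SUBREG %EDX, 1", operand 1 is the source register and
/// operand 2 the sub-register index, so the source resolves to the
/// corresponding sub-register of EDX.)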
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    return Reg;
  } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
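/// (Illustration: a side-effect-free constant materialization such as
/// "%reg1024 = MOV32ri 7" can simply be re-executed in front of each use
/// instead of being spilled and reloaded; the opcode here is an x86-flavored
/// example, not something this function assumes.)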
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}
/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// true is returned.
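/// (Illustration, x86-flavored and purely schematic: instead of reloading
/// into a fresh vreg and then adding, a pair like "%reg1024 = MOV32rm <fi#0>;
/// %reg1025 = ADD32rr %reg1025, %reg1024" may fold into the single
/// memory-operand form "%reg1025 = ADD32rm %reg1025, <fi#0>".)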
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
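/// (Illustration, hypothetical vregs: when %reg1024 is spilled to a stack
/// slot, a use such as "%reg1025 = ADD %reg1025, %reg1024" is rewritten to
/// read a fresh vreg that is reloaded from the slot just before the use,
/// and a def of %reg1024 is followed by a store back to the slot.)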
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(errs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }
    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }
    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        } else {
          // We'll try to fold it later if it's profitable.
          CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
        }
      }
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }
    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(errs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        errs() << "\t\t\t\tAdded new interval: ";
        nI.print(errs(), tri_);
        errs() << '\n';
      });
  }
  return CanFold;
}
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  SlotIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHI())
      continue;

    SlotIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                 LiveInterval::Ranges::const_iterator &I,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 BitVector &SpillMBBs,
                 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                 BitVector &RestoreMBBs,
                 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        //   x = use
        //   ...
        //   def = ...
        //       = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }
    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                                           index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                                           Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                           CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                                           ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }
    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }
    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}
void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg)
      Restores[i].index = SlotIndex();
}
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DEBUG({
      errs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.dump();
      errs() << '\n';
    });

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // Create a new register for this spill.
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // The spill weight is now infinity as it
      // cannot be spilled again.
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
           E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      SlotIndex index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DEBUG({
          errs() << "\t\t\t\tadded new interval: ";
          nI.dump();
          errs() << '\n';
        });
    }

    RI = mri_->reg_begin(li.reg);
  }

  return added;
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {

  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DEBUG({
      errs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(errs(), tri_);
      errs() << '\n';
    });
  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
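  // Non-presplit case. An interval that spans more than one basic block is
  // a candidate for splitting rather than spilling everywhere.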
  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
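    // See whether this val#'s def can be rematerialized rather than reloaded
    // from a stack slot; the extra out-flag computed by isReMaterializable
    // is not needed here.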
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // The value is killed by a phi node; not all of its uses can be
        // rematerialized, so it must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }
  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);
    else
      // This case only occurs when the prealloc splitter has already assigned
      // a stack slot to this vreg.
      Slot = vrm.getStackSlot(li.reg);
  }
  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
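  // We are splitting. Visit every block that needs a spill and try to fold
  // the store (and, when possible, a restore in the same block) into the
  // instruction before asking the spiller to emit an explicit store.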
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,
                                   VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
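  // Do the same for the blocks that need restores: fold the reload into the
  // use, rematerialize the value, or fall back to an explicit load.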
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }
      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and update the
              // register interval's spill weight to HUGE_VALF to prevent it
              // from being spilled.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.weight = HUGE_VALF;
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }
  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}
/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}
/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // There may be registers which alias PhysReg but which are not a
    // sub-register of the chosen representative super register. Assert,
    // since we can't handle them yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }
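  // Visit each instruction touching li.reg once, and punch a hole in every
  // conflicting physreg interval around it, recording an emergency spill.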
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        llvm_report_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}
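/// addLiveRangeToEndOfBlock - Given a register and an instruction defining
/// it, create a new value and extend the register's live interval from that
/// def to the end of the instruction's basic block.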
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
  LiveRange LR(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    getMBBEndIdx(startInst->getParent()).getNextIndex().getBaseIndex(), VN);
  Interval.addRange(LR);

  return LR;
}