1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
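//
// In this representation a live interval is a set of half-open ranges
// [start, end) over SlotIndexes, each owned by a value number (VNInfo).
// For example (indices and register numbers here are illustrative only): a
// virtual register %reg1024 defined at index 4 and last used at index 20 of
// the same block ends up with the single range [4,20) and one value number;
// a register live across blocks gets one range per block it is live in, as
// computed by handleVirtualRegisterDef below.
//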
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
49 // Hidden options to help debugging.
50 static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
53 static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
56 STATISTIC(numIntervals , "Number of original intervals");
57 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
58 STATISTIC(numSplits , "Number of intervals split");
60 char LiveIntervals::ID = 0;
61 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
63 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
65 AU.addRequired<AliasAnalysis>();
66 AU.addPreserved<AliasAnalysis>();
67 AU.addPreserved<LiveVariables>();
68 AU.addRequired<LiveVariables>();
69 AU.addPreservedID(MachineLoopInfoID);
70 AU.addPreservedID(MachineDominatorsID);
73 AU.addPreservedID(PHIEliminationID);
74 AU.addRequiredID(PHIEliminationID);
77 AU.addRequiredID(TwoAddressInstructionPassID);
78 AU.addPreserved<ProcessImplicitDefs>();
79 AU.addRequired<ProcessImplicitDefs>();
80 AU.addPreserved<SlotIndexes>();
81 AU.addRequiredTransitive<SlotIndexes>();
82 MachineFunctionPass::getAnalysisUsage(AU);
85 void LiveIntervals::releaseMemory() {
86 // Free the live intervals themselves.
87 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
88 E = r2iMap_.end(); I != E; ++I)
93 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
94 VNInfoAllocator.Reset();
95 while (!CloneMIs.empty()) {
96 MachineInstr *MI = CloneMIs.back();
98 mf_->DeleteMachineInstr(MI);
102 /// runOnMachineFunction - Compute live intervals for the whole function.
104 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
106 mri_ = &mf_->getRegInfo();
107 tm_ = &fn.getTarget();
108 tri_ = tm_->getRegisterInfo();
109 tii_ = tm_->getInstrInfo();
110 aa_ = &getAnalysis<AliasAnalysis>();
111 lv_ = &getAnalysis<LiveVariables>();
112 indexes_ = &getAnalysis<SlotIndexes>();
113 allocatableRegs_ = tri_->getAllocatableSet(fn);
117 numIntervals += getNumIntervals();
123 /// print - Implement the dump method.
124 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
125 OS << "********** INTERVALS **********\n";
126 for (const_iterator I = begin(), E = end(); I != E; ++I) {
127 I->second->print(OS, tri_);
134 void LiveIntervals::printInstrs(raw_ostream &OS) const {
135 OS << "********** MACHINEINSTRS **********\n";
137 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
138 mbbi != mbbe; ++mbbi) {
139 OS << "BB#" << mbbi->getNumber()
140 << ":\t\t# derived from " << mbbi->getName() << "\n";
141 for (MachineBasicBlock::iterator mii = mbbi->begin(),
142 mie = mbbi->end(); mii != mie; ++mii) {
143 OS << getInstructionIndex(mii) << '\t' << *mii;
148 void LiveIntervals::dumpInstrs() const {
152 /// conflictsWithPhysRegDef - Returns true if the specified register
153 /// is defined at any point during the specified live interval.
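/// The implementation below walks every instruction index covered by the
/// interval's ranges, skips deleted instructions and coalesced copies
/// involving li.reg, and reports a conflict as soon as an operand's register
/// (mapped through the VirtRegMap when it is virtual) overlaps the queried
/// physical register.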
154 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
155 VirtRegMap &vrm, unsigned reg) {
156 for (LiveInterval::Ranges::const_iterator
157 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
158 for (SlotIndex index = I->start.getBaseIndex(),
159 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
161 index = index.getNextIndex()) {
162 // Skip deleted instructions.
163 while (index != end && !getInstructionFromIndex(index))
164 index = index.getNextIndex();
165 if (index == end) break;
167 MachineInstr *MI = getInstructionFromIndex(index);
168 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
169 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
170 if (SrcReg == li.reg || DstReg == li.reg)
172 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
173 MachineOperand& mop = MI->getOperand(i);
176 unsigned PhysReg = mop.getReg();
177 if (PhysReg == 0 || PhysReg == li.reg)
179 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
180 if (!vrm.hasPhys(PhysReg))
182 PhysReg = vrm.getPhys(PhysReg);
184 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
193 /// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
194 /// it can check uses as well.
195 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
196 unsigned Reg, bool CheckUse,
197 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
198 for (LiveInterval::Ranges::const_iterator
199 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
200 for (SlotIndex index = I->start.getBaseIndex(),
201 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
203 index = index.getNextIndex()) {
204 // Skip deleted instructions.
205 MachineInstr *MI = 0;
206 while (index != end) {
207 MI = getInstructionFromIndex(index);
210 index = index.getNextIndex();
212 if (index == end) break;
214 if (JoinedCopies.count(MI))
216 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
217 MachineOperand& MO = MI->getOperand(i);
220 if (MO.isUse() && !CheckUse)
222 unsigned PhysReg = MO.getReg();
223 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
225 if (tri_->isSubRegister(Reg, PhysReg))
235 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
236 if (TargetRegisterInfo::isPhysicalRegister(reg))
237 errs() << tri_->getName(reg);
239 errs() << "%reg" << reg;
243 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
244 MachineBasicBlock::iterator mi,
248 LiveInterval &interval) {
250 errs() << "\t\tregister: ";
251 printRegName(interval.reg, tri_);
254 // Virtual registers may be defined multiple times (due to phi
255 // elimination and 2-addr elimination). Much of what we do only has to be
256 // done once for the vreg. We use an empty interval to detect the first
257 // time we see a vreg.
258 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
259 if (interval.empty()) {
260 // Get the Idx of the defining instruction.
261 SlotIndex defIndex = MIIdx.getDefIndex();
262 // Earlyclobbers move back one, so that they overlap the live range of inputs.
264 if (MO.isEarlyClobber())
265 defIndex = MIIdx.getUseIndex();
267 MachineInstr *CopyMI = NULL;
268 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
269 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
270 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
271 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
272 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
274 // Earlyclobbers move back one.
275 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
277 assert(ValNo->id == 0 && "First value in interval is not 0?");
279 // Loop over all of the blocks that the vreg is defined in. There are
280 // two cases we have to handle here. The most common case is a vreg
281 // whose lifetime is contained within a basic block. In this case there
282 // will be a single kill, in MBB, which comes after the definition.
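// For example (indices are illustrative): a vreg defined at index 12 and
// killed at index 28 of the same block gets the single range [12,28) owned
// by value #0.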
283 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
284 // FIXME: what about dead vars?
286 if (vi.Kills[0] != mi)
287 killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
289 killIdx = defIndex.getStoreIndex();
291 // If the kill happens after the definition, we have an intra-block live range.
293 if (killIdx > defIndex) {
294 assert(vi.AliveBlocks.empty() &&
295 "Shouldn't be alive across any blocks!");
296 LiveRange LR(defIndex, killIdx, ValNo);
297 interval.addRange(LR);
298 DEBUG(errs() << " +" << LR << "\n");
299 ValNo->addKill(killIdx);
304 // The other case we handle is when a virtual register lives to the end
305 // of the defining block, potentially live across some blocks, then is
306 // live into some number of blocks, but gets killed. Start by adding a
307 // range that goes from this definition to the end of the defining block.
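// Concretely, the code below adds up to three kinds of ranges: one from the
// def to the end of the defining block, one covering each block in
// vi.AliveBlocks entirely, and one from the start of each killing block up
// to the killing instruction.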
308 LiveRange NewLR(defIndex, getMBBEndIdx(mbb).getNextIndex().getLoadIndex(),
310 DEBUG(errs() << " +" << NewLR);
311 interval.addRange(NewLR);
313 // Iterate over all of the blocks that the variable is completely
314 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the live interval.
316 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
317 E = vi.AliveBlocks.end(); I != E; ++I) {
319 getMBBStartIdx(mf_->getBlockNumbered(*I)),
320 getMBBEndIdx(mf_->getBlockNumbered(*I)).getNextIndex().getLoadIndex(),
322 interval.addRange(LR);
323 DEBUG(errs() << " +" << LR);
326 // Finally, this virtual register is live from the start of any killing
327 // block to the 'use' slot of the killing instruction.
328 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
329 MachineInstr *Kill = vi.Kills[i];
331 getInstructionIndex(Kill).getDefIndex();
332 LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
333 interval.addRange(LR);
334 ValNo->addKill(killIdx);
335 DEBUG(errs() << " +" << LR);
339 // If this is the second time we see a virtual register definition, it
340 // must be due to phi elimination or two addr elimination. If this is
341 // the result of two address elimination, then the vreg is one of the
342 // def-and-use register operands.
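// For example (opcode and register numbers are illustrative):
//   %reg1030<def> = ADD32rr %reg1030<kill>, %reg1031
// after two-address conversion redefines %reg1030; the code below splits the
// existing value into two value numbers, one for the original def and one
// for this redefinition.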
343 if (mi->isRegTiedToUseOperand(MOIdx)) {
344 // If this is a two-address definition, then we have already processed
345 // the live range. The only problem is that we didn't realize there
346 // are actually two values in the live interval. Because of this we
347 // need to take the LiveRegion that defines this register and split it into two values.
349 assert(interval.containsOneValue());
350 SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
351 SlotIndex RedefIndex = MIIdx.getDefIndex();
352 if (MO.isEarlyClobber())
353 RedefIndex = MIIdx.getUseIndex();
355 const LiveRange *OldLR =
356 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
357 VNInfo *OldValNo = OldLR->valno;
359 // Delete the initial value, which should be short and continuous,
360 // because the 2-addr copy must be in the same MBB as the redef.
361 interval.removeRange(DefIndex, RedefIndex);
363 // Two-address vregs should always only be redefined once. This means
364 // that at this point, there should be exactly one value number in it.
365 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
367 // The new value number (#1) is defined by the instruction we claimed defined value #0.
369 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
370 false, // update at *
372 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
374 // Value#0 is now defined by the 2-addr instruction.
375 OldValNo->def = RedefIndex;
376 OldValNo->setCopy(0);
377 if (MO.isEarlyClobber())
378 OldValNo->setHasRedefByEC(true);
380 // Add the new live interval which replaces the range for the input copy.
381 LiveRange LR(DefIndex, RedefIndex, ValNo);
382 DEBUG(errs() << " replace range with " << LR);
383 interval.addRange(LR);
384 ValNo->addKill(RedefIndex);
386 // If this redefinition is dead, we need to add a dummy unit live
387 // range covering the def slot.
389 interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
393 errs() << " RESULT: ";
394 interval.print(errs(), tri_);
397 // Otherwise, this must be because of phi elimination. If this is the
398 // first redefinition of the vreg that we have seen, go back and change
399 // the live range in the PHI block to be a different value number.
400 if (interval.containsOneValue()) {
401 // Remove the old range that we now know has an incorrect number.
402 VNInfo *VNI = interval.getValNumInfo(0);
403 MachineInstr *Killer = vi.Kills[0];
404 SlotIndex Start = getMBBStartIdx(Killer->getParent());
405 SlotIndex End = getInstructionIndex(Killer).getDefIndex();
407 errs() << " Removing [" << Start << "," << End << "] from: ";
408 interval.print(errs(), tri_);
411 interval.removeRange(Start, End);
412 assert(interval.ranges.size() == 1 &&
413 "Newly discovered PHI interval has >1 ranges.");
414 MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
415 VNI->addKill(indexes_->getTerminatorGap(killMBB));
416 VNI->setHasPHIKill(true);
418 errs() << " RESULT: ";
419 interval.print(errs(), tri_);
422 // Replace the interval with one of a NEW value number. Note that this
423 // value number isn't actually defined by an instruction, weird huh? :)
424 LiveRange LR(Start, End,
425 interval.getNextValue(SlotIndex(getMBBStartIdx(mbb), true),
426 0, false, VNInfoAllocator));
427 LR.valno->setIsPHIDef(true);
428 DEBUG(errs() << " replace range with " << LR);
429 interval.addRange(LR);
430 LR.valno->addKill(End);
432 errs() << " RESULT: ";
433 interval.print(errs(), tri_);
437 // In the case of PHI elimination, each variable definition is only
438 // live until the end of the block. We've already taken care of the
439 // rest of the live range.
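// The range added below therefore runs only from this def to the end of the
// block, and the value is marked as killed by a PHI at the block's
// terminator gap (see the addKill/setHasPHIKill calls below).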
440 SlotIndex defIndex = MIIdx.getDefIndex();
441 if (MO.isEarlyClobber())
442 defIndex = MIIdx.getUseIndex();
445 MachineInstr *CopyMI = NULL;
446 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
447 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
448 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
449 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
450 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
452 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
454 SlotIndex killIndex = getMBBEndIdx(mbb).getNextIndex().getLoadIndex();
455 LiveRange LR(defIndex, killIndex, ValNo);
456 interval.addRange(LR);
457 ValNo->addKill(indexes_->getTerminatorGap(mbb));
458 ValNo->setHasPHIKill(true);
459 DEBUG(errs() << " +" << LR);
463 DEBUG(errs() << '\n');
466 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
467 MachineBasicBlock::iterator mi,
470 LiveInterval &interval,
471 MachineInstr *CopyMI) {
472 // A physical register cannot be live across basic blocks, so its
473 // lifetime must end somewhere in its defining basic block.
475 errs() << "\t\tregister: ";
476 printRegName(interval.reg, tri_);
479 SlotIndex baseIndex = MIIdx;
480 SlotIndex start = baseIndex.getDefIndex();
481 // Earlyclobbers move back one.
482 if (MO.isEarlyClobber())
483 start = MIIdx.getUseIndex();
484 SlotIndex end = start;
486 // If it is not used after definition, it is considered dead at
487 // the instruction defining it. Hence its interval is:
488 // [defSlot(def), defSlot(def)+1)
489 // For earlyclobbers, the defSlot was pushed back one; the extra
490 // advance below compensates.
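// Note on slots: throughout this file each instruction index provides four
// slots (load, use, def and store), accessed via getLoadIndex(),
// getUseIndex(), getDefIndex() and getStoreIndex(). "defSlot(def)+1" above
// is the store slot, so a dead def gets the one-slot range
// [getDefIndex(), getStoreIndex()).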
492 DEBUG(errs() << " dead");
493 end = start.getStoreIndex();
497 // If it is not dead on definition, it must be killed by a
498 // subsequent instruction. Hence its interval is:
499 // [defSlot(def), useSlot(kill)+1)
500 baseIndex = baseIndex.getNextIndex();
501 while (++mi != MBB->end()) {
503 if (getInstructionFromIndex(baseIndex) == 0)
504 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
506 if (mi->killsRegister(interval.reg, tri_)) {
507 DEBUG(errs() << " killed");
508 end = baseIndex.getDefIndex();
511 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
513 if (mi->isRegTiedToUseOperand(DefIdx)) {
514 // Two-address instruction.
515 end = baseIndex.getDefIndex();
516 assert(!mi->getOperand(DefIdx).isEarlyClobber() &&
517 "Two address instruction is an early clobber?");
519 // Another instruction redefines the register before it is ever read.
520 // Then the register is essentially dead at the instruction that defines
521 // it. Hence its interval is:
522 // [defSlot(def), defSlot(def)+1)
523 DEBUG(errs() << " dead");
524 end = start.getStoreIndex();
530 baseIndex = baseIndex.getNextIndex();
533 // The only case where we should have a dead physreg here without a killing
534 // instruction is when the register is live-in to the function and never
535 // used. Another possible case is when the implicit use of the physical
536 // register has been deleted by the two-address pass.
537 end = start.getStoreIndex();
540 assert(start < end && "did not find end of interval?");
542 // Already exists? Extend old live interval.
543 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
544 bool Extend = OldLR != interval.end();
545 VNInfo *ValNo = Extend
546 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
547 if (MO.isEarlyClobber() && Extend)
548 ValNo->setHasRedefByEC(true);
549 LiveRange LR(start, end, ValNo);
550 interval.addRange(LR);
551 LR.valno->addKill(end);
552 DEBUG(errs() << " +" << LR << '\n');
555 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
556 MachineBasicBlock::iterator MI,
560 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
561 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
562 getOrCreateInterval(MO.getReg()));
563 else if (allocatableRegs_[MO.getReg()]) {
564 MachineInstr *CopyMI = NULL;
565 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
566 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
567 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
568 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
569 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
571 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
572 getOrCreateInterval(MO.getReg()), CopyMI);
573 // Def of a register also defines its sub-registers.
574 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
575 // If MI also modifies the sub-register explicitly, avoid processing it
576 // more than once. Do not pass in TRI here so it checks for exact match.
577 if (!MI->modifiesRegister(*AS))
578 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
579 getOrCreateInterval(*AS), 0);
583 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
585 LiveInterval &interval, bool isAlias) {
587 errs() << "\t\tlivein register: ";
588 printRegName(interval.reg, tri_);
591 // Look for kills; if the register reaches a def before it is killed, it
592 // should not be considered a live-in.
593 MachineBasicBlock::iterator mi = MBB->begin();
594 SlotIndex baseIndex = MIIdx;
595 SlotIndex start = baseIndex;
596 if (getInstructionFromIndex(baseIndex) == 0)
597 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
599 SlotIndex end = baseIndex;
600 bool SeenDefUse = false;
602 while (mi != MBB->end()) {
603 if (mi->killsRegister(interval.reg, tri_)) {
604 DEBUG(errs() << " killed");
605 end = baseIndex.getDefIndex();
608 } else if (mi->modifiesRegister(interval.reg, tri_)) {
609 // Another instruction redefines the register before it is ever read.
610 // Then the register is essentially dead at the instruction that defines
611 // it. Hence its interval is:
612 // [defSlot(def), defSlot(def)+1)
613 DEBUG(errs() << " dead");
614 end = start.getStoreIndex();
620 if (mi != MBB->end()) {
621 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
625 // Live-in register might not be used at all.
628 DEBUG(errs() << " dead");
629 end = MIIdx.getStoreIndex();
631 DEBUG(errs() << " live through");
637 interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
638 0, false, VNInfoAllocator);
639 vni->setIsPHIDef(true);
640 LiveRange LR(start, end, vni);
642 interval.addRange(LR);
643 LR.valno->addKill(end);
644 DEBUG(errs() << " +" << LR << '\n');
647 /// computeIntervals - Computes the live intervals for virtual
648 /// registers. For some ordering of the machine instructions [1,N], a
649 /// live interval is an interval [i, j) where 1 <= i <= j < N for
650 /// which a variable is live.
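///
/// The walk below visits each basic block in layout order: live-in registers
/// are given ranges first (handleLiveInRegister), then every instruction's
/// register defs are processed via handleRegisterDef, and registers that only
/// appear as undef uses are collected and given empty intervals at the end.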
651 void LiveIntervals::computeIntervals() {
652 DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
653 << "********** Function: "
654 << ((Value*)mf_->getFunction())->getName() << '\n');
656 SmallVector<unsigned, 8> UndefUses;
657 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
659 MachineBasicBlock *MBB = MBBI;
660 // Track the index of the current machine instr.
661 SlotIndex MIIndex = getMBBStartIdx(MBB);
662 DEBUG(errs() << MBB->getName() << ":\n");
664 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
666 // Create intervals for live-ins to this BB first.
667 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
668 LE = MBB->livein_end(); LI != LE; ++LI) {
669 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
670 // Multiple live-ins can alias the same register.
671 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
672 if (!hasInterval(*AS))
673 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
677 // Skip over empty initial indices.
678 if (getInstructionFromIndex(MIIndex) == 0)
679 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
681 for (; MI != miEnd; ++MI) {
682 DEBUG(errs() << MIIndex << "\t" << *MI);
685 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
686 MachineOperand &MO = MI->getOperand(i);
687 if (!MO.isReg() || !MO.getReg())
690 // Handle register defs - build intervals.
692 handleRegisterDef(MBB, MI, MIIndex, MO, i);
693 else if (MO.isUndef())
694 UndefUses.push_back(MO.getReg());
697 // Move to the next instr slot.
698 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
702 // Create empty intervals for registers defined by implicit_def's (except
703 // for those implicit_defs that define values which are live-out of their blocks).
705 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
706 unsigned UndefReg = UndefUses[i];
707 (void)getOrCreateInterval(UndefReg);
711 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
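// Physical registers are given an effectively infinite spill weight so the
// allocator never tries to spill them; virtual registers start at zero and
// accumulate weight as defs and uses are encountered.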
712 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
713 return new LiveInterval(reg, Weight);
716 /// dupInterval - Duplicate a live interval. The caller is responsible for
717 /// managing the allocated memory.
718 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
719 LiveInterval *NewLI = createInterval(li->reg);
720 NewLI->Copy(*li, mri_, getVNInfoAllocator());
724 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
725 /// copy field and returns the source register that defines it.
726 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
730 if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
731 // If it's extracting out of a physical register, return the sub-register.
732 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
733 if (TargetRegisterInfo::isPhysicalRegister(Reg))
734 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
736 } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
737 VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
738 return VNI->getCopy()->getOperand(2).getReg();
740 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
741 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
743 llvm_unreachable("Unrecognized copy instruction!");
747 //===----------------------------------------------------------------------===//
748 // Register allocator hooks.
751 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
752 /// allow one) virtual register operand, then its uses are implicitly using
753 /// the register. Returns the virtual register.
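/// For example (opcode, register numbers and the constant-pool operand are
/// purely illustrative): a rematerializable constant-pool load such as
///   %reg1030 = MOV32rm %reg1029<PIC base>, <cp#0>
/// can only be re-emitted where %reg1029 is live, so this would return
/// %reg1029.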
754 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
755 MachineInstr *MI) const {
757 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
758 MachineOperand &MO = MI->getOperand(i);
759 if (!MO.isReg() || !MO.isUse())
761 unsigned Reg = MO.getReg();
762 if (Reg == 0 || Reg == li.reg)
765 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
766 !allocatableRegs_[Reg])
768 // FIXME: For now, only remat MI with at most one register operand.
770 "Can't rematerialize instruction with multiple register operand!");
779 /// isValNoAvailableAt - Return true if the val# of the specified interval
780 /// which reaches the given instruction also reaches the specified use index.
781 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
782 SlotIndex UseIdx) const {
783 SlotIndex Index = getInstructionIndex(MI);
784 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
785 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
786 return UI != li.end() && UI->valno == ValNo;
789 /// isReMaterializable - Returns true if the definition MI of the specified
790 /// val# of the specified interval is re-materializable.
791 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
792 const VNInfo *ValNo, MachineInstr *MI,
793 SmallVectorImpl<LiveInterval*> &SpillIs,
798 if (!tii_->isTriviallyReMaterializable(MI, aa_))
801 // Target-specific code can mark an instruction as being rematerializable
802 // if it has one virtual reg use, though it had better be something like
803 // a PIC base register which is likely to be live everywhere.
804 unsigned ImpUse = getReMatImplicitUse(li, MI);
806 const LiveInterval &ImpLi = getInterval(ImpUse);
807 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
808 re = mri_->use_end(); ri != re; ++ri) {
809 MachineInstr *UseMI = &*ri;
810 SlotIndex UseIdx = getInstructionIndex(UseMI);
811 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
813 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
817 // If a register operand of the re-materialized instruction is going to
818 // be spilled next, then it's not legal to re-materialize this instruction.
819 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
820 if (ImpUse == SpillIs[i]->reg)
826 /// isReMaterializable - Returns true if the definition MI of the specified
827 /// val# of the specified interval is re-materializable.
828 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
829 const VNInfo *ValNo, MachineInstr *MI) {
830 SmallVector<LiveInterval*, 4> Dummy1;
832 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
835 /// isReMaterializable - Returns true if the definition MI of every
836 /// val# of the specified interval is re-materializable.
837 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
838 SmallVectorImpl<LiveInterval*> &SpillIs,
841 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
843 const VNInfo *VNI = *i;
845 continue; // Dead val#.
846 // Is the def for the val# rematerializable?
847 if (!VNI->isDefAccurate())
849 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
850 bool DefIsLoad = false;
852 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
859 /// FilterFoldedOps - Filter out two-address use operands. Return
860 /// true if it finds any issue with the operands that ought to prevent folding.
862 static bool FilterFoldedOps(MachineInstr *MI,
863 SmallVector<unsigned, 2> &Ops,
865 SmallVector<unsigned, 2> &FoldOps) {
867 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
868 unsigned OpIdx = Ops[i];
869 MachineOperand &MO = MI->getOperand(OpIdx);
870 // FIXME: fold subreg use.
874 MRInfo |= (unsigned)VirtRegMap::isMod;
876 // Filter out two-address use operand(s).
877 if (MI->isRegTiedToDefOperand(OpIdx)) {
878 MRInfo = VirtRegMap::isModRef;
881 MRInfo |= (unsigned)VirtRegMap::isRef;
883 FoldOps.push_back(OpIdx);
889 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
890 /// slot / to reg or any rematerialized load into the ith operand of the
891 /// specified MI. If successful, MI is updated with the newly created MI and the function returns true.
893 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
894 VirtRegMap &vrm, MachineInstr *DefMI,
896 SmallVector<unsigned, 2> &Ops,
897 bool isSS, int Slot, unsigned Reg) {
898 // If it is an implicit def instruction, just delete it.
899 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
900 RemoveMachineInstrFromMaps(MI);
901 vrm.RemoveMachineInstrFromMaps(MI);
902 MI->eraseFromParent();
907 // Filter the list of operand indexes that are to be folded. Abort if
908 // any operand will prevent folding.
910 SmallVector<unsigned, 2> FoldOps;
911 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
914 // The only time it's safe to fold into a two-address instruction is when
915 // it's folding a reload and a spill from / into a spill stack slot.
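// Concretely, the check below rejects folding when DefMI is a rematerialized
// def and any of the operands being folded also modifies the register; a
// tied def would require folding the store side as well, which is only
// handled for the stack-slot reload/spill case.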
916 if (DefMI && (MRInfo & VirtRegMap::isMod))
919 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
920 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
922 // Remember this instruction uses the spill slot.
923 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
925 // Attempt to fold the memory reference into the instruction. If
926 // we can do this, we don't need to insert spill code.
927 MachineBasicBlock &MBB = *MI->getParent();
928 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
929 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
930 vrm.transferSpillPts(MI, fmi);
931 vrm.transferRestorePts(MI, fmi);
932 vrm.transferEmergencySpills(MI, fmi);
933 ReplaceMachineInstrInMaps(MI, fmi);
934 MI = MBB.insert(MBB.erase(MI), fmi);
941 /// canFoldMemoryOperand - Returns true if the specified load / store
942 /// folding is possible.
943 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
944 SmallVector<unsigned, 2> &Ops,
946 // Filter the list of operand indexes that are to be folded. Abort if
947 // any operand will prevent folding.
949 SmallVector<unsigned, 2> FoldOps;
950 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
953 // It's only legal to remat for a use, not a def.
954 if (ReMat && (MRInfo & VirtRegMap::isMod))
957 return tii_->canFoldMemoryOperand(MI, FoldOps);
960 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
961 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
963 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
968 for (++itr; itr != li.ranges.end(); ++itr) {
969 MachineBasicBlock *mbb2 =
970 indexes_->getMBBCoveringRange(itr->start, itr->end);
979 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
980 /// interval on to-be re-materialized operands of MI) with new register.
981 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
982 MachineInstr *MI, unsigned NewVReg,
984 // There is an implicit use. That means one of the other operands is
985 // being remat'ed and the remat'ed instruction has li.reg as a
986 // use operand. Make sure we rewrite that as well.
987 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
988 MachineOperand &MO = MI->getOperand(i);
991 unsigned Reg = MO.getReg();
992 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
994 if (!vrm.isReMaterialized(Reg))
996 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
997 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
999 UseMO->setReg(NewVReg);
1003 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1004 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1005 bool LiveIntervals::
1006 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1007 bool TrySplit, SlotIndex index, SlotIndex end,
1009 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1010 unsigned Slot, int LdSlot,
1011 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1013 const TargetRegisterClass* rc,
1014 SmallVector<int, 4> &ReMatIds,
1015 const MachineLoopInfo *loopInfo,
1016 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1017 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1018 std::vector<LiveInterval*> &NewLIs) {
1019 bool CanFold = false;
1021 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1022 MachineOperand& mop = MI->getOperand(i);
1025 unsigned Reg = mop.getReg();
1026 unsigned RegI = Reg;
1027 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1032 bool TryFold = !DefIsReMat;
1033 bool FoldSS = true; // Default behavior unless it's a remat.
1034 int FoldSlot = Slot;
1036 // If this is the rematerializable definition MI itself and
1037 // all of its uses are rematerialized, simply delete it.
1038 if (MI == ReMatOrigDefMI && CanDelete) {
1039 DEBUG(errs() << "\t\t\t\tErasing re-materializable def: "
1041 RemoveMachineInstrFromMaps(MI);
1042 vrm.RemoveMachineInstrFromMaps(MI);
1043 MI->eraseFromParent();
1047 // If def for this use can't be rematerialized, then try folding.
1048 // If def is rematerializable and it's a load, also try folding.
1049 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1051 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1057 // Scan all of the operands of this instruction rewriting operands
1058 // to use NewVReg instead of li.reg as appropriate. We do this for two reasons:
1061 // 1. If the instr reads the same spilled vreg multiple times, we
1062 // want to reuse the NewVReg.
1063 // 2. If the instr is a two-addr instruction, we are required to
1064 // keep the src/dst regs pinned.
1066 // Keep track of whether we replace a use and/or def so that we can
1067 // create the spill interval with the appropriate range.
1069 HasUse = mop.isUse();
1070 HasDef = mop.isDef();
1071 SmallVector<unsigned, 2> Ops;
1073 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1074 const MachineOperand &MOj = MI->getOperand(j);
1077 unsigned RegJ = MOj.getReg();
1078 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1082 if (!MOj.isUndef()) {
1083 HasUse |= MOj.isUse();
1084 HasDef |= MOj.isDef();
1089 // Create a new virtual register for the spill interval.
1090 // Create the new register now so we can map the fold instruction
1091 // to the new register so when it is unfolded we get the correct answer.
1093 bool CreatedNewVReg = false;
1095 NewVReg = mri_->createVirtualRegister(rc);
1097 CreatedNewVReg = true;
1103 // Do not fold load / store here if we are splitting. We'll find an
1104 // optimal point to insert a load / store later.
1106 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1107 Ops, FoldSS, FoldSlot, NewVReg)) {
1108 // Folding the load/store can completely change the instruction in
1109 // unpredictable ways, rescan it from the beginning.
1112 // We need to give the new vreg the same stack slot as the
1113 // spilled interval.
1114 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1120 if (isNotInMIMap(MI))
1122 goto RestartInstruction;
1125 // We'll try to fold it later if it's profitable.
1126 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1130 mop.setReg(NewVReg);
1131 if (mop.isImplicit())
1132 rewriteImplicitOps(li, MI, NewVReg, vrm);
1134 // Reuse NewVReg for other reads.
1135 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1136 MachineOperand &mopj = MI->getOperand(Ops[j]);
1137 mopj.setReg(NewVReg);
1138 if (mopj.isImplicit())
1139 rewriteImplicitOps(li, MI, NewVReg, vrm);
1142 if (CreatedNewVReg) {
1144 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1145 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1146 // Each valnum may have its own remat id.
1147 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1149 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1151 if (!CanDelete || (HasUse && HasDef)) {
1152 // If this is a two-addr instruction then its use operands are
1153 // rematerializable but its def is not. It should be assigned a stack slot.
1155 vrm.assignVirt2StackSlot(NewVReg, Slot);
1158 vrm.assignVirt2StackSlot(NewVReg, Slot);
1160 } else if (HasUse && HasDef &&
1161 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1162 // If this interval hasn't been assigned a stack slot (because earlier
1163 // def is a deleted remat def), do it now.
1164 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1165 vrm.assignVirt2StackSlot(NewVReg, Slot);
1168 // Re-matting an instruction with virtual register use. Add the
1169 // register as an implicit use on the use MI.
1170 if (DefIsReMat && ImpUse)
1171 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1173 // Create a new register interval for this spill / remat.
1174 LiveInterval &nI = getOrCreateInterval(NewVReg);
1175 if (CreatedNewVReg) {
1176 NewLIs.push_back(&nI);
1177 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1179 vrm.setIsSplitFromReg(NewVReg, li.reg);
1183 if (CreatedNewVReg) {
1184 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1185 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1186 DEBUG(errs() << " +" << LR);
1189 // Extend the split live interval to this def / use.
1190 SlotIndex End = index.getDefIndex();
1191 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1192 nI.getValNumInfo(nI.getNumValNums()-1));
1193 DEBUG(errs() << " +" << LR);
1198 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1199 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1200 DEBUG(errs() << " +" << LR);
1205 errs() << "\t\t\t\tAdded new interval: ";
1206 nI.print(errs(), tri_);
1212 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1214 MachineBasicBlock *MBB,
1215 SlotIndex Idx) const {
1216 SlotIndex End = getMBBEndIdx(MBB);
1217 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1218 if (VNI->kills[j].isPHI())
1221 SlotIndex KillIdx = VNI->kills[j];
1222 if (KillIdx > Idx && KillIdx < End)
1228 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1229 /// during spilling.
1231 struct RewriteInfo {
1236 RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
1237 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1240 struct RewriteInfoCompare {
1241 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1242 return LHS.Index < RHS.Index;
1247 void LiveIntervals::
1248 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1249 LiveInterval::Ranges::const_iterator &I,
1250 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1251 unsigned Slot, int LdSlot,
1252 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1254 const TargetRegisterClass* rc,
1255 SmallVector<int, 4> &ReMatIds,
1256 const MachineLoopInfo *loopInfo,
1257 BitVector &SpillMBBs,
1258 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1259 BitVector &RestoreMBBs,
1260 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1261 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1262 std::vector<LiveInterval*> &NewLIs) {
1263 bool AllCanFold = true;
1264 unsigned NewVReg = 0;
1265 SlotIndex start = I->start.getBaseIndex();
1266 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1268 // First collect all the defs / uses in this live range that will be rewritten.
1269 // Make sure they are sorted according to instruction index.
1270 std::vector<RewriteInfo> RewriteMIs;
1271 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1272 re = mri_->reg_end(); ri != re; ) {
1273 MachineInstr *MI = &*ri;
1274 MachineOperand &O = ri.getOperand();
1276 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1277 SlotIndex index = getInstructionIndex(MI);
1278 if (index < start || index >= end)
1282 // Must be defined by an implicit def. It should not be spilled. Note,
1283 // this is for correctness reasons, e.g.
1284 // 8 %reg1024<def> = IMPLICIT_DEF
1285 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1286 // The live range [12, 14) is not part of the r1024 live interval since
1287 // it's defined by an implicit def. It will not conflict with the live
1288 // interval of r1025. Now suppose both registers are spilled; you can
1289 // easily see a situation where both registers are reloaded before
1290 // the INSERT_SUBREG and assigned target registers that would overlap.
1292 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1294 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1296 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1297 // Now rewrite the defs and uses.
1298 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1299 RewriteInfo &rwi = RewriteMIs[i];
1301 SlotIndex index = rwi.Index;
1302 bool MIHasUse = rwi.HasUse;
1303 bool MIHasDef = rwi.HasDef;
1304 MachineInstr *MI = rwi.MI;
1305 // If MI defs and/or uses the same register multiple times, then there
1306 // are multiple entries.
1307 unsigned NumUses = MIHasUse;
1308 while (i != e && RewriteMIs[i].MI == MI) {
1309 assert(RewriteMIs[i].Index == index);
1310 bool isUse = RewriteMIs[i].HasUse;
1311 if (isUse) ++NumUses;
1313 MIHasDef |= RewriteMIs[i].HasDef;
1316 MachineBasicBlock *MBB = MI->getParent();
1318 if (ImpUse && MI != ReMatDefMI) {
1319 // Re-matting an instruction with virtual register use. Update the
1320 // register interval's spill weight to HUGE_VALF to prevent it from being spilled.
1322 LiveInterval &ImpLi = getInterval(ImpUse);
1323 ImpLi.weight = HUGE_VALF;
1326 unsigned MBBId = MBB->getNumber();
1327 unsigned ThisVReg = 0;
1329 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1330 if (NVI != MBBVRegsMap.end()) {
1331 ThisVReg = NVI->second;
1338 // It's better to start a new interval to avoid artificially
1339 // extending the new interval.
1340 if (MIHasDef && !MIHasUse) {
1341 MBBVRegsMap.erase(MBB->getNumber());
1347 bool IsNew = ThisVReg == 0;
1349 // This ends the previous live interval. If all of its def / use
1350 // can be folded, give it a low spill weight.
1351 if (NewVReg && TrySplit && AllCanFold) {
1352 LiveInterval &nI = getOrCreateInterval(NewVReg);
1359 bool HasDef = false;
1360 bool HasUse = false;
1361 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1362 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1363 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1364 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1365 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1366 if (!HasDef && !HasUse)
1369 AllCanFold &= CanFold;
1371 // Update weight of spill interval.
1372 LiveInterval &nI = getOrCreateInterval(NewVReg);
1374 // The spill weight is now infinity as it cannot be spilled again.
1375 nI.weight = HUGE_VALF;
1379 // Keep track of the last def and first use in each MBB.
1381 if (MI != ReMatOrigDefMI || !CanDelete) {
1382 bool HasKill = false;
1384 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1386 // If this is two-address code, then this index starts a new VNInfo.
1387 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1389 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1391 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1392 SpillIdxes.find(MBBId);
1394 if (SII == SpillIdxes.end()) {
1395 std::vector<SRInfo> S;
1396 S.push_back(SRInfo(index, NewVReg, true));
1397 SpillIdxes.insert(std::make_pair(MBBId, S));
1398 } else if (SII->second.back().vreg != NewVReg) {
1399 SII->second.push_back(SRInfo(index, NewVReg, true));
1400 } else if (index > SII->second.back().index) {
1401 // If there is an earlier def and this is a two-address
1402 // instruction, then it's not possible to fold the store (which
1403 // would also fold the load).
1404 SRInfo &Info = SII->second.back();
1406 Info.canFold = !HasUse;
1408 SpillMBBs.set(MBBId);
1409 } else if (SII != SpillIdxes.end() &&
1410 SII->second.back().vreg == NewVReg &&
1411 index > SII->second.back().index) {
1412 // There is an earlier def that's not killed (must be two-address).
1413 // The spill is no longer needed.
1414 SII->second.pop_back();
1415 if (SII->second.empty()) {
1416 SpillIdxes.erase(MBBId);
1417 SpillMBBs.reset(MBBId);
1424 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1425 SpillIdxes.find(MBBId);
1426 if (SII != SpillIdxes.end() &&
1427 SII->second.back().vreg == NewVReg &&
1428 index > SII->second.back().index)
1429 // There are use(s) following the last def, so it's not safe to fold the spill.
1430 SII->second.back().canFold = false;
1431 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1432 RestoreIdxes.find(MBBId);
1433 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1434 // If we are splitting live intervals, only fold if it's the first
1435 // use and there isn't another use later in the MBB.
1436 RII->second.back().canFold = false;
1438 // Only need a reload if there isn't an earlier def / use.
1439 if (RII == RestoreIdxes.end()) {
1440 std::vector<SRInfo> Infos;
1441 Infos.push_back(SRInfo(index, NewVReg, true));
1442 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1444 RII->second.push_back(SRInfo(index, NewVReg, true));
1446 RestoreMBBs.set(MBBId);
1450 // Update spill weight.
1451 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1452 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1455 if (NewVReg && TrySplit && AllCanFold) {
1456 // If all of its def / use can be folded, give it a low spill weight.
1457 LiveInterval &nI = getOrCreateInterval(NewVReg);
1462 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1463 unsigned vr, BitVector &RestoreMBBs,
1464 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1465 if (!RestoreMBBs[Id])
1467 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1468 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1469 if (Restores[i].index == index &&
1470 Restores[i].vreg == vr &&
1471 Restores[i].canFold)
1476 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1477 unsigned vr, BitVector &RestoreMBBs,
1478 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1479 if (!RestoreMBBs[Id])
1481 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1482 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1483 if (Restores[i].index == index && Restores[i].vreg)
1484 Restores[i].index = SlotIndex();
1487 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1488 /// spilled and create empty intervals for their uses.
1490 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1491 const TargetRegisterClass* rc,
1492 std::vector<LiveInterval*> &NewLIs) {
1493 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1494 re = mri_->reg_end(); ri != re; ) {
1495 MachineOperand &O = ri.getOperand();
1496 MachineInstr *MI = &*ri;
1499 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1500 "Register def was not rewritten?");
1501 RemoveMachineInstrFromMaps(MI);
1502 vrm.RemoveMachineInstrFromMaps(MI);
1503 MI->eraseFromParent();
1505 // This must be a use of an implicit_def so it's not part of the live
1506 // interval. Create a new empty live interval for it.
1507 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1508 unsigned NewVReg = mri_->createVirtualRegister(rc);
1510 vrm.setIsImplicitlyDefined(NewVReg);
1511 NewLIs.push_back(&getOrCreateInterval(NewVReg));
1512 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1513 MachineOperand &MO = MI->getOperand(i);
1514 if (MO.isReg() && MO.getReg() == li.reg) {
1523 std::vector<LiveInterval*> LiveIntervals::
1524 addIntervalsForSpillsFast(const LiveInterval &li,
1525 const MachineLoopInfo *loopInfo,
1527 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1529 std::vector<LiveInterval*> added;
1531 assert(li.weight != HUGE_VALF &&
1532 "attempt to spill already spilled interval!");
1535 errs() << "\t\t\t\tadding intervals for spills for interval: ";
1540 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1542 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1543 while (RI != mri_->reg_end()) {
1544 MachineInstr* MI = &*RI;
1546 SmallVector<unsigned, 2> Indices;
1547 bool HasUse = false;
1548 bool HasDef = false;
1550 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1551 MachineOperand& mop = MI->getOperand(i);
1552 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1554 HasUse |= MI->getOperand(i).isUse();
1555 HasDef |= MI->getOperand(i).isDef();
1557 Indices.push_back(i);
1560 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1561 Indices, true, slot, li.reg)) {
1562 unsigned NewVReg = mri_->createVirtualRegister(rc);
1564 vrm.assignVirt2StackSlot(NewVReg, slot);
1566 // create a new register for this spill
1567 LiveInterval &nI = getOrCreateInterval(NewVReg);
1569 // the spill weight is now infinity as it
1570 // cannot be spilled again
1571 nI.weight = HUGE_VALF;
1573 // Rewrite register operands to use the new vreg.
1574 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1575 E = Indices.end(); I != E; ++I) {
1576 MI->getOperand(*I).setReg(NewVReg);
1578 if (MI->getOperand(*I).isUse())
1579 MI->getOperand(*I).setIsKill(true);
1582 // Fill in the new live interval.
1583 SlotIndex index = getInstructionIndex(MI);
1585 LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1586 nI.getNextValue(SlotIndex(), 0, false,
1587 getVNInfoAllocator()));
1588 DEBUG(errs() << " +" << LR);
1590 vrm.addRestorePoint(NewVReg, MI);
1593 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1594 nI.getNextValue(SlotIndex(), 0, false,
1595 getVNInfoAllocator()));
1596 DEBUG(errs() << " +" << LR);
1598 vrm.addSpillPoint(NewVReg, true, MI);
1601 added.push_back(&nI);
1604 errs() << "\t\t\t\tadded new interval: ";
1611 RI = mri_->reg_begin(li.reg);
1617 std::vector<LiveInterval*> LiveIntervals::
1618 addIntervalsForSpills(const LiveInterval &li,
1619 SmallVectorImpl<LiveInterval*> &SpillIs,
1620 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1622 if (EnableFastSpilling)
1623 return addIntervalsForSpillsFast(li, loopInfo, vrm);
1625 assert(li.weight != HUGE_VALF &&
1626 "attempt to spill already spilled interval!");
1629 errs() << "\t\t\t\tadding intervals for spills for interval: ";
1630 li.print(errs(), tri_);
1634 // Each bit specifies whether a spill is required in the MBB.
1635 BitVector SpillMBBs(mf_->getNumBlockIDs());
1636 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1637 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1638 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1639 DenseMap<unsigned,unsigned> MBBVRegsMap;
1640 std::vector<LiveInterval*> NewLIs;
1641 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1643 unsigned NumValNums = li.getNumValNums();
1644 SmallVector<MachineInstr*, 4> ReMatDefs;
1645 ReMatDefs.resize(NumValNums, NULL);
1646 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1647 ReMatOrigDefs.resize(NumValNums, NULL);
1648 SmallVector<int, 4> ReMatIds;
1649 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1650 BitVector ReMatDelete(NumValNums);
1651 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1653 // Spilling a split live interval. It cannot be split any further. Also,
1654 // it is guaranteed to be a single val# / range interval.
1655 if (vrm.getPreSplitReg(li.reg)) {
1656 vrm.setIsSplitFromReg(li.reg, 0);
1657 // Unset the split kill marker on the last use.
1658 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1659 if (KillIdx != SlotIndex()) {
1660 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1661 assert(KillMI && "Last use disappeared?");
1662 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1663 assert(KillOp != -1 && "Last use disappeared?");
1664 KillMI->getOperand(KillOp).setIsKill(false);
1666 vrm.removeKillPoint(li.reg);
1667 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1668 Slot = vrm.getStackSlot(li.reg);
1669 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1670 MachineInstr *ReMatDefMI = DefIsReMat ?
1671 vrm.getReMaterializedMI(li.reg) : NULL;
1673 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1674 bool isLoad = isLoadSS ||
1675 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1676 bool IsFirstRange = true;
1677 for (LiveInterval::Ranges::const_iterator
1678 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1679 // If this is a split live interval with multiple ranges, it means there
1680 // are two-address instructions that re-defined the value. Only the
1681 // first def can be rematerialized!
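// For example (indices are illustrative): a pre-split interval with ranges
// [4,16) and [16,40), where the second range starts at a two-address redef
// such as %reg1024<def> = ADD32ri %reg1024<kill>, 8, can only rematerialize
// the value defined by the first range.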
1683 // Note ReMatOrigDefMI has already been deleted.
1684 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1685 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1686 false, vrm, rc, ReMatIds, loopInfo,
1687 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1688 MBBVRegsMap, NewLIs);
1690 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1691 Slot, 0, false, false, false,
1692 false, vrm, rc, ReMatIds, loopInfo,
1693 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1694 MBBVRegsMap, NewLIs);
1696 IsFirstRange = false;
1699 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1703 bool TrySplit = !intervalIsInOneMBB(li);
1706 bool NeedStackSlot = false;
1707 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1709 const VNInfo *VNI = *i;
1710 unsigned VN = VNI->id;
1711 if (VNI->isUnused())
1712 continue; // Dead val#.
1713 // Is the def for the val# rematerializable?
1714 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1715 ? getInstructionFromIndex(VNI->def) : 0;
1717 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1718 // Remember how to remat the def of this val#.
1719 ReMatOrigDefs[VN] = ReMatDefMI;
1720 // Original def may be modified so we have to make a copy here.
1721 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1722 CloneMIs.push_back(Clone);
1723 ReMatDefs[VN] = Clone;
1725 bool CanDelete = true;
1726 if (VNI->hasPHIKill()) {
1727 // A kill is a phi node; not all of its uses can be rematerialized.
1728 // It must not be deleted.
1730 // Need a stack slot if there is any live range where uses cannot be rematerialized.
1732 NeedStackSlot = true;
1735 ReMatDelete.set(VN);
1737 // Need a stack slot if there is any live range where uses cannot be rematerialized.
1739 NeedStackSlot = true;
1743 // One stack slot per live interval.
1744 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1745 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1746 Slot = vrm.assignVirt2StackSlot(li.reg);
1748 // This case only occurs when the prealloc splitter has already assigned
1749 // a stack slot to this vreg.
1751 Slot = vrm.getStackSlot(li.reg);
1754 // Create new intervals and rewrite defs and uses.
1755 for (LiveInterval::Ranges::const_iterator
1756 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1757 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1758 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1759 bool DefIsReMat = ReMatDefMI != NULL;
1760 bool CanDelete = ReMatDelete[I->valno->id];
1762 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1763 bool isLoad = isLoadSS ||
1764 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1765 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1766 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1767 CanDelete, vrm, rc, ReMatIds, loopInfo,
1768 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1769 MBBVRegsMap, NewLIs);
1772 // Insert spills / restores if we are splitting.
1774 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1778 SmallPtrSet<LiveInterval*, 4> AddedKill;
1779 SmallVector<unsigned, 2> Ops;
1780 if (NeedStackSlot) {
1781 int Id = SpillMBBs.find_first();
1783 std::vector<SRInfo> &spills = SpillIdxes[Id];
1784 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1785 SlotIndex index = spills[i].index;
1786 unsigned VReg = spills[i].vreg;
1787 LiveInterval &nI = getOrCreateInterval(VReg);
1788 bool isReMat = vrm.isReMaterialized(VReg);
1789 MachineInstr *MI = getInstructionFromIndex(index);
1790 bool CanFold = false;
1791 bool FoundUse = false;
1792 Ops.clear();
1793 if (spills[i].canFold) {
1794 CanFold = true;
1795 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1796 MachineOperand &MO = MI->getOperand(j);
1797 if (!MO.isReg() || MO.getReg() != VReg)
1798 continue;
1800 Ops.push_back(j);
1801 if (MO.isDef())
1802 continue;
1803 if (isReMat ||
1804 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1805 RestoreMBBs, RestoreIdxes))) {
1806 // MI has two-address uses of the same register. If the use
1807 // isn't the first and only use in the BB, then we can't fold
1808 // it. FIXME: Move this to rewriteInstructionsForSpills.
1809 CanFold = false;
1810 break;
1811 }
1812 FoundUse = true;
1813 }
1814 }
1815 // Fold the store into the def if possible.
1816 bool Folded = false;
1817 if (CanFold && !Ops.empty()) {
1818 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg)) {
1819 Folded = true;
1820 if (FoundUse) {
1821 // Also folded uses, do not issue a load.
1822 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1823 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1824 }
1825 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1826 }
1827 }
1829 // Otherwise tell the spiller to issue a spill.
1830 if (!Folded) {
1831 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1832 bool isKill = LR->end == index.getStoreIndex();
1833 if (!MI->registerDefIsDead(nI.reg))
1834 // No need to spill a dead def.
1835 vrm.addSpillPoint(VReg, isKill, MI);
1836 if (isKill)
1837 AddedKill.insert(&nI);
1838 }
1839 }
1840 Id = SpillMBBs.find_next(Id);
1841 }
1842 }
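// Place the reloads / rematerializations recorded in RestoreIdxes, again
// preferring to fold the memory access into the using instruction when
// possible.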
1844 int Id = RestoreMBBs.find_first();
1845 while (Id != -1) {
1846 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1847 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1848 SlotIndex index = restores[i].index;
1849 if (index == SlotIndex())
1850 continue;
1851 unsigned VReg = restores[i].vreg;
1852 LiveInterval &nI = getOrCreateInterval(VReg);
1853 bool isReMat = vrm.isReMaterialized(VReg);
1854 MachineInstr *MI = getInstructionFromIndex(index);
1855 bool CanFold = false;
1856 Ops.clear();
1857 if (restores[i].canFold) {
1858 CanFold = true;
1859 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1860 MachineOperand &MO = MI->getOperand(j);
1861 if (!MO.isReg() || MO.getReg() != VReg)
1862 continue;
1864 if (MO.isDef()) {
1865 // If this restore were to be folded, it would have been folded
1866 // already.
1867 CanFold = false;
1868 break;
1869 }
1870 Ops.push_back(j);
1871 }
1872 }
1874 // Fold the load into the use if possible.
1875 bool Folded = false;
1876 if (CanFold && !Ops.empty()) {
1877 if (!isReMat)
1878 Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg);
1879 else {
1880 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1881 int LdSlot = 0;
1882 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1883 // If the rematerializable def is a load, also try to fold it.
1884 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1885 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1886 Ops, isLoadSS, LdSlot, VReg);
1887 if (!Folded) {
1888 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1889 if (ImpUse) {
1890 // Re-matting an instruction with virtual register use. Add the
1891 // register as an implicit use on the use MI and update the register
1892 // interval's spill weight to HUGE_VALF to prevent it from being
1893 // spilled.
1894 LiveInterval &ImpLi = getInterval(ImpUse);
1895 ImpLi.weight = HUGE_VALF;
1896 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1897 }
1898 }
1899 }
1900 }
1901 // If folding is not possible / failed, then tell the spiller to issue a
1902 // load / rematerialization for us.
1903 if (Folded)
1904 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1905 else
1906 vrm.addRestorePoint(VReg, MI);
1907 }
1908 Id = RestoreMBBs.find_next(Id);
1909 }
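// Every recorded spill / restore point has now either been folded into an
// existing instruction or handed to the spiller via addSpillPoint /
// addRestorePoint. What remains is bookkeeping on the new intervals.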
1911 // Finalize intervals: add kills, finalize spill weights, and filter out
1912 // dead intervals.
1913 std::vector<LiveInterval*> RetNewLIs;
1914 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1915 LiveInterval *LI = NewLIs[i];
1916 if (!LI->empty()) {
1917 LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
1918 if (!AddedKill.count(LI)) {
1919 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1920 SlotIndex LastUseIdx = LR->end.getBaseIndex();
1921 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1922 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1923 assert(UseIdx != -1);
1924 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
1925 LastUse->getOperand(UseIdx).setIsKill();
1926 vrm.addKillPoint(LI->reg, LastUseIdx);
1927 }
1928 }
1929 RetNewLIs.push_back(LI);
1930 }
1931 }
1933 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1934 return RetNewLIs;
1935 }
1937 /// hasAllocatableSuperReg - Return true if the specified physical register has
1938 /// any super register that's allocatable.
1939 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1940 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1941 if (allocatableRegs_[*AS] && hasInterval(*AS))
1942 return true;
1943 return false;
1944 }
1946 /// getRepresentativeReg - Find the largest super register of the specified
1947 /// physical register.
1948 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1949 // Find the largest super-register that is allocatable.
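// For example, starting from X86's AX this would typically return EAX (or
// RAX in 64-bit mode), provided that wider register has a live interval and
// no allocatable super-register of its own.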
1950 unsigned BestReg = Reg;
1951 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1952 unsigned SuperReg = *AS;
1953 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1954 BestReg = SuperReg;
1955 break;
1956 }
1957 }
1958 return BestReg;
1959 }
1961 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1962 /// specified interval that conflict with the specified physical register.
1963 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1964 unsigned PhysReg) const {
1965 unsigned NumConflicts = 0;
1966 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1967 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1968 E = mri_->reg_end(); I != E; ++I) {
1969 MachineOperand &O = I.getOperand();
1970 MachineInstr *MI = O.getParent();
1971 SlotIndex Index = getInstructionIndex(MI);
1972 if (pli.liveAt(Index))
1973 ++NumConflicts;
1974 }
1975 return NumConflicts;
1976 }
1978 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1979 /// around all defs and uses of the specified interval. Return true if it
1980 /// was able to cut the physical register's live interval.
1981 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1982 unsigned PhysReg, VirtRegMap &vrm) {
1983 unsigned SpillReg = getRepresentativeReg(PhysReg);
1985 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1986 // If a register aliases PhysReg but is not a sub-register of the chosen
1987 // representative super-register, assert, since we cannot handle that
1988 // case yet.
1989 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
1990 tri_->isSuperRegister(*AS, SpillReg));
1992 bool Cut = false;
1993 SmallVector<unsigned, 4> PRegs;
1994 if (hasInterval(SpillReg))
1995 PRegs.push_back(SpillReg);
1996 else {
1997 SmallSet<unsigned, 4> Added;
1998 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
1999 if (Added.insert(*AS) && hasInterval(*AS)) {
2000 PRegs.push_back(*AS);
2001 for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
2002 Added.insert(*ASS);
2003 }
2004 }
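// PRegs now holds the representative register itself or, failing that, its
// live sub-registers; these are the physical intervals that may have to be
// spilled around instructions defining or using li.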
2006 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2007 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2008 E = mri_->reg_end(); I != E; ++I) {
2009 MachineOperand &O = I.getOperand();
2010 MachineInstr *MI = O.getParent();
2011 if (SeenMIs.count(MI))
2012 continue;
2013 SeenMIs.insert(MI);
2014 SlotIndex Index = getInstructionIndex(MI);
2015 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2016 unsigned PReg = PRegs[i];
2017 LiveInterval &pli = getInterval(PReg);
2018 if (!pli.liveAt(Index))
2019 continue;
2020 vrm.addEmergencySpill(PReg, MI);
2021 SlotIndex StartIdx = Index.getLoadIndex();
2022 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2023 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2024 pli.removeRange(StartIdx, EndIdx);
2025 Cut = true;
2026 } else {
2027 std::string msg;
2028 raw_string_ostream Msg(msg);
2029 Msg << "Ran out of registers during register allocation!";
2030 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2031 Msg << "\nPlease check your inline asm statement for invalid "
2032 << "constraints:\n";
2033 MI->print(Msg, tm_);
2034 }
2035 llvm_report_error(Msg.str());
2036 }
2037 for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
2038 if (!hasInterval(*AS))
2039 continue;
2040 LiveInterval &spli = getInterval(*AS);
2041 if (spli.liveAt(Index))
2042 spli.removeRange(Index.getLoadIndex(),
2043 Index.getNextIndex().getBaseIndex());
2044 }
2045 }
2046 }
2048 return Cut;
2049 }
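/// addLiveRangeToEndOfBlock - Create a new value number for reg, defined by
/// startInst, and extend its live range to the end of startInst's basic
/// block, marking the value as live out through a PHI. This is presumably
/// intended for passes that insert new definitions and need the interval
/// updated without recomputing the whole analysis.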
2050 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2051 MachineInstr* startInst) {
2052 LiveInterval& Interval = getOrCreateInterval(reg);
2053 VNInfo* VN = Interval.getNextValue(
2054 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2055 startInst, true, getVNInfoAllocator());
2056 VN->setHasPHIKill(true);
2057 VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
2058 LiveRange LR(
2059 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2060 getMBBEndIdx(startInst->getParent()).getNextIndex().getBaseIndex(), VN);
2061 Interval.addRange(LR);