//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;
49 // Hidden options for help debugging.
50 static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
53 static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
56 STATISTIC(numIntervals , "Number of original intervals");
57 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
58 STATISTIC(numSplits , "Number of intervals split");
60 char LiveIntervals::ID = 0;
61 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
63 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
65 AU.addRequired<AliasAnalysis>();
66 AU.addPreserved<AliasAnalysis>();
67 AU.addPreserved<LiveVariables>();
68 AU.addRequired<LiveVariables>();
69 AU.addPreservedID(MachineLoopInfoID);
70 AU.addPreservedID(MachineDominatorsID);
73 AU.addPreservedID(PHIEliminationID);
74 AU.addRequiredID(PHIEliminationID);
77 AU.addRequiredID(TwoAddressInstructionPassID);
78 AU.addPreserved<ProcessImplicitDefs>();
79 AU.addRequired<ProcessImplicitDefs>();
80 AU.addPreserved<SlotIndexes>();
81 AU.addRequiredTransitive<SlotIndexes>();
82 MachineFunctionPass::getAnalysisUsage(AU);
85 void LiveIntervals::releaseMemory() {
86 // Free the live intervals themselves.
87 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
88 E = r2iMap_.end(); I != E; ++I)
93 // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
94 VNInfoAllocator.Reset();
95 while (!CloneMIs.empty()) {
96 MachineInstr *MI = CloneMIs.back();
98 mf_->DeleteMachineInstr(MI);
102 /// runOnMachineFunction - Register allocate the whole function
104 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
106 mri_ = &mf_->getRegInfo();
107 tm_ = &fn.getTarget();
108 tri_ = tm_->getRegisterInfo();
109 tii_ = tm_->getInstrInfo();
110 aa_ = &getAnalysis<AliasAnalysis>();
111 lv_ = &getAnalysis<LiveVariables>();
112 indexes_ = &getAnalysis<SlotIndexes>();
113 allocatableRegs_ = tri_->getAllocatableSet(fn);
117 numIntervals += getNumIntervals();
123 /// print - Implement the dump method.
124 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
125 OS << "********** INTERVALS **********\n";
126 for (const_iterator I = begin(), E = end(); I != E; ++I) {
127 I->second->print(OS, tri_);
134 void LiveIntervals::printInstrs(raw_ostream &OS) const {
135 OS << "********** MACHINEINSTRS **********\n";
137 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
138 mbbi != mbbe; ++mbbi) {
139 OS << "BB#" << mbbi->getNumber()
140 << ":\t\t# derived from " << mbbi->getName() << "\n";
141 for (MachineBasicBlock::iterator mii = mbbi->begin(),
142 mie = mbbi->end(); mii != mie; ++mii) {
143 OS << getInstructionIndex(mii) << '\t' << *mii;
148 void LiveIntervals::dumpInstrs() const {
152 /// conflictsWithPhysRegDef - Returns true if the specified register
153 /// is defined during the duration of the specified interval.
154 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
155 VirtRegMap &vrm, unsigned reg) {
156 for (LiveInterval::Ranges::const_iterator
157 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
158 for (SlotIndex index = I->start.getBaseIndex(),
159 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
161 index = index.getNextIndex()) {
162 // skip deleted instructions
163 while (index != end && !getInstructionFromIndex(index))
164 index = index.getNextIndex();
165 if (index == end) break;
167 MachineInstr *MI = getInstructionFromIndex(index);
168 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
169 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
170 if (SrcReg == li.reg || DstReg == li.reg)
172 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
173 MachineOperand& mop = MI->getOperand(i);
176 unsigned PhysReg = mop.getReg();
177 if (PhysReg == 0 || PhysReg == li.reg)
179 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
180 if (!vrm.hasPhys(PhysReg))
182 PhysReg = vrm.getPhys(PhysReg);
184 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
193 /// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegRef except
194 /// it can check use as well.
195 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
196 unsigned Reg, bool CheckUse,
197 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
198 for (LiveInterval::Ranges::const_iterator
199 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
200 for (SlotIndex index = I->start.getBaseIndex(),
201 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
203 index = index.getNextIndex()) {
204 // Skip deleted instructions.
205 MachineInstr *MI = 0;
206 while (index != end) {
207 MI = getInstructionFromIndex(index);
210 index = index.getNextIndex();
212 if (index == end) break;
214 if (JoinedCopies.count(MI))
216 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
217 MachineOperand& MO = MI->getOperand(i);
220 if (MO.isUse() && !CheckUse)
222 unsigned PhysReg = MO.getReg();
223 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
225 if (tri_->isSubRegister(Reg, PhysReg))
235 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
236 if (TargetRegisterInfo::isPhysicalRegister(reg))
237 errs() << tri_->getName(reg);
239 errs() << "%reg" << reg;
243 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
244 MachineBasicBlock::iterator mi,
248 LiveInterval &interval) {
250 errs() << "\t\tregister: ";
251 printRegName(interval.reg, tri_);
254 // Virtual registers may be defined multiple times (due to phi
255 // elimination and 2-addr elimination). Much of what we do only has to be
256 // done once for the vreg. We use an empty interval to detect the first
257 // time we see a vreg.
258 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
259 if (interval.empty()) {
260 // Get the Idx of the defining instructions.
261 SlotIndex defIndex = MIIdx.getDefIndex();
262 // Earlyclobbers move back one, so that they overlap the live range
264 if (MO.isEarlyClobber())
265 defIndex = MIIdx.getUseIndex();
267 MachineInstr *CopyMI = NULL;
268 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
269 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
270 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
271 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
272 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
274 // Earlyclobbers move back one.
275 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
277 assert(ValNo->id == 0 && "First value in interval is not 0?");
279 // Loop over all of the blocks that the vreg is defined in. There are
280 // two cases we have to handle here. The most common case is a vreg
281 // whose lifetime is contained within a basic block. In this case there
282 // will be a single kill, in MBB, which comes after the definition.
283 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
284 // FIXME: what about dead vars?
286 if (vi.Kills[0] != mi)
287 killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
289 killIdx = defIndex.getStoreIndex();
291 // If the kill happens after the definition, we have an intra-block
293 if (killIdx > defIndex) {
294 assert(vi.AliveBlocks.empty() &&
295 "Shouldn't be alive across any blocks!");
296 LiveRange LR(defIndex, killIdx, ValNo);
297 interval.addRange(LR);
298 DEBUG(errs() << " +" << LR << "\n");
299 ValNo->addKill(killIdx);
304 // The other case we handle is when a virtual register lives to the end
305 // of the defining block, potentially live across some blocks, then is
306 // live into some number of blocks, but gets killed. Start by adding a
307 // range that goes from this definition to the end of the defining block.
308 LiveRange NewLR(defIndex, getMBBEndIdx(mbb).getNextIndex().getLoadIndex(),
310 DEBUG(errs() << " +" << NewLR);
311 interval.addRange(NewLR);
313 // Iterate over all of the blocks that the variable is completely
314 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
316 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
317 E = vi.AliveBlocks.end(); I != E; ++I) {
319 getMBBStartIdx(mf_->getBlockNumbered(*I)),
320 getMBBEndIdx(mf_->getBlockNumbered(*I)).getNextIndex().getLoadIndex(),
322 interval.addRange(LR);
323 DEBUG(errs() << " +" << LR);
326 // Finally, this virtual register is live from the start of any killing
327 // block to the 'use' slot of the killing instruction.
328 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
329 MachineInstr *Kill = vi.Kills[i];
331 getInstructionIndex(Kill).getDefIndex();
332 LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
333 interval.addRange(LR);
334 ValNo->addKill(killIdx);
335 DEBUG(errs() << " +" << LR);
339 // If this is the second time we see a virtual register definition, it
340 // must be due to phi elimination or two addr elimination. If this is
341 // the result of two address elimination, then the vreg is one of the
342 // def-and-use register operand.
343 if (mi->isRegTiedToUseOperand(MOIdx)) {
344 // If this is a two-address definition, then we have already processed
345 // the live range. The only problem is that we didn't realize there
346 // are actually two values in the live interval. Because of this we
347 // need to take the LiveRegion that defines this register and split it
349 assert(interval.containsOneValue());
350 SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
351 SlotIndex RedefIndex = MIIdx.getDefIndex();
352 if (MO.isEarlyClobber())
353 RedefIndex = MIIdx.getUseIndex();
355 const LiveRange *OldLR =
356 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
357 VNInfo *OldValNo = OldLR->valno;
359 // Delete the initial value, which should be short and continuous,
360 // because the 2-addr copy must be in the same MBB as the redef.
361 interval.removeRange(DefIndex, RedefIndex);
363 // Two-address vregs should always only be redefined once. This means
364 // that at this point, there should be exactly one value number in it.
365 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
367 // The new value number (#1) is defined by the instruction we claimed
369 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
370 false, // update at *
372 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
374 // Value#0 is now defined by the 2-addr instruction.
375 OldValNo->def = RedefIndex;
376 OldValNo->setCopy(0);
377 if (MO.isEarlyClobber())
378 OldValNo->setHasRedefByEC(true);
380 // Add the new live interval which replaces the range for the input copy.
381 LiveRange LR(DefIndex, RedefIndex, ValNo);
382 DEBUG(errs() << " replace range with " << LR);
383 interval.addRange(LR);
384 ValNo->addKill(RedefIndex);
386 // If this redefinition is dead, we need to add a dummy unit live
387 // range covering the def slot.
389 interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
393 errs() << " RESULT: ";
394 interval.print(errs(), tri_);
397 // Otherwise, this must be because of phi elimination. If this is the
398 // first redefinition of the vreg that we have seen, go back and change
399 // the live range in the PHI block to be a different value number.
400 if (interval.containsOneValue()) {
401 // Remove the old range that we now know has an incorrect number.
402 VNInfo *VNI = interval.getValNumInfo(0);
403 MachineInstr *Killer = vi.Kills[0];
404 SlotIndex Start = getMBBStartIdx(Killer->getParent());
405 SlotIndex End = getInstructionIndex(Killer).getDefIndex();
407 errs() << " Removing [" << Start << "," << End << "] from: ";
408 interval.print(errs(), tri_);
411 interval.removeRange(Start, End);
412 assert(interval.ranges.size() == 1 &&
413 "Newly discovered PHI interval has >1 ranges.");
414 MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
415 VNI->addKill(indexes_->getTerminatorGap(killMBB));
416 VNI->setHasPHIKill(true);
418 errs() << " RESULT: ";
419 interval.print(errs(), tri_);
422 // Replace the interval with one of a NEW value number. Note that this
423 // value number isn't actually defined by an instruction, weird huh? :)
424 LiveRange LR(Start, End,
425 interval.getNextValue(SlotIndex(getMBBStartIdx(mbb), true),
426 0, false, VNInfoAllocator));
427 LR.valno->setIsPHIDef(true);
428 DEBUG(errs() << " replace range with " << LR);
429 interval.addRange(LR);
430 LR.valno->addKill(End);
432 errs() << " RESULT: ";
433 interval.print(errs(), tri_);
437 // In the case of PHI elimination, each variable definition is only
438 // live until the end of the block. We've already taken care of the
439 // rest of the live range.
440 SlotIndex defIndex = MIIdx.getDefIndex();
441 if (MO.isEarlyClobber())
442 defIndex = MIIdx.getUseIndex();
445 MachineInstr *CopyMI = NULL;
446 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
447 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
448 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
449 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
450 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
452 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
454 SlotIndex killIndex = getMBBEndIdx(mbb).getNextIndex().getLoadIndex();
455 LiveRange LR(defIndex, killIndex, ValNo);
456 interval.addRange(LR);
457 ValNo->addKill(indexes_->getTerminatorGap(mbb));
458 ValNo->setHasPHIKill(true);
459 DEBUG(errs() << " +" << LR);
463 DEBUG(errs() << '\n');
466 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
467 MachineBasicBlock::iterator mi,
470 LiveInterval &interval,
471 MachineInstr *CopyMI) {
472 // A physical register cannot be live across basic block, so its
473 // lifetime must end somewhere in its defining basic block.
475 errs() << "\t\tregister: ";
476 printRegName(interval.reg, tri_);
479 SlotIndex baseIndex = MIIdx;
480 SlotIndex start = baseIndex.getDefIndex();
481 // Earlyclobbers move back one.
482 if (MO.isEarlyClobber())
483 start = MIIdx.getUseIndex();
484 SlotIndex end = start;
486 // If it is not used after definition, it is considered dead at
487 // the instruction defining it. Hence its interval is:
488 // [defSlot(def), defSlot(def)+1)
489 // For earlyclobbers, the defSlot was pushed back one; the extra
490 // advance below compensates.
492 DEBUG(errs() << " dead");
493 end = start.getStoreIndex();
497 // If it is not dead on definition, it must be killed by a
498 // subsequent instruction. Hence its interval is:
499 // [defSlot(def), useSlot(kill)+1)
500 baseIndex = baseIndex.getNextIndex();
501 while (++mi != MBB->end()) {
503 if (getInstructionFromIndex(baseIndex) == 0)
504 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
506 if (mi->killsRegister(interval.reg, tri_)) {
507 DEBUG(errs() << " killed");
508 end = baseIndex.getDefIndex();
511 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
513 if (mi->isRegTiedToUseOperand(DefIdx)) {
514 // Two-address instruction.
515 end = baseIndex.getDefIndex();
516 assert(!mi->getOperand(DefIdx).isEarlyClobber() &&
517 "Two address instruction is an early clobber?");
519 // Another instruction redefines the register before it is ever read.
520 // Then the register is essentially dead at the instruction that defines
521 // it. Hence its interval is:
522 // [defSlot(def), defSlot(def)+1)
523 DEBUG(errs() << " dead");
524 end = start.getStoreIndex();
530 baseIndex = baseIndex.getNextIndex();
533 // The only case we should have a dead physreg here without a killing or
534 // instruction where we know it's dead is if it is live-in to the function
535 // and never used. Another possible case is the implicit use of the
536 // physical register has been deleted by two-address pass.
537 end = start.getStoreIndex();
540 assert(start < end && "did not find end of interval?");
542 // Already exists? Extend old live interval.
543 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
544 bool Extend = OldLR != interval.end();
545 VNInfo *ValNo = Extend
546 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
547 if (MO.isEarlyClobber() && Extend)
548 ValNo->setHasRedefByEC(true);
549 LiveRange LR(start, end, ValNo);
550 interval.addRange(LR);
551 LR.valno->addKill(end);
552 DEBUG(errs() << " +" << LR << '\n');
555 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
556 MachineBasicBlock::iterator MI,
560 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
561 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
562 getOrCreateInterval(MO.getReg()));
563 else if (allocatableRegs_[MO.getReg()]) {
564 MachineInstr *CopyMI = NULL;
565 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
566 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
567 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
568 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
569 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
571 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
572 getOrCreateInterval(MO.getReg()), CopyMI);
573 // Def of a register also defines its sub-registers.
574 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
575 // If MI also modifies the sub-register explicitly, avoid processing it
576 // more than once. Do not pass in TRI here so it checks for exact match.
577 if (!MI->modifiesRegister(*AS))
578 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
579 getOrCreateInterval(*AS), 0);
583 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
585 LiveInterval &interval, bool isAlias) {
587 errs() << "\t\tlivein register: ";
588 printRegName(interval.reg, tri_);
591 // Look for kills, if it reaches a def before it's killed, then it shouldn't
592 // be considered a livein.
593 MachineBasicBlock::iterator mi = MBB->begin();
594 SlotIndex baseIndex = MIIdx;
595 SlotIndex start = baseIndex;
596 if (getInstructionFromIndex(baseIndex) == 0)
597 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
599 SlotIndex end = baseIndex;
600 bool SeenDefUse = false;
602 while (mi != MBB->end()) {
603 if (mi->killsRegister(interval.reg, tri_)) {
604 DEBUG(errs() << " killed");
605 end = baseIndex.getDefIndex();
608 } else if (mi->modifiesRegister(interval.reg, tri_)) {
609 // Another instruction redefines the register before it is ever read.
610 // Then the register is essentially dead at the instruction that defines
611 // it. Hence its interval is:
612 // [defSlot(def), defSlot(def)+1)
613 DEBUG(errs() << " dead");
614 end = start.getStoreIndex();
620 if (mi != MBB->end()) {
621 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
625 // Live-in register might not be used at all.
628 DEBUG(errs() << " dead");
629 end = MIIdx.getStoreIndex();
631 DEBUG(errs() << " live through");
637 interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
638 0, false, VNInfoAllocator);
639 vni->setIsPHIDef(true);
640 LiveRange LR(start, end, vni);
642 interval.addRange(LR);
643 LR.valno->addKill(end);
644 DEBUG(errs() << " +" << LR << '\n');
647 /// computeIntervals - computes the live intervals for virtual
648 /// registers. for some ordering of the machine instructions [1,N] a
649 /// live interval is an interval [i, j) where 1 <= i <= j < N for
650 /// which a variable is live
651 void LiveIntervals::computeIntervals() {
652 DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
653 << "********** Function: "
654 << ((Value*)mf_->getFunction())->getName() << '\n');
656 SmallVector<unsigned, 8> UndefUses;
657 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
659 MachineBasicBlock *MBB = MBBI;
660 // Track the index of the current machine instr.
661 SlotIndex MIIndex = getMBBStartIdx(MBB);
662 DEBUG(errs() << MBB->getName() << ":\n");
664 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
666 // Create intervals for live-ins to this BB first.
667 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
668 LE = MBB->livein_end(); LI != LE; ++LI) {
669 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
670 // Multiple live-ins can alias the same register.
671 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
672 if (!hasInterval(*AS))
673 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
677 // Skip over empty initial indices.
678 if (getInstructionFromIndex(MIIndex) == 0)
679 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
681 for (; MI != miEnd; ++MI) {
682 DEBUG(errs() << MIIndex << "\t" << *MI);
685 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
686 MachineOperand &MO = MI->getOperand(i);
687 if (!MO.isReg() || !MO.getReg())
690 // handle register defs - build intervals
692 handleRegisterDef(MBB, MI, MIIndex, MO, i);
693 else if (MO.isUndef())
694 UndefUses.push_back(MO.getReg());
697 // Move to the next instr slot.
698 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
702 // Create empty intervals for registers defined by implicit_def's (except
703 // for those implicit_def that define values which are liveout of their
705 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
706 unsigned UndefReg = UndefUses[i];
707 (void)getOrCreateInterval(UndefReg);
711 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
712 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
713 return new LiveInterval(reg, Weight);
716 /// dupInterval - Duplicate a live interval. The caller is responsible for
717 /// managing the allocated memory.
718 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
719 LiveInterval *NewLI = createInterval(li->reg);
720 NewLI->Copy(*li, mri_, getVNInfoAllocator());
724 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
725 /// copy field and returns the source register that defines it.
726 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
730 if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
731 // If it's extracting out of a physical register, return the sub-register.
732 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
733 if (TargetRegisterInfo::isPhysicalRegister(Reg))
734 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
736 } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
737 VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
738 return VNI->getCopy()->getOperand(2).getReg();
740 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
741 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
743 llvm_unreachable("Unrecognized copy instruction!");
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//
751 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
752 /// allow one) virtual register operand, then its uses are implicitly using
753 /// the register. Returns the virtual register.
754 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
755 MachineInstr *MI) const {
757 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
758 MachineOperand &MO = MI->getOperand(i);
759 if (!MO.isReg() || !MO.isUse())
761 unsigned Reg = MO.getReg();
762 if (Reg == 0 || Reg == li.reg)
765 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
766 !allocatableRegs_[Reg])
768 // FIXME: For now, only remat MI with at most one register operand.
770 "Can't rematerialize instruction with multiple register operand!");
779 /// isValNoAvailableAt - Return true if the val# of the specified interval
780 /// which reaches the given instruction also reaches the specified use index.
781 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
782 SlotIndex UseIdx) const {
783 SlotIndex Index = getInstructionIndex(MI);
784 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
785 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
786 return UI != li.end() && UI->valno == ValNo;
789 /// isReMaterializable - Returns true if the definition MI of the specified
790 /// val# of the specified interval is re-materializable.
791 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
792 const VNInfo *ValNo, MachineInstr *MI,
793 SmallVectorImpl<LiveInterval*> &SpillIs,
798 if (!tii_->isTriviallyReMaterializable(MI, aa_))
801 // Target-specific code can mark an instruction as being rematerializable
802 // if it has one virtual reg use, though it had better be something like
803 // a PIC base register which is likely to be live everywhere.
804 unsigned ImpUse = getReMatImplicitUse(li, MI);
806 const LiveInterval &ImpLi = getInterval(ImpUse);
807 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
808 re = mri_->use_end(); ri != re; ++ri) {
809 MachineInstr *UseMI = &*ri;
810 SlotIndex UseIdx = getInstructionIndex(UseMI);
811 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
813 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
817 // If a register operand of the re-materialized instruction is going to
818 // be spilled next, then it's not legal to re-materialize this instruction.
819 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
820 if (ImpUse == SpillIs[i]->reg)
826 /// isReMaterializable - Returns true if the definition MI of the specified
827 /// val# of the specified interval is re-materializable.
828 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
829 const VNInfo *ValNo, MachineInstr *MI) {
830 SmallVector<LiveInterval*, 4> Dummy1;
832 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
835 /// isReMaterializable - Returns true if every definition of MI of every
836 /// val# of the specified interval is re-materializable.
837 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
838 SmallVectorImpl<LiveInterval*> &SpillIs,
841 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
843 const VNInfo *VNI = *i;
845 continue; // Dead val#.
846 // Is the def for the val# rematerializable?
847 if (!VNI->isDefAccurate())
849 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
850 bool DefIsLoad = false;
852 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
859 /// FilterFoldedOps - Filter out two-address use operands. Return
860 /// true if it finds any issue with the operands that ought to prevent
862 static bool FilterFoldedOps(MachineInstr *MI,
863 SmallVector<unsigned, 2> &Ops,
865 SmallVector<unsigned, 2> &FoldOps) {
867 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
868 unsigned OpIdx = Ops[i];
869 MachineOperand &MO = MI->getOperand(OpIdx);
870 // FIXME: fold subreg use.
874 MRInfo |= (unsigned)VirtRegMap::isMod;
876 // Filter out two-address use operand(s).
877 if (MI->isRegTiedToDefOperand(OpIdx)) {
878 MRInfo = VirtRegMap::isModRef;
881 MRInfo |= (unsigned)VirtRegMap::isRef;
883 FoldOps.push_back(OpIdx);
889 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
890 /// slot / to reg or any rematerialized load into ith operand of specified
891 /// MI. If it is successul, MI is updated with the newly created MI and
893 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
894 VirtRegMap &vrm, MachineInstr *DefMI,
896 SmallVector<unsigned, 2> &Ops,
897 bool isSS, int Slot, unsigned Reg) {
898 // If it is an implicit def instruction, just delete it.
899 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
900 RemoveMachineInstrFromMaps(MI);
901 vrm.RemoveMachineInstrFromMaps(MI);
902 MI->eraseFromParent();
907 // Filter the list of operand indexes that are to be folded. Abort if
908 // any operand will prevent folding.
910 SmallVector<unsigned, 2> FoldOps;
911 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
914 // The only time it's safe to fold into a two address instruction is when
915 // it's folding reload and spill from / into a spill stack slot.
916 if (DefMI && (MRInfo & VirtRegMap::isMod))
919 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
920 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
922 // Remember this instruction uses the spill slot.
923 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
925 // Attempt to fold the memory reference into the instruction. If
926 // we can do this, we don't need to insert spill code.
927 MachineBasicBlock &MBB = *MI->getParent();
928 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
929 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
930 vrm.transferSpillPts(MI, fmi);
931 vrm.transferRestorePts(MI, fmi);
932 vrm.transferEmergencySpills(MI, fmi);
933 ReplaceMachineInstrInMaps(MI, fmi);
934 MI = MBB.insert(MBB.erase(MI), fmi);
941 /// canFoldMemoryOperand - Returns true if the specified load / store
942 /// folding is possible.
943 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
944 SmallVector<unsigned, 2> &Ops,
946 // Filter the list of operand indexes that are to be folded. Abort if
947 // any operand will prevent folding.
949 SmallVector<unsigned, 2> FoldOps;
950 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
953 // It's only legal to remat for a use, not a def.
954 if (ReMat && (MRInfo & VirtRegMap::isMod))
957 return tii_->canFoldMemoryOperand(MI, FoldOps);
960 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
961 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
963 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
968 for (++itr; itr != li.ranges.end(); ++itr) {
969 MachineBasicBlock *mbb2 =
970 indexes_->getMBBCoveringRange(itr->start, itr->end);
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
/// When an operand of MI is itself being rematerialized, the cached remat
/// instruction may still reference li.reg; that stale reference is rewritten
/// here to NewVReg so the eventual remat emits the correct register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
  // There is an implicit use. That means one of the other operand is
  // being remat'ed and the remat'ed instruction has li.reg as an
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    unsigned Reg = MO.getReg();
    // Only virtual registers can have cached remat instructions.
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
    if (!vrm.isReMaterialized(Reg))
    // Patch the use of li.reg inside the cached remat instruction.
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    UseMO->setReg(NewVReg);
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
///
/// rewriteInstructionForSpills rewrites a single instruction MI that refers to
/// the spilled interval li within [index, end): it either deletes a fully
/// rematerialized def, folds the stack load/store directly into MI, or rewrites
/// the operands to a fresh virtual register (NewVReg) and records the new
/// spill interval. Returns whether the rewrite is foldable later (CanFold).
/// HasDef / HasUse are out-params describing which operand kinds of li.reg
/// were found on MI, used by the caller to build the new interval's ranges.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                            bool TrySplit, SlotIndex index, SlotIndex end,
                            MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                            unsigned Slot, int LdSlot,
                            bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                            const TargetRegisterClass* rc,
                            SmallVector<int, 4> &ReMatIds,
                            const MachineLoopInfo *loopInfo,
                            unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                            DenseMap<unsigned,unsigned> &MBBVRegsMap,
                            std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
  // Walk every operand; getNumOperands() is re-read each iteration because
  // folding below can rewrite MI (see the goto RestartInstruction path).
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    // Skip operands that cannot belong to the spilled virtual interval.
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    // If this is the rematerializable definition MI itself and
    // all of its uses are rematerialized, simply delete it.
    if (MI == ReMatOrigDefMI && CanDelete) {
      DEBUG(errs() << "\t\t\t\tErasing re-materlizable def: "
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    // If def for this use can't be rematerialized, then try folding.
    // If def is rematerializable and it's a load, also try folding.
    TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
    // Try fold loads (from stack slot, constant pool, etc.) into uses.
    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // 1. If the instr reads the same spilled vreg multiple times, we
    // want to reuse the NewVReg.
    // 2. If the instr is a two-addr instruction, we are required to
    // keep the src/dst regs pinned.
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    // Collect the indexes of all later operands that read/write the same
    // spilled vreg so they can all be rewritten to NewVReg together.
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
      if (!MOj.isUndef()) {
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    bool CreatedNewVReg = false;
    NewVReg = mri_->createVirtualRegister(rc);
    CreatedNewVReg = true;
    // The new virtual register should get the same allocation hints as the
    std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
    if (Hint.first || Hint.second)
      mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    // Do not fold load / store here if we are splitting. We'll find an
    // optimal point to insert a load / store later.
    if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                             Ops, FoldSS, FoldSlot, NewVReg)) {
      // Folding the load/store can completely change the instruction in
      // unpredictable ways, rescan it from the beginning.
      // We need to give the new vreg the same stack slot as the
      // spilled interval.
      vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
      if (isNotInMIMap(MI))
      // MI was replaced during folding; restart the operand scan on the
      // (possibly completely different) replacement instruction.
      goto RestartInstruction;
    // We'll try to fold it later if it's profitable.
    CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
    // Rewrite this operand, and any implicit remat references, to NewVReg.
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);
    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    if (CreatedNewVReg) {
      // Propagate remat info: the new vreg is rematerialized the same way
      // as the original, sharing one remat id per value number.
      vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
      if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
        // Each valnum may have its own remat id.
        ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
      vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
      if (!CanDelete || (HasUse && HasDef)) {
        // If this is a two-addr instruction then its use operands are
        // rematerializable but its def is not. It should be assigned a
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      vrm.setIsSplitFromReg(NewVReg, li.reg);
    if (CreatedNewVReg) {
      // Fresh interval: live from the reload point up to this def.
      LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(errs() << " +" << LR);
      // Extend the split live interval to this def / use.
      SlotIndex End = index.getDefIndex();
      LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                   nI.getValNumInfo(nI.getNumValNums()-1));
      DEBUG(errs() << " +" << LR);
    // Def case: live from this def to the spill-store point.
    LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
    DEBUG(errs() << " +" << LR);
    errs() << "\t\t\t\tAdded new interval: ";
    nI.print(errs(), tri_);
/// anyKillInMBBAfterIdx - Return true if the value number VNI has a kill
/// strictly inside MBB after the given index (kills at or before Idx, PHI
/// kills, and kills past the block end do not count). Note: the VNI
/// parameter declaration is on a line not visible in this view.
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  SlotIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    // PHI kills are markers, not real instructions; skip them.
    if (VNI->kills[j].isPHI())
    SlotIndex KillIdx = VNI->kills[j];
    // Kill must fall strictly between Idx and the end of MBB.
    if (KillIdx > Idx && KillIdx < End)
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling. One record per (instruction, operand-kind) occurrence
/// of the spilled register; Index orders records by instruction slot.
struct RewriteInfo {
  RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
    : Index(i), MI(mi), HasUse(u), HasDef(d) {}

// Strict weak ordering by slot index, used to sort RewriteInfo records so
// rewriting proceeds in instruction order within the live range.
struct RewriteInfoCompare {
  bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
    return LHS.Index < RHS.Index;
/// rewriteInstructionsForSpills - Rewrite all defs / uses of li.reg inside
/// the live range *I. Collects every referencing instruction in the range,
/// sorts them by slot index, rewrites each via
/// rewriteInstructionForSpills, and records per-MBB spill points
/// (SpillIdxes/SpillMBBs) and restore points (RestoreIdxes/RestoreMBBs)
/// for the spiller, reusing one new vreg per MBB via MBBVRegsMap.
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                             LiveInterval::Ranges::const_iterator &I,
                             MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                             unsigned Slot, int LdSlot,
                             bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                             const TargetRegisterClass* rc,
                             SmallVector<int, 4> &ReMatIds,
                             const MachineLoopInfo *loopInfo,
                             BitVector &SpillMBBs,
                             DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                             BitVector &RestoreMBBs,
                             DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                             DenseMap<unsigned,unsigned> &MBBVRegsMap,
                             std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  // Round the range down/up to whole-instruction (base index) boundaries.
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    // Only instructions inside [start, end) belong to this live range.
    if (index < start || index >= end)
    // Must be defined by an implicit def. It should not be spilled. Note,
    // this is for correctness reason. e.g.
    // 8   %reg1024<def> = IMPLICIT_DEF
    // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
    // The live range [12, 14) are not part of the r1024 live interval since
    // it's defined by an implicit def. It will not conflicts with live
    // interval of r1025. Now suppose both registers are spilled, you can
    // easily see a situation where both registers are reloaded before
    // the INSERT_SUBREG and both target registers that would overlap.
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  // If the def is rematerializable and reads another vreg, that vreg must be
  // kept live as an implicit use wherever the value is rematted.
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    SlotIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasDef |= RewriteMIs[i].HasDef;
    MachineBasicBlock *MBB = MI->getParent();
    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    // Reuse the new vreg already created for earlier rewrites in this MBB.
    DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
    if (NVI != MBBVRegsMap.end()) {
      ThisVReg = NVI->second;
    // It's better to start a new interval to avoid artifically
    // extend the new interval.
    if (MIHasDef && !MIHasUse) {
      MBBVRegsMap.erase(MBB->getNumber());
    bool IsNew = ThisVReg == 0;
    // This ends the previous live interval. If all of its def / use
    // can be folded, give it a low spill weight.
    if (NewVReg && TrySplit && AllCanFold) {
      LiveInterval &nI = getOrCreateInterval(NewVReg);
    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                                index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                                Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                                ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    // Nothing on MI was rewritten (e.g. the remat def was deleted).
    if (!HasDef && !HasUse)
    AllCanFold &= CanFold;
    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    // The spill weight is now infinity as it cannot be spilled again.
    nI.weight = HUGE_VALF;
    // Keep track of the last def and first use in each MBB.
    if (MI != ReMatOrigDefMI || !CanDelete) {
      bool HasKill = false;
      HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
      // If this is a two-address code, then this index starts a new VNInfo.
      const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
      HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII == SpillIdxes.end()) {
        // First spill point recorded for this MBB.
        std::vector<SRInfo> S;
        S.push_back(SRInfo(index, NewVReg, true));
        SpillIdxes.insert(std::make_pair(MBBId, S));
      } else if (SII->second.back().vreg != NewVReg) {
        SII->second.push_back(SRInfo(index, NewVReg, true));
      } else if (index > SII->second.back().index) {
        // If there is an earlier def and this is a two-address
        // instruction, then it's not possible to fold the store (which
        // would also fold the load).
        SRInfo &Info = SII->second.back();
        Info.canFold = !HasUse;
        SpillMBBs.set(MBBId);
      } else if (SII != SpillIdxes.end() &&
                 SII->second.back().vreg == NewVReg &&
                 index > SII->second.back().index) {
        // There is an earlier def that's not killed (must be two-address).
        // The spill is no longer needed.
        SII->second.pop_back();
        if (SII->second.empty()) {
          SpillIdxes.erase(MBBId);
          SpillMBBs.reset(MBBId);
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      // Only need a reload if there isn't an earlier def / use.
      if (RII == RestoreIdxes.end()) {
        std::vector<SRInfo> Infos;
        Infos.push_back(SRInfo(index, NewVReg, true));
        RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        RII->second.push_back(SRInfo(index, NewVReg, true));
      RestoreMBBs.set(MBBId);
    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
/// alsoFoldARestore - Return true if block Id has a foldable restore point
/// recorded for register vr at exactly the given index, i.e. the pending
/// restore could be folded together with a spill fold at the same spot.
bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                  DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  // No restores recorded for this block at all.
  if (!RestoreMBBs[Id])
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  // Look for a matching (index, vreg) restore that is still foldable.
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
/// eraseRestoreInfo - Invalidate the restore point(s) recorded for block Id
/// at the given index by clearing their index to the invalid SlotIndex()
/// (entries with an invalid index are skipped by the restore-emission loop
/// in addIntervalsForSpills).
void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                  DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    // NOTE(review): this tests `Restores[i].vreg` for non-zero rather than
    // comparing it against the `vr` parameter (as alsoFoldARestore does).
    // Virtual registers are normally non-zero, so this may clear restores
    // belonging to other vregs at the same index — confirm intent against
    // upstream history before changing.
    if (Restores[i].index == index && Restores[i].vreg)
      Restores[i].index = SlotIndex();
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses. Defs are erased
/// outright; each remaining use gets a fresh vreg with an (empty) interval
/// so the allocator still assigns it a register without reserving memory.
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    // Any def still referencing li.reg at this point must be the
    // IMPLICIT_DEF itself; delete it from the function and the index maps.
    assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
           "Register def was not rewritten?");
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    // This must be an use of an implicit_def so it's not part of the live
    // interval. Create a new empty live interval for it.
    // FIXME: Can we simply erase some of the instructions? e.g. Stores?
    unsigned NewVReg = mri_->createVirtualRegister(rc);
    vrm.setIsImplicitlyDefined(NewVReg);
    NewLIs.push_back(&getOrCreateInterval(NewVReg));
    // Rewrite every operand on MI that still names li.reg.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.getReg() == li.reg) {
/// addIntervalsForSpillsFast - Simplified spiller used when -fast-spill is
/// enabled: one stack slot for the whole interval, and for each referencing
/// instruction either fold the memory operand or create a fresh vreg with a
/// tiny interval around the instruction plus explicit spill/restore points.
/// Returns the list of newly created intervals.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
  std::vector<LiveInterval*> added;
  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");
  errs() << "\t\t\t\tadding intervals for spills for interval: ";
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;
    // Gather all operand indexes on MI that reference the spilled reg,
    // noting whether MI reads and/or writes it.
    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;
    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;
      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();
      Indices.push_back(i);
    // Prefer folding the stack slot straight into MI; fall back to an
    // explicit reload/spill around the instruction.
    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.assignVirt2StackSlot(NewVReg, slot);
      // create a new register for this spill
      LiveInterval &nI = getOrCreateInterval(NewVReg);
      // the spill weight is now infinity as it
      // cannot be spilled again
      nI.weight = HUGE_VALF;
      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
             E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);
        // The short interval ends here, so mark the read as a kill.
        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      // Fill in the new live interval.
      SlotIndex index = getInstructionIndex(MI);
      // Use side: live from the reload slot to the use slot.
      LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
                   nI.getNextValue(SlotIndex(), 0, false,
                                   getVNInfoAllocator()));
      DEBUG(errs() << " +" << LR);
      vrm.addRestorePoint(NewVReg, MI);
      // Def side: live from the def slot to the spill-store slot.
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false,
                                   getVNInfoAllocator()));
      DEBUG(errs() << " +" << LR);
      vrm.addSpillPoint(NewVReg, true, MI);
      added.push_back(&nI);
    errs() << "\t\t\t\tadded new interval: ";
    // Folding may have invalidated the iterator; restart the scan.
    RI = mri_->reg_begin(li.reg);
/// addIntervalsForSpills - The main spilling driver. Decides, per value
/// number, whether the def is rematerializable (and deletable), assigns one
/// stack slot per interval if needed, rewrites all defs/uses via
/// rewriteInstructionsForSpills, then walks the recorded per-MBB spill and
/// restore points: folding them into instructions where legal, otherwise
/// telling the VirtRegMap spiller to emit loads/stores. Finally it fixes
/// kill flags and spill weights on the new intervals and returns them.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  // The -fast-spill flag routes to the simplified spiller.
  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);
  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");
  errs() << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(errs(), tri_);
  // Each bit specify whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
  // Per-value-number rematerialization bookkeeping.
  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
  // Spilling a split live interval. It cannot be split any further. Also,
  // it's also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    vrm.removeKillPoint(li.reg);
    // The split interval already has a stack slot; reuse its remat state.
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      // Note ReMatOrigDefMI has already been deleted.
      rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                           Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                           false, vrm, rc, ReMatIds, loopInfo,
                           SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                           MBBVRegsMap, NewLIs);
      // Non-first (or non-remat) ranges are rewritten without remat info.
      rewriteInstructionsForSpills(li, false, I, NULL, 0,
                           Slot, 0, false, false, false,
                           false, vrm, rc, ReMatIds, loopInfo,
                           SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                           MBBVRegsMap, NewLIs);
      IsFirstRange = false;
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
  // Splitting is only worthwhile when the interval spans multiple blocks.
  bool TrySplit = !intervalIsInOneMBB(li);
  bool NeedStackSlot = false;
  // First pass over value numbers: decide remat-ability and deletability.
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;
      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        // Need a stack slot if there is any live range where uses cannot be
        NeedStackSlot = true;
      ReMatDelete.set(VN);
      // Need a stack slot if there is any live range where uses cannot be
      NeedStackSlot = true;
  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);
    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
      Slot = vrm.getStackSlot(li.reg);
  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo,
                         SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                         MBBVRegsMap, NewLIs);
  // Insert spills / restores if we are splitting.
  handleSpilledImpDefs(li, vrm, rc, NewLIs);
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    // Emit (or fold) a spill for every recorded spill point, block by block.
    int Id = SpillMBBs.find_first();
    std::vector<SRInfo> &spills = SpillIdxes[Id];
    for (unsigned i = 0, e = spills.size(); i != e; ++i) {
      SlotIndex index = spills[i].index;
      unsigned VReg = spills[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      bool FoundUse = false;
      if (spills[i].canFold) {
        // Collect the operands on MI that name VReg to attempt a store fold.
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
          (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                          RestoreMBBs, RestoreIdxes))) {
            // MI has two-address uses of the same register. If the use
            // isn't the first and only use in the BB, then we can't fold
            // it. FIXME: Move this to rewriteInstructionsForSpills.
      // Fold the store into the def if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
          // Also folded uses, do not issue a load.
          eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
          nI.removeRange(index.getLoadIndex(), index.getDefIndex());
          nI.removeRange(index.getDefIndex(), index.getStoreIndex());
      // Otherwise tell the spiller to issue a spill.
        LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
        bool isKill = LR->end == index.getStoreIndex();
        if (!MI->registerDefIsDead(nI.reg))
          // No need to spill a dead def.
          vrm.addSpillPoint(VReg, isKill, MI);
        // The store is the interval's last point: no extra kill flag needed.
        AddedKill.insert(&nI);
      Id = SpillMBBs.find_next(Id);
  // Emit (or fold) a restore for every recorded restore point.
  int Id = RestoreMBBs.find_first();
  std::vector<SRInfo> &restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = restores.size(); i != e; ++i) {
    SlotIndex index = restores[i].index;
    // Entries invalidated by eraseRestoreInfo carry the null SlotIndex.
    if (index == SlotIndex())
    unsigned VReg = restores[i].vreg;
    LiveInterval &nI = getOrCreateInterval(VReg);
    bool isReMat = vrm.isReMaterialized(VReg);
    MachineInstr *MI = getInstructionFromIndex(index);
    bool CanFold = false;
    if (restores[i].canFold) {
      for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
        MachineOperand &MO = MI->getOperand(j);
        if (!MO.isReg() || MO.getReg() != VReg)
        // If this restore were to be folded, it would have been folded
    // Fold the load into the use if possible.
    bool Folded = false;
    if (CanFold && !Ops.empty()) {
      Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
      MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
      bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
      // If the rematerializable def is a load, also try to fold it.
      if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
        Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                      Ops, isLoadSS, LdSlot, VReg);
      unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
      // Re-matting an instruction with virtual register use. Add the
      // register as an implicit use on the use MI and update the register
      // interval's spill weight to HUGE_VALF to prevent it from being
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
    // If folding is not possible / failed, then tell the spiller to issue a
    // load / rematerialization for us.
      nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      vrm.addRestorePoint(VReg, MI);
    Id = RestoreMBBs.find_next(Id);
  // Finalize intervals: add kills, finalize spill weights, and filter out
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    // Normalize the accumulated weight by the interval's size.
    LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
    if (!AddedKill.count(LI)) {
      // The interval ends at a use: mark that last use as a kill and tell
      // VirtRegMap so it can unset it if the interval is later extended.
      LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
      SlotIndex LastUseIdx = LR->end.getBaseIndex();
      MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
      int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
      assert(UseIdx != -1);
      if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
        LastUse->getOperand(UseIdx).setIsKill();
        vrm.addKillPoint(LI->reg, LastUseIdx);
    RetNewLIs.push_back(LI);
  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable (and has a live interval computed).
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    // Pick a super-register that has an interval but no allocatable
    // super-register of its own, i.e. it is maximal in the chain.
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflicts with the specified physical register.
/// A use/def conflicts if the representative physreg interval is live at
/// the instruction's slot index.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  // Conflicts are measured against the largest super-register's interval.
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
  return NumConflicts;
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);
  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg, but which are not a
    // sub-register of the chosen representative super register. Assert
    // since we can't handle it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  // Collect the representative register and all of its sub-registers that
  // have intervals; these are the physregs to spill around each use.
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  SmallSet<unsigned, 4> Added;
  for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
    if (Added.insert(*AS) && hasInterval(*AS)) {
      PRegs.push_back(*AS);
      for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)

  // Visit each instruction referencing li.reg exactly once (SeenMIs guards
  // against multiple operands on the same MI).
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
      // The physreg is live here: ask the spiller to save/restore it
      // around MI and carve the instruction's slot out of its interval.
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        // Cannot carve out the range: register allocation has truly failed.
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        llvm_report_error(Msg.str());
      // Also trim any live sub-register intervals at this slot.
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
2056 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2057 MachineInstr* startInst) {
2058 LiveInterval& Interval = getOrCreateInterval(reg);
2059 VNInfo* VN = Interval.getNextValue(
2060 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2061 startInst, true, getVNInfoAllocator());
2062 VN->setHasPHIKill(true);
2063 VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
2065 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2066 getMBBEndIdx(startInst->getParent()).getNextIndex().getBaseIndex(), VN);
2067 Interval.addRange(LR);