1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

// Pass-level statistics, dumped with -stats.
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

// Unique pass ID and static registration with the pass registry.
char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
/// getAnalysisUsage - Declare the analyses this pass requires and the ones
/// it preserves, so the pass manager can schedule it correctly.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  // PHI elimination and two-address lowering must run first; live intervals
  // are computed on the post-SSA, two-address form of the function.
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
/// releaseMemory - Drop all per-function state: the interval map, the VNInfo
/// allocator, and any cloned instructions created during splitting.
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  // Delete any leftover cloned machine instructions.
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    mf_->DeleteMachineInstr(MI);
/// runOnMachineFunction - Register allocate the whole function
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // Cache target and analysis handles for the rest of the pass.
  // NOTE(review): mf_ is used here before any visible assignment from &fn in
  // this excerpt — confirm mf_ = &fn happens first in the full source.
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  // Account for the intervals built for this function in the statistics.
  numIntervals += getNumIntervals();
/// print - Implement the dump method.
/// Writes every computed live interval to OS, one per line.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
/// printInstrs - Dump every machine instruction of the current function to
/// OS, grouped by basic block and prefixed with its slot index.
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      OS << getInstructionIndex(mii) << '\t' << *mii;
148 void LiveIntervals::dumpInstrs() const {
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  // Walk every instruction slot covered by li's live ranges.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
        continue;               // skip deleted instructions

      // Copies to/from li.reg itself are not considered conflicts.
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
      // Check each operand: map virtuals through the VirtRegMap and test
      // whether the assigned physreg overlaps 'reg'.
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
          PhysReg = vrm.getPhys(PhysReg);
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check use as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  // Walk every instruction slot covered by li's live ranges.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
        continue;               // skip deleted instructions

      // Copies already coalesced away are ignored.
      if (JoinedCopies.count(MI))
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        // Uses only count when the caller asked for them.
        if (MO.isUse() && !CheckUse)
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
        if (tri_->isSubRegister(Reg, PhysReg))
/// printRegName - Print a physical register by its target name, or a virtual
/// register as "%reg<N>", to the debug stream.
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    errs() << tri_->getName(reg);
    errs() << "%reg" << reg;
/// handleVirtualRegisterDef - Extend 'interval' for the virtual-register def
/// at 'mi'. Handles the first def of a vreg, redefs from two-address
/// lowering, and redefs from PHI elimination.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             LiveInterval &interval) {
  errs() << "\t\tregister: ";
  printRegName(interval.reg, tri_);

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
    // Record the defining copy (if any) on the value number.
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(errs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb).getNextIndex().getLoadIndex(),
    DEBUG(errs() << " +" << NewLR);
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
         E = vi.AliveBlocks.end(); I != E; ++I) {
                   getMBBStartIdx(mf_->getBlockNumbered(*I)),
                   getMBBEndIdx(mf_->getBlockNumbered(*I)).getNextIndex().getLoadIndex(),
      interval.addRange(LR);
      DEBUG(errs() << " +" << LR);

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
        getInstructionIndex(Kill).getDefIndex();
      LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(errs() << " +" << LR);

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      assert(interval.containsOneValue());
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(errs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),

          errs() << " RESULT: ";
          interval.print(errs(), tri_);

      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        SlotIndex Start = getMBBStartIdx(Killer->getParent());
        SlotIndex End = getInstructionIndex(Killer).getDefIndex();
          errs() << " Removing [" << Start << "," << End << "] from: ";
          interval.print(errs(), tri_);
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "Newly discovered PHI interval has >1 ranges.");
        MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
        VNI->addKill(indexes_->getTerminatorGap(killMBB));
        VNI->setHasPHIKill(true);
          errs() << " RESULT: ";
          interval.print(errs(), tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End,
                     interval.getNextValue(SlotIndex(getMBBStartIdx(mbb), true),
                                           0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DEBUG(errs() << " replace range with " << LR);
        interval.addRange(LR);
        LR.valno->addKill(End);
          errs() << " RESULT: ";
          interval.print(errs(), tri_);

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      // Record the defining copy (if any) on the value number.
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      // The def is live through to the end of the block and is killed by
      // the PHI copy at the terminator gap.
      SlotIndex killIndex = getMBBEndIdx(mbb).getNextIndex().getLoadIndex();
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(errs() << " +" << LR);

  DEBUG(errs() << '\n');
/// handlePhysicalRegisterDef - Compute the live range for a physical-register
/// def at 'mi'. Physregs live only within one block, so scan forward from the
/// def to the kill, a redef, or the end of the block.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
    errs() << "\t\tregister: ";
    printRegName(interval.reg, tri_);

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
    DEBUG(errs() << " dead");
    end = start.getStoreIndex();

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    // Skip slots whose instruction has been deleted.
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = baseIndex.getDefIndex();
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that defines
          // it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(errs() << " dead");
          end = start.getStoreIndex();

    baseIndex = baseIndex.getNextIndex();

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used. Another possible case is the implicit use of the
  // physical register has been deleted by two-address pass.
  end = start.getStoreIndex();

  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
/// handleRegisterDef - Dispatch a register def to the virtual- or
/// physical-register handler; physreg defs also define their sub-registers.
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    // Record the defining copy (if any) so coalescing can find it later.
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
/// handleLiveInRegister - Build the live range for a register that is live-in
/// to MBB: from the block start to its first kill, first redef, or (if never
/// used) through the whole block.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         LiveInterval &interval, bool isAlias) {
    errs() << "\t\tlivein register: ";
    printRegName(interval.reg, tri_);

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = baseIndex.getDefIndex();
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(errs() << " dead");
      end = start.getStoreIndex();

    if (mi != MBB->end()) {
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  // Live-in register might not be used at all.
      DEBUG(errs() << " dead");
      end = MIIdx.getStoreIndex();
      DEBUG(errs() << " live through");

  // The live-in value is defined "before" the block, i.e. at a PHI-def slot
  // at the block start.
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  // Registers only ever referenced as <undef> uses; handled at the end.
  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(errs() << MBB->getName() << ":\n");

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (; MI != miEnd; ++MI) {
      DEBUG(errs() << MIIndex << "\t" << *MI);

      // Operands are walked in reverse so defs are seen after uses.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())

        // handle register defs - build intervals
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
/// createInterval - Allocate a new LiveInterval for 'reg'. Physical registers
/// get a huge spill weight (HUGE_VALF) so the allocator never spills them;
/// virtual registers start with weight 0.
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  // Deep-copy ranges and value numbers using this pass's VNInfo allocator.
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
  } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    // For insert/subreg_to_reg the inserted value is operand 2.
    return VNI->getCopy()->getOperand(2).getReg();

  // Otherwise it must be a target-recognized move instruction.
  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
  llvm_unreachable("Unrecognized copy instruction!");
735 //===----------------------------------------------------------------------===//
736 // Register allocator hooks.
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
    unsigned Reg = MO.getReg();
    // li.reg itself does not count as the implicit use.
    if (Reg == 0 || Reg == li.reg)
    // Unallocatable physregs (e.g. reserved registers) are ignored.
    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
    // FIXME: For now, only remat MI with at most one register operand.
           "Can't rematerialize instruction with multiple register operand!");
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  // Value reaching MI must be the same value live at UseIdx.
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
  // The target decides whether MI is trivially rematerializable.
  if (!tii_->isTriviallyReMaterializable(MI, aa_))

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
    const LiveInterval &ImpLi = getInterval(ImpUse);
    // The implicit-use value must be available at every use of li.reg that
    // this val# reaches, otherwise remat at those points is illegal.
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
/// Convenience overload: forwards to the full version with dummy out-params.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
    const VNInfo *VNI = *i;
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            SmallVector<unsigned, 2> &FoldOps) {
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
      // Defs are recorded as modifications.
      MRInfo |= (unsigned)VirtRegMap::isMod;
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
      MRInfo |= (unsigned)VirtRegMap::isRef;
    FoldOps.push_back(OpIdx);
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successul, MI is updated with the newly created MI and
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))

  // Fold either the stack slot (spill/reload) or the remat'ed def.
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    // Transfer bookkeeping from the old instruction to the folded one and
    // swap it into the block in place.
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))

  return tii_->canFoldMemoryOperand(MI, FoldOps);
/// intervalIsInOneMBB - Returns true if every live range of li is contained
/// within a single basic block.
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  // Block covering the first range; all other ranges must match it.
  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
  // There is an implicit use. That means one of the other operand is
  // being remat'ed and the remat'ed instruction has li.reg as an
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
    if (!vrm.isReMaterialized(Reg))
    // Redirect the remat'ed instruction's use of li.reg to NewVReg.
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
      UseMO->setReg(NewVReg);
991 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
992 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
//
// Rewrites the operands of one instruction that refer to li.reg onto a fresh
// virtual register (returned through NewVReg), folding the reload / spill
// into the instruction when legal.  HasDef / HasUse report whether the
// rewritten operands define and/or use the register so the caller can build
// the spill interval's ranges.  NOTE(review): several statements of the
// original body (continues, returns, the RestartInstruction label) are not
// visible in this excerpt; comments describe only what is shown.
994 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
995 bool TrySplit, SlotIndex index, SlotIndex end,
997 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
998 unsigned Slot, int LdSlot,
999 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1001 const TargetRegisterClass* rc,
1002 SmallVector<int, 4> &ReMatIds,
1003 const MachineLoopInfo *loopInfo,
1004 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1005 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1006 std::vector<LiveInterval*> &NewLIs) {
1007 bool CanFold = false;
// Scan every operand, looking for virtual-register references to li.reg.
1009 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1010 MachineOperand& mop = MI->getOperand(i);
1013 unsigned Reg = mop.getReg();
1014 unsigned RegI = Reg;
1015 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
// Folding is attempted only for non-remat defs by default; FoldSS says the
// fold source is a stack slot (vs. a re-mat load slot).
1020 bool TryFold = !DefIsReMat;
1021 bool FoldSS = true; // Default behavior unless it's a remat.
1022 int FoldSlot = Slot;
1024 // If this is the rematerializable definition MI itself and
1025 // all of its uses are rematerialized, simply delete it.
1026 if (MI == ReMatOrigDefMI && CanDelete) {
1027 DEBUG(errs() << "\t\t\t\tErasing re-materlizable def: "
// Remove the instruction from both index maps before erasing it.
1029 RemoveMachineInstrFromMaps(MI);
1030 vrm.RemoveMachineInstrFromMaps(MI);
1031 MI->eraseFromParent();
1035 // If def for this use can't be rematerialized, then try folding.
1036 // If def is rematerializable and it's a load, also try folding.
1037 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1039 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1045 // Scan all of the operands of this instruction rewriting operands
1046 // to use NewVReg instead of li.reg as appropriate. We do this for
1049 // 1. If the instr reads the same spilled vreg multiple times, we
1050 // want to reuse the NewVReg.
1051 // 2. If the instr is a two-addr instruction, we are required to
1052 // keep the src/dst regs pinned.
1054 // Keep track of whether we replace a use and/or def so that we can
1055 // create the spill interval with the appropriate range.
1057 HasUse = mop.isUse();
1058 HasDef = mop.isDef();
// Collect the indices of all later operands that also reference li.reg so
// they are rewritten to the same NewVReg in one pass.
1059 SmallVector<unsigned, 2> Ops;
1061 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1062 const MachineOperand &MOj = MI->getOperand(j);
1065 unsigned RegJ = MOj.getReg();
1066 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
// <undef> operands don't contribute a real use/def of the value.
1070 if (!MOj.isUndef()) {
1071 HasUse |= MOj.isUse();
1072 HasDef |= MOj.isDef();
1077 // Create a new virtual register for the spill interval.
1078 // Create the new register now so we can map the fold instruction
1079 // to the new register so when it is unfolded we get the correct
1081 bool CreatedNewVReg = false;
1083 NewVReg = mri_->createVirtualRegister(rc);
1085 CreatedNewVReg = true;
1087 // The new virtual register should get the same allocation hints as the
// original register, when any were recorded.
1089 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1090 if (Hint.first || Hint.second)
1091 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1097 // Do not fold load / store here if we are splitting. We'll find an
1098 // optimal point to insert a load / store later.
1100 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1101 Ops, FoldSS, FoldSlot, NewVReg)) {
1102 // Folding the load/store can completely change the instruction in
1103 // unpredictable ways, rescan it from the beginning.
1106 // We need to give the new vreg the same stack slot as the
1107 // spilled interval.
1108 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
// If folding deleted/replaced MI entirely it is no longer in the index
// maps; otherwise rescan the (mutated) instruction from operand 0.
1114 if (isNotInMIMap(MI))
1116 goto RestartInstruction;
1119 // We'll try to fold it later if it's profitable.
1120 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand (and all collected duplicates) onto NewVReg,
// keeping any remat clones consistent via rewriteImplicitOps.
1124 mop.setReg(NewVReg);
1125 if (mop.isImplicit())
1126 rewriteImplicitOps(li, MI, NewVReg, vrm);
1128 // Reuse NewVReg for other reads.
1129 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1130 MachineOperand &mopj = MI->getOperand(Ops[j]);
1131 mopj.setReg(NewVReg);
1132 if (mopj.isImplicit())
1133 rewriteImplicitOps(li, MI, NewVReg, vrm);
1136 if (CreatedNewVReg) {
// Record how this vreg is re-materialized; remat ids are per-val#.
1138 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1139 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1140 // Each valnum may have its own remat id.
1141 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1143 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1145 if (!CanDelete || (HasUse && HasDef)) {
1146 // If this is a two-addr instruction then its use operands are
1147 // rematerializable but its def is not. It should be assigned a
1149 vrm.assignVirt2StackSlot(NewVReg, Slot);
1152 vrm.assignVirt2StackSlot(NewVReg, Slot);
1154 } else if (HasUse && HasDef &&
1155 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1156 // If this interval hasn't been assigned a stack slot (because earlier
1157 // def is a deleted remat def), do it now.
1158 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1159 vrm.assignVirt2StackSlot(NewVReg, Slot);
1162 // Re-matting an instruction with virtual register use. Add the
1163 // register as an implicit use on the use MI.
1164 if (DefIsReMat && ImpUse)
1165 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1167 // Create a new register interval for this spill / remat.
1168 LiveInterval &nI = getOrCreateInterval(NewVReg);
1169 if (CreatedNewVReg) {
1170 NewLIs.push_back(&nI);
1171 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1173 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Give the new interval a range covering this def and/or use.
1177 if (CreatedNewVReg) {
1178 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1179 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1180 DEBUG(errs() << " +" << LR);
1183 // Extend the split live interval to this def / use.
1184 SlotIndex End = index.getDefIndex();
1185 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1186 nI.getValNumInfo(nI.getNumValNums()-1));
1187 DEBUG(errs() << " +" << LR);
1192 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1193 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1194 DEBUG(errs() << " +" << LR);
1199 errs() << "\t\t\t\tAdded new interval: ";
1200 nI.print(errs(), tri_);
/// anyKillInMBBAfterIdx - Return whether the given value number has a kill
/// point inside MBB that lies strictly after Idx (and before the block's
/// end index).  PHI kill slots are skipped — they do not correspond to a
/// real instruction.  NOTE(review): the return statements are not visible
/// in this excerpt.
1206 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1208 MachineBasicBlock *MBB,
1209 SlotIndex Idx) const {
1210 SlotIndex End = getMBBEndIdx(MBB);
1211 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1212 if (VNI->kills[j].isPHI())
// A kill after Idx but still within this block is what we're looking for.
1215 SlotIndex KillIdx = VNI->kills[j];
1216 if (KillIdx > Idx && KillIdx < End)
1222 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1223 /// during spilling.
//
// One record per (instruction, operand) reference to the spilled register:
// the instruction's slot index, the instruction itself, and whether that
// reference reads and/or writes the register.  NOTE(review): the field
// declarations are not visible in this excerpt; see the constructor.
1225 struct RewriteInfo {
1230 RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
1231 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// Strict-weak ordering by slot index, used to sort RewriteInfo records so
// rewriting proceeds in instruction order.
1234 struct RewriteInfoCompare {
1235 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1236 return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Rewrite every def / use of li.reg inside the
// live range *I: collect all referencing instructions sorted by index, rewrite
// each via rewriteInstructionForSpills, and record per-MBB spill points
// (SpillMBBs / SpillIdxes) and restore points (RestoreMBBs / RestoreIdxes)
// for the later folding / insertion pass in addIntervalsForSpills.
// NOTE(review): a number of statements (continues, braces, a few
// assignments) are not visible in this excerpt.
1241 void LiveIntervals::
1242 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1243 LiveInterval::Ranges::const_iterator &I,
1244 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1245 unsigned Slot, int LdSlot,
1246 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1248 const TargetRegisterClass* rc,
1249 SmallVector<int, 4> &ReMatIds,
1250 const MachineLoopInfo *loopInfo,
1251 BitVector &SpillMBBs,
1252 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1253 BitVector &RestoreMBBs,
1254 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1255 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1256 std::vector<LiveInterval*> &NewLIs) {
1257 bool AllCanFold = true;
1258 unsigned NewVReg = 0;
// Normalize the range bounds to whole-instruction (base) indices.
1259 SlotIndex start = I->start.getBaseIndex();
1260 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1262 // First collect all the def / use in this live range that will be rewritten.
1263 // Make sure they are sorted according to instruction index.
1264 std::vector<RewriteInfo> RewriteMIs;
1265 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1266 re = mri_->reg_end(); ri != re; ) {
1267 MachineInstr *MI = &*ri;
1268 MachineOperand &O = ri.getOperand();
1270 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
// Only references inside [start, end) belong to this live range.
1271 SlotIndex index = getInstructionIndex(MI);
1272 if (index < start || index >= end)
1276 // Must be defined by an implicit def. It should not be spilled. Note,
1277 // this is for correctness reason. e.g.
1278 // 8   %reg1024<def> = IMPLICIT_DEF
1279 // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1280 // The live range [12, 14) are not part of the r1024 live interval since
1281 // it's defined by an implicit def. It will not conflicts with live
1282 // interval of r1025. Now suppose both registers are spilled, you can
1283 // easily see a situation where both registers are reloaded before
1284 // the INSERT_SUBREG and both target registers that would overlap.
1286 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1288 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
// If the re-mat def itself reads a virtual register, remember it so the
// rewritten users can carry it as an implicit use.
1290 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1291 // Now rewrite the defs and uses.
1292 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1293 RewriteInfo &rwi = RewriteMIs[i];
1295 SlotIndex index = rwi.Index;
1296 bool MIHasUse = rwi.HasUse;
1297 bool MIHasDef = rwi.HasDef;
1298 MachineInstr *MI = rwi.MI;
1299 // If MI def and/or use the same register multiple times, then there
1300 // are multiple entries.
// Merge all RewriteInfo entries for the same instruction into one step.
1301 unsigned NumUses = MIHasUse;
1302 while (i != e && RewriteMIs[i].MI == MI) {
1303 assert(RewriteMIs[i].Index == index);
1304 bool isUse = RewriteMIs[i].HasUse;
1305 if (isUse) ++NumUses;
1307 MIHasDef |= RewriteMIs[i].HasDef;
1310 MachineBasicBlock *MBB = MI->getParent();
1312 if (ImpUse && MI != ReMatDefMI) {
1313 // Re-matting an instruction with virtual register use. Update the
1314 // register interval's spill weight to HUGE_VALF to prevent it from
// being spilled itself.
1316 LiveInterval &ImpLi = getInterval(ImpUse);
1317 ImpLi.weight = HUGE_VALF;
// When splitting, reuse the vreg already created for this MBB (if any) so
// multiple references in one block share a single split interval.
1320 unsigned MBBId = MBB->getNumber();
1321 unsigned ThisVReg = 0;
1323 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1324 if (NVI != MBBVRegsMap.end()) {
1325 ThisVReg = NVI->second;
1332 // It's better to start a new interval to avoid artifically
1333 // extend the new interval.
1334 if (MIHasDef && !MIHasUse) {
1335 MBBVRegsMap.erase(MBB->getNumber());
1341 bool IsNew = ThisVReg == 0;
1343 // This ends the previous live interval. If all of its def / use
1344 // can be folded, give it a low spill weight.
1345 if (NewVReg && TrySplit && AllCanFold) {
1346 LiveInterval &nI = getOrCreateInterval(NewVReg);
1353 bool HasDef = false;
1354 bool HasUse = false;
1355 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1356 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1357 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1358 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1359 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
// The instruction no longer references the register (e.g. it was erased).
1360 if (!HasDef && !HasUse)
1363 AllCanFold &= CanFold;
1365 // Update weight of spill interval.
1366 LiveInterval &nI = getOrCreateInterval(NewVReg);
1368 // The spill weight is now infinity as it cannot be spilled again.
1369 nI.weight = HUGE_VALF;
1373 // Keep track of the last def and first use in each MBB.
1375 if (MI != ReMatOrigDefMI || !CanDelete) {
1376 bool HasKill = false;
1378 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1380 // If this is a two-address code, then this index starts a new VNInfo.
1381 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1383 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
// Record (or update) the spill point for this block.  Only the latest def
// of each vreg per block is kept.
1385 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1386 SpillIdxes.find(MBBId);
1388 if (SII == SpillIdxes.end()) {
1389 std::vector<SRInfo> S;
1390 S.push_back(SRInfo(index, NewVReg, true));
1391 SpillIdxes.insert(std::make_pair(MBBId, S));
1392 } else if (SII->second.back().vreg != NewVReg) {
1393 SII->second.push_back(SRInfo(index, NewVReg, true));
1394 } else if (index > SII->second.back().index) {
1395 // If there is an earlier def and this is a two-address
1396 // instruction, then it's not possible to fold the store (which
1397 // would also fold the load).
1398 SRInfo &Info = SII->second.back();
1400 Info.canFold = !HasUse;
1402 SpillMBBs.set(MBBId);
1403 } else if (SII != SpillIdxes.end() &&
1404 SII->second.back().vreg == NewVReg &&
1405 index > SII->second.back().index) {
1406 // There is an earlier def that's not killed (must be two-address).
1407 // The spill is no longer needed.
1408 SII->second.pop_back();
1409 if (SII->second.empty()) {
1410 SpillIdxes.erase(MBBId);
1411 SpillMBBs.reset(MBBId);
// Record the restore point (first use of the vreg in this block).
1418 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1419 SpillIdxes.find(MBBId);
1420 if (SII != SpillIdxes.end() &&
1421 SII->second.back().vreg == NewVReg &&
1422 index > SII->second.back().index)
1423 // Use(s) following the last def, it's not safe to fold the spill.
1424 SII->second.back().canFold = false;
1425 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1426 RestoreIdxes.find(MBBId);
1427 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1428 // If we are splitting live intervals, only fold if it's the first
1429 // use and there isn't another use later in the MBB.
1430 RII->second.back().canFold = false;
1432 // Only need a reload if there isn't an earlier def / use.
1433 if (RII == RestoreIdxes.end()) {
1434 std::vector<SRInfo> Infos;
1435 Infos.push_back(SRInfo(index, NewVReg, true));
1436 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1438 RII->second.push_back(SRInfo(index, NewVReg, true));
1440 RestoreMBBs.set(MBBId);
1444 // Update spill weight.
1445 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1446 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
// Handle the last interval created by the loop above.
1449 if (NewVReg && TrySplit && AllCanFold) {
1450 // If all of its def / use can be folded, give it a low spill weight.
1451 LiveInterval &nI = getOrCreateInterval(NewVReg);
/// alsoFoldARestore - Return whether block Id has a foldable restore point
/// recorded for register vr at slot index 'index', i.e. an SRInfo entry
/// matching (index, vr) with canFold set.  NOTE(review): the return
/// statements are not visible in this excerpt.
1456 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1457 unsigned vr, BitVector &RestoreMBBs,
1458 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores recorded for this block at all.
1459 if (!RestoreMBBs[Id])
1461 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1462 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1463 if (Restores[i].index == index &&
1464 Restores[i].vreg == vr &&
1465 Restores[i].canFold)
1470 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1471 unsigned vr, BitVector &RestoreMBBs,
1472 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1473 if (!RestoreMBBs[Id])
1475 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1476 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1477 if (Restores[i].index == index && Restores[i].vreg)
1478 Restores[i].index = SlotIndex();
1481 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1482 /// spilled and create empty intervals for their uses.
//
// Walks every reference to li.reg: IMPLICIT_DEF defs are erased outright
// (removing them from the index maps first); each remaining use gets a brand
// new empty interval on a fresh vreg, marked implicitly-defined in the
// VirtRegMap, and the operand is rewritten.  NOTE(review): some statements
// (continues, the operand rewrite) are not visible in this excerpt.
1484 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1485 const TargetRegisterClass* rc,
1486 std::vector<LiveInterval*> &NewLIs) {
1487 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1488 re = mri_->reg_end(); ri != re; ) {
1489 MachineOperand &O = ri.getOperand();
1490 MachineInstr *MI = &*ri;
// Any def of this register left at this point must be an IMPLICIT_DEF;
// erase it after detaching it from both index maps.
1493 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1494 "Register def was not rewritten?");
1495 RemoveMachineInstrFromMaps(MI);
1496 vrm.RemoveMachineInstrFromMaps(MI);
1497 MI->eraseFromParent();
1499 // This must be an use of an implicit_def so it's not part of the live
1500 // interval. Create a new empty live interval for it.
1501 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1502 unsigned NewVReg = mri_->createVirtualRegister(rc);
1504 vrm.setIsImplicitlyDefined(NewVReg);
1505 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Rewrite every operand of this instruction that references li.reg.
1506 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1507 MachineOperand &MO = MI->getOperand(i);
1508 if (MO.isReg() && MO.getReg() == li.reg) {
// addIntervalsForSpillsFast - Simplified spilling path (enabled by the
// -fast-spill flag).  Assigns one stack slot to the interval, then for each
// referencing instruction either folds the memory access in place or rewrites
// the operands onto a fresh vreg with a reload before each use and a store
// after each def.  Returns the list of new (unspillable) intervals.
// NOTE(review): several statements (DEBUG braces, iterator advance, returns)
// are not visible in this excerpt.
1517 std::vector<LiveInterval*> LiveIntervals::
1518 addIntervalsForSpillsFast(const LiveInterval &li,
1519 const MachineLoopInfo *loopInfo,
1521 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1523 std::vector<LiveInterval*> added;
1525 assert(li.weight != HUGE_VALF &&
1526 "attempt to spill already spilled interval!");
1529 errs() << "\t\t\t\tadding intervals for spills for interval: ";
1534 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1536 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1537 while (RI != mri_->reg_end()) {
1538 MachineInstr* MI = &*RI;
// Collect the indices of all operands of MI that reference li.reg, and
// whether the instruction reads and/or writes the register.
1540 SmallVector<unsigned, 2> Indices;
1541 bool HasUse = false;
1542 bool HasDef = false;
1544 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1545 MachineOperand& mop = MI->getOperand(i);
1546 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1548 HasUse |= MI->getOperand(i).isUse();
1549 HasDef |= MI->getOperand(i).isDef();
1551 Indices.push_back(i);
// Try to fold the stack access directly into MI; only if that fails do we
// create a new vreg with explicit reload / spill points.
1554 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1555 Indices, true, slot, li.reg)) {
1556 unsigned NewVReg = mri_->createVirtualRegister(rc);
1558 vrm.assignVirt2StackSlot(NewVReg, slot);
1560 // create a new register for this spill
1561 LiveInterval &nI = getOrCreateInterval(NewVReg);
1563 // the spill weight is now infinity as it
1564 // cannot be spilled again
1565 nI.weight = HUGE_VALF;
1567 // Rewrite register operands to use the new vreg.
1568 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1569 E = Indices.end(); I != E; ++I) {
1570 MI->getOperand(*I).setReg(NewVReg);
// The new vreg's live range ends at this instruction, so uses are kills.
1572 if (MI->getOperand(*I).isUse())
1573 MI->getOperand(*I).setIsKill(true);
1576 // Fill in the new live interval.
1577 SlotIndex index = getInstructionIndex(MI);
// Uses get a [load, use) range plus a restore point; defs get a
// [def, store) range plus a spill point.
1579 LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1580 nI.getNextValue(SlotIndex(), 0, false,
1581 getVNInfoAllocator()));
1582 DEBUG(errs() << " +" << LR);
1584 vrm.addRestorePoint(NewVReg, MI);
1587 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1588 nI.getNextValue(SlotIndex(), 0, false,
1589 getVNInfoAllocator()));
1590 DEBUG(errs() << " +" << LR);
1592 vrm.addSpillPoint(NewVReg, true, MI);
1595 added.push_back(&nI);
1598 errs() << "\t\t\t\tadded new interval: ";
// Rewriting invalidated the use iterator; restart the walk.
1605 RI = mri_->reg_begin(li.reg);
// addIntervalsForSpills - Main spilling entry point.  Rewrites all defs/uses
// of li, deciding per-val# whether the def can be re-materialized instead of
// reloaded, assigns a stack slot when needed, records per-MBB spill/restore
// points, attempts to fold them, and returns the resulting new intervals.
// NOTE(review): many statements of the original body (continues, returns,
// braces, some declarations) are not visible in this excerpt; comments
// describe only the statements shown.
1611 std::vector<LiveInterval*> LiveIntervals::
1612 addIntervalsForSpills(const LiveInterval &li,
1613 SmallVectorImpl<LiveInterval*> &SpillIs,
1614 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1616 if (EnableFastSpilling)
1617 return addIntervalsForSpillsFast(li, loopInfo, vrm);
1619 assert(li.weight != HUGE_VALF &&
1620 "attempt to spill already spilled interval!");
1623 errs() << "\t\t\t\tadding intervals for spills for interval: ";
1624 li.print(errs(), tri_);
1628 // Each bit specify whether a spill is required in the MBB.
1629 BitVector SpillMBBs(mf_->getNumBlockIDs());
1630 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1631 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1632 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1633 DenseMap<unsigned,unsigned> MBBVRegsMap;
1634 std::vector<LiveInterval*> NewLIs;
1635 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-val# re-materialization state: the original def, a clone (the
// original may be mutated), a remat id, and whether the def can be deleted.
1637 unsigned NumValNums = li.getNumValNums();
1638 SmallVector<MachineInstr*, 4> ReMatDefs;
1639 ReMatDefs.resize(NumValNums, NULL);
1640 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1641 ReMatOrigDefs.resize(NumValNums, NULL);
1642 SmallVector<int, 4> ReMatIds;
1643 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1644 BitVector ReMatDelete(NumValNums);
1645 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1647 // Spilling a split live interval. It cannot be split any further. Also,
1648 // it's also guaranteed to be a single val# / range interval.
1649 if (vrm.getPreSplitReg(li.reg)) {
1650 vrm.setIsSplitFromReg(li.reg, 0);
1651 // Unset the split kill marker on the last use.
1652 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1653 if (KillIdx != SlotIndex()) {
1654 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1655 assert(KillMI && "Last use disappeared?");
1656 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1657 assert(KillOp != -1 && "Last use disappeared?");
1658 KillMI->getOperand(KillOp).setIsKill(false);
1660 vrm.removeKillPoint(li.reg);
1661 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1662 Slot = vrm.getStackSlot(li.reg);
1663 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1664 MachineInstr *ReMatDefMI = DefIsReMat ?
1665 vrm.getReMaterializedMI(li.reg) : NULL;
1667 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1668 bool isLoad = isLoadSS ||
1669 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1670 bool IsFirstRange = true;
1671 for (LiveInterval::Ranges::const_iterator
1672 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1673 // If this is a split live interval with multiple ranges, it means there
1674 // are two-address instructions that re-defined the value. Only the
1675 // first def can be rematerialized!
1677 // Note ReMatOrigDefMI has already been deleted.
1678 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1679 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1680 false, vrm, rc, ReMatIds, loopInfo,
1681 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1682 MBBVRegsMap, NewLIs);
1684 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1685 Slot, 0, false, false, false,
1686 false, vrm, rc, ReMatIds, loopInfo,
1687 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1688 MBBVRegsMap, NewLIs);
1690 IsFirstRange = false;
1693 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Splitting is only worthwhile when the interval spans multiple blocks.
1697 bool TrySplit = !intervalIsInOneMBB(li);
1700 bool NeedStackSlot = false;
1701 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1703 const VNInfo *VNI = *i;
1704 unsigned VN = VNI->id;
1705 if (VNI->isUnused())
1706 continue; // Dead val#.
1707 // Is the def for the val# rematerializable?
1708 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1709 ? getInstructionFromIndex(VNI->def) : 0;
1711 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1712 // Remember how to remat the def of this val#.
1713 ReMatOrigDefs[VN] = ReMatDefMI;
1714 // Original def may be modified so we have to make a copy here.
1715 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1716 CloneMIs.push_back(Clone);
1717 ReMatDefs[VN] = Clone;
1719 bool CanDelete = true;
1720 if (VNI->hasPHIKill()) {
1721 // A kill is a phi node, not all of its uses can be rematerialized.
1722 // It must not be deleted.
1724 // Need a stack slot if there is any live range where uses cannot be
1726 NeedStackSlot = true;
1729 ReMatDelete.set(VN);
1731 // Need a stack slot if there is any live range where uses cannot be
1733 NeedStackSlot = true;
1737 // One stack slot per live interval.
1738 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1739 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1740 Slot = vrm.assignVirt2StackSlot(li.reg);
1742 // This case only occurs when the prealloc splitter has already assigned
1743 // a stack slot to this vreg.
1745 Slot = vrm.getStackSlot(li.reg);
1748 // Create new intervals and rewrite defs and uses.
1749 for (LiveInterval::Ranges::const_iterator
1750 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1751 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1752 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1753 bool DefIsReMat = ReMatDefMI != NULL;
1754 bool CanDelete = ReMatDelete[I->valno->id];
1756 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1757 bool isLoad = isLoadSS ||
1758 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1759 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1760 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1761 CanDelete, vrm, rc, ReMatIds, loopInfo,
1762 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1763 MBBVRegsMap, NewLIs);
1766 // Insert spills / restores if we are splitting.
1768 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Walk the recorded per-MBB spill points, folding stores where possible,
// otherwise telling the VirtRegMap to emit a spill.
1772 SmallPtrSet<LiveInterval*, 4> AddedKill;
1773 SmallVector<unsigned, 2> Ops;
1774 if (NeedStackSlot) {
1775 int Id = SpillMBBs.find_first();
1777 std::vector<SRInfo> &spills = SpillIdxes[Id];
1778 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1779 SlotIndex index = spills[i].index;
1780 unsigned VReg = spills[i].vreg;
1781 LiveInterval &nI = getOrCreateInterval(VReg);
1782 bool isReMat = vrm.isReMaterialized(VReg);
1783 MachineInstr *MI = getInstructionFromIndex(index);
1784 bool CanFold = false;
1785 bool FoundUse = false;
1787 if (spills[i].canFold) {
1789 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1790 MachineOperand &MO = MI->getOperand(j);
1791 if (!MO.isReg() || MO.getReg() != VReg)
1798 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1799 RestoreMBBs, RestoreIdxes))) {
1800 // MI has two-address uses of the same register. If the use
1801 // isn't the first and only use in the BB, then we can't fold
1802 // it. FIXME: Move this to rewriteInstructionsForSpills.
1809 // Fold the store into the def if possible.
1810 bool Folded = false;
1811 if (CanFold && !Ops.empty()) {
1812 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1815 // Also folded uses, do not issue a load.
1816 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1817 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1819 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1823 // Otherwise tell the spiller to issue a spill.
1825 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1826 bool isKill = LR->end == index.getStoreIndex();
1827 if (!MI->registerDefIsDead(nI.reg))
1828 // No need to spill a dead def.
1829 vrm.addSpillPoint(VReg, isKill, MI);
1831 AddedKill.insert(&nI);
1834 Id = SpillMBBs.find_next(Id);
// Walk the recorded per-MBB restore points, folding loads (or re-mat loads)
// where possible, otherwise telling the VirtRegMap to emit a restore.
1838 int Id = RestoreMBBs.find_first();
1840 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1841 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1842 SlotIndex index = restores[i].index;
// An entry cleared by eraseRestoreInfo; this restore is unneeded.
1843 if (index == SlotIndex())
1845 unsigned VReg = restores[i].vreg;
1846 LiveInterval &nI = getOrCreateInterval(VReg);
1847 bool isReMat = vrm.isReMaterialized(VReg);
1848 MachineInstr *MI = getInstructionFromIndex(index);
1849 bool CanFold = false;
1851 if (restores[i].canFold) {
1853 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1854 MachineOperand &MO = MI->getOperand(j);
1855 if (!MO.isReg() || MO.getReg() != VReg)
1859 // If this restore were to be folded, it would have been folded
1868 // Fold the load into the use if possible.
1869 bool Folded = false;
1870 if (CanFold && !Ops.empty()) {
1872 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1874 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1876 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1877 // If the rematerializable def is a load, also try to fold it.
1878 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1879 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1880 Ops, isLoadSS, LdSlot, VReg);
1882 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1884 // Re-matting an instruction with virtual register use. Add the
1885 // register as an implicit use on the use MI and update the register
1886 // interval's spill weight to HUGE_VALF to prevent it from being
// spilled itself.
1888 LiveInterval &ImpLi = getInterval(ImpUse);
1889 ImpLi.weight = HUGE_VALF;
1890 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1895 // If folding is not possible / failed, then tell the spiller to issue a
1896 // load / rematerialization for us.
1898 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1900 vrm.addRestorePoint(VReg, MI);
1902 Id = RestoreMBBs.find_next(Id);
1905 // Finalize intervals: add kills, finalize spill weights, and filter out
// dead intervals.
1907 std::vector<LiveInterval*> RetNewLIs;
1908 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1909 LiveInterval *LI = NewLIs[i];
// Normalize accumulated weight by the interval's approximate size.
1911 LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
1912 if (!AddedKill.count(LI)) {
1913 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1914 SlotIndex LastUseIdx = LR->end.getBaseIndex();
1915 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1916 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1917 assert(UseIdx != -1);
1918 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
1919 LastUse->getOperand(UseIdx).setIsKill();
1920 vrm.addKillPoint(LI->reg, LastUseIdx);
1923 RetNewLIs.push_back(LI);
1927 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1931 /// hasAllocatableSuperReg - Return true if the specified physical register has
1932 /// any super register that's allocatable.
//
// A super-register qualifies only if it is in the allocatable set AND this
// pass is tracking an interval for it.  NOTE(review): the return statements
// are not visible in this excerpt.
1933 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1934 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1935 if (allocatableRegs_[*AS] && hasInterval(*AS))
1940 /// getRepresentativeReg - Find the largest super register of the specified
1941 /// physical register.
//
// Walks the super-register chain, promoting BestReg to a super-register that
// has an interval but no allocatable super-register of its own (i.e. the top
// of the tracked hierarchy).  NOTE(review): the assignment to BestReg and the
// return are not visible in this excerpt.
1942 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1943 // Find the largest super-register that is allocatable.
1944 unsigned BestReg = Reg;
1945 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1946 unsigned SuperReg = *AS;
1947 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1955 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1956 /// specified interval that conflicts with the specified physical register.
//
// Counts the operands referencing li.reg whose instruction index lies inside
// the representative physreg's live interval.  NOTE(review): the increment of
// NumConflicts is not visible in this excerpt.
1957 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1958 unsigned PhysReg) const {
1959 unsigned NumConflicts = 0;
// Conflicts are measured against the largest tracked super-register.
1960 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1961 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1962 E = mri_->reg_end(); I != E; ++I) {
1963 MachineOperand &O = I.getOperand();
1964 MachineInstr *MI = O.getParent();
1965 SlotIndex Index = getInstructionIndex(MI);
1966 if (pli.liveAt(Index))
1969 return NumConflicts;
1972 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1973 /// around all defs and uses of the specified interval. Return true if it
1974 /// was able to cut its interval.
//
// Chooses the representative super-register, gathers it plus all tracked
// sub-registers, then for every instruction referencing li.reg that the
// physreg is live at, records an emergency spill and carves the surrounding
// slot range out of the physreg (and sub-register) intervals.  Running out
// of a removable range is a fatal error (typically caused by inline asm
// constraints).  NOTE(review): several statements (continues, the Cut flag,
// returns) are not visible in this excerpt.
1975 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1976 unsigned PhysReg, VirtRegMap &vrm) {
1977 unsigned SpillReg = getRepresentativeReg(PhysReg);
1979 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1980 // If there are registers which alias PhysReg, but which are not a
1981 // sub-register of the chosen representative super register. Assert
1982 // since we can't handle it yet.
1983 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
1984 tri_->isSuperRegister(*AS, SpillReg));
// Collect the representative register and its tracked sub-registers.
1987 SmallVector<unsigned, 4> PRegs;
1988 if (hasInterval(SpillReg))
1989 PRegs.push_back(SpillReg);
1991 SmallSet<unsigned, 4> Added;
1992 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
1993 if (Added.insert(*AS) && hasInterval(*AS)) {
1994 PRegs.push_back(*AS);
1995 for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
// Visit each instruction referencing li.reg only once, even when it has
// multiple operands on the register.
2000 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2001 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2002 E = mri_->reg_end(); I != E; ++I) {
2003 MachineOperand &O = I.getOperand();
2004 MachineInstr *MI = O.getParent();
2005 if (SeenMIs.count(MI))
2008 SlotIndex Index = getInstructionIndex(MI);
2009 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2010 unsigned PReg = PRegs[i];
2011 LiveInterval &pli = getInterval(PReg);
2012 if (!pli.liveAt(Index))
2014 vrm.addEmergencySpill(PReg, MI);
2015 SlotIndex StartIdx = Index.getLoadIndex();
2016 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2017 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2018 pli.removeRange(StartIdx, EndIdx);
// The slot range straddles live-range boundaries: cannot carve it out.
2022 raw_string_ostream Msg(msg);
2023 Msg << "Ran out of registers during register allocation!";
2024 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2025 Msg << "\nPlease check your inline asm statement for invalid "
2026 << "constraints:\n";
2027 MI->print(Msg, tm_);
2029 llvm_report_error(Msg.str());
// Also trim any live sub-register intervals around this instruction.
2031 for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
2032 if (!hasInterval(*AS))
2034 LiveInterval &spli = getInterval(*AS);
2035 if (spli.liveAt(Index))
2036 spli.removeRange(Index.getLoadIndex(),
2037 Index.getNextIndex().getBaseIndex());
2044 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2045 MachineInstr* startInst) {
2046 LiveInterval& Interval = getOrCreateInterval(reg);
2047 VNInfo* VN = Interval.getNextValue(
2048 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2049 startInst, true, getVNInfoAllocator());
2050 VN->setHasPHIKill(true);
2051 VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
2053 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2054 getMBBEndIdx(startInst->getParent()).getNextIndex().getBaseIndex(), VN);
2055 Interval.addRange(LR);