1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
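//
// A minimal usage sketch (illustrative only; 'MyPass' and 'SomeVReg' are
// hypothetical names): a client MachineFunctionPass typically requests and
// queries this analysis like so:
//
//   void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<LiveIntervals>();
//     MachineFunctionPass::getAnalysisUsage(AU);
//   }
//
//   bool MyPass::runOnMachineFunction(MachineFunction &MF) {
//     LiveIntervals &LIS = getAnalysis<LiveIntervals>();
//     if (LIS.hasInterval(SomeVReg))
//       LIS.getInterval(SomeVReg).print(dbgs(), 0);
//     return false;
//   }
//
//===----------------------------------------------------------------------===//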
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
using namespace llvm;

// Hidden options for help debugging.
50 static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
53 static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
56 STATISTIC(numIntervals , "Number of original intervals");
57 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
58 STATISTIC(numSplits , "Number of intervals split");
60 char LiveIntervals::ID = 0;
61 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
63 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
65 AU.addRequired<AliasAnalysis>();
66 AU.addPreserved<AliasAnalysis>();
67 AU.addPreserved<LiveVariables>();
68 AU.addRequired<LiveVariables>();
69 AU.addPreservedID(MachineLoopInfoID);
70 AU.addPreservedID(MachineDominatorsID);
73 AU.addPreservedID(PHIEliminationID);
74 AU.addRequiredID(PHIEliminationID);
77 AU.addRequiredID(TwoAddressInstructionPassID);
78 AU.addPreserved<ProcessImplicitDefs>();
79 AU.addRequired<ProcessImplicitDefs>();
80 AU.addPreserved<SlotIndexes>();
81 AU.addRequiredTransitive<SlotIndexes>();
82 MachineFunctionPass::getAnalysisUsage(AU);
85 void LiveIntervals::releaseMemory() {
86 // Free the live intervals themselves.
87 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;
  r2iMap_.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
94 VNInfoAllocator.Reset();
95 while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
/// runOnMachineFunction - Compute the live intervals for the whole function.
104 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
107 tm_ = &fn.getTarget();
108 tri_ = tm_->getRegisterInfo();
109 tii_ = tm_->getInstrInfo();
110 aa_ = &getAnalysis<AliasAnalysis>();
111 lv_ = &getAnalysis<LiveVariables>();
112 indexes_ = &getAnalysis<SlotIndexes>();
113 allocatableRegs_ = tri_->getAllocatableSet(fn);
  computeIntervals();

  numIntervals += getNumIntervals();
123 /// print - Implement the dump method.
124 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
125 OS << "********** INTERVALS **********\n";
126 for (const_iterator I = begin(), E = end(); I != E; ++I) {
127 I->second->print(OS, tri_);
134 void LiveIntervals::printInstrs(raw_ostream &OS) const {
135 OS << "********** MACHINEINSTRS **********\n";
137 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
138 mbbi != mbbe; ++mbbi) {
139 OS << "BB#" << mbbi->getNumber()
140 << ":\t\t# derived from " << mbbi->getName() << "\n";
141 for (MachineBasicBlock::iterator mii = mbbi->begin(),
142 mie = mbbi->end(); mii != mie; ++mii) {
143 if (mii->isDebugValue())
144 OS << SlotIndex::getEmptyKey() << '\t' << *mii;
      else
        OS << getInstructionIndex(mii) << '\t' << *mii;
151 void LiveIntervals::dumpInstrs() const {
155 bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
156 VirtRegMap &vrm, unsigned reg) {
157 // We don't handle fancy stuff crossing basic block boundaries
158 if (li.ranges.size() != 1)
160 const LiveRange &range = li.ranges.front();
161 SlotIndex idx = range.start.getBaseIndex();
162 SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
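  // Note that getBaseIndex() rounds an index down to the base (load) slot of
  // its instruction, so stepping from idx to end one index at a time visits
  // every instruction covered by this single range.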
164 // Skip deleted instructions
165 MachineInstr *firstMI = getInstructionFromIndex(idx);
166 while (!firstMI && idx != end) {
167 idx = idx.getNextIndex();
168 firstMI = getInstructionFromIndex(idx);
173 // Find last instruction in range
174 SlotIndex lastIdx = end.getPrevIndex();
175 MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
176 while (!lastMI && lastIdx != idx) {
177 lastIdx = lastIdx.getPrevIndex();
178 lastMI = getInstructionFromIndex(lastIdx);
183 // Range cannot cross basic block boundaries or terminators
184 MachineBasicBlock *MBB = firstMI->getParent();
185 if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
188 MachineBasicBlock::const_iterator E = lastMI;
190 for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
191 const MachineInstr &MI = *I;
193 // Allow copies to and from li.reg
194 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
195 if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
196 if (SrcReg == li.reg || DstReg == li.reg)
199 // Check for operands using reg
200 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
201 const MachineOperand& mop = MI.getOperand(i);
204 unsigned PhysReg = mop.getReg();
205 if (PhysReg == 0 || PhysReg == li.reg)
207 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
208 if (!vrm.hasPhys(PhysReg))
210 PhysReg = vrm.getPhys(PhysReg);
212 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
217 // No conflicts found.
/// conflictsWithPhysRegRef - Similar to conflictsWithPhysReg except that it
/// can check uses as well as defs.
223 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
224 unsigned Reg, bool CheckUse,
225 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
226 for (LiveInterval::Ranges::const_iterator
227 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
228 for (SlotIndex index = I->start.getBaseIndex(),
229 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
231 index = index.getNextIndex()) {
232 MachineInstr *MI = getInstructionFromIndex(index);
234 continue; // skip deleted instructions
236 if (JoinedCopies.count(MI))
238 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
239 MachineOperand& MO = MI->getOperand(i);
242 if (MO.isUse() && !CheckUse)
244 unsigned PhysReg = MO.getReg();
245 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
247 if (tri_->isSubRegister(Reg, PhysReg))
257 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
258 if (TargetRegisterInfo::isPhysicalRegister(reg))
259 dbgs() << tri_->getName(reg);
261 dbgs() << "%reg" << reg;
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
272 dbgs() << "\t\tregister: ";
273 printRegName(interval.reg, tri_);
276 // Virtual registers may be defined multiple times (due to phi
277 // elimination and 2-addr elimination). Much of what we do only has to be
278 // done once for the vreg. We use an empty interval to detect the first
279 // time we see a vreg.
280 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
281 if (interval.empty()) {
282 // Get the Idx of the defining instructions.
283 SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range of
    // their inputs.
286 if (MO.isEarlyClobber())
287 defIndex = MIIdx.getUseIndex();
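    // Each instruction index has load/use/def/store slots; anchoring an
    // early-clobber def at the use slot makes it overlap, and therefore
    // interfere with, the instruction's own inputs.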
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
297 assert(ValNo->id == 0 && "First value in interval is not 0?");
299 // Loop over all of the blocks that the vreg is defined in. There are
300 // two cases we have to handle here. The most common case is a vreg
301 // whose lifetime is contained within a basic block. In this case there
302 // will be a single kill, in MBB, which comes after the definition.
303 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
304 // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();
      // If the kill happens after the definition, we have an intra-block
      // live range.
313 if (killIdx > defIndex) {
314 assert(vi.AliveBlocks.empty() &&
315 "Shouldn't be alive across any blocks!");
316 LiveRange LR(defIndex, killIdx, ValNo);
317 interval.addRange(LR);
318 DEBUG(dbgs() << " +" << LR << "\n");
319 ValNo->addKill(killIdx);
324 // The other case we handle is when a virtual register lives to the end
325 // of the defining block, potentially live across some blocks, then is
326 // live into some number of blocks, but gets killed. Start by adding a
327 // range that goes from this definition to the end of the defining block.
328 LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
329 DEBUG(dbgs() << " +" << NewLR);
330 interval.addRange(NewLR);
332 bool PHIJoin = lv_->isPHIJoin(interval.reg);
335 // A phi join register is killed at the end of the MBB and revived as a new
336 // valno in the killing blocks.
337 assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
338 DEBUG(dbgs() << " phi-join");
339 ValNo->addKill(indexes_->getTerminatorGap(mbb));
340 ValNo->setHasPHIKill(true);
    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
345 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
346 E = vi.AliveBlocks.end(); I != E; ++I) {
347 MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
348 LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
349 interval.addRange(LR);
350 DEBUG(dbgs() << " +" << LR);
354 // Finally, this virtual register is live from the start of any killing
355 // block to the 'use' slot of the killing instruction.
356 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
357 MachineInstr *Kill = vi.Kills[i];
358 SlotIndex Start = getMBBStartIdx(Kill->getParent());
359 SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();
      // Create an interval with a new value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
                                    VNInfoAllocator);
366 ValNo->setIsPHIDef(true);
368 LiveRange LR(Start, killIdx, ValNo);
369 interval.addRange(LR);
370 ValNo->addKill(killIdx);
371 DEBUG(dbgs() << " +" << LR);
375 // If this is the second time we see a virtual register definition, it
376 // must be due to phi elimination or two addr elimination. If this is
377 // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
379 if (mi->isRegTiedToUseOperand(MOIdx)) {
380 // If this is a two-address definition, then we have already processed
381 // the live range. The only problem is that we didn't realize there
382 // are actually two values in the live interval. Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
385 assert(interval.containsOneValue());
386 SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
387 SlotIndex RedefIndex = MIIdx.getDefIndex();
388 if (MO.isEarlyClobber())
389 RedefIndex = MIIdx.getUseIndex();
391 const LiveRange *OldLR =
392 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
393 VNInfo *OldValNo = OldLR->valno;
395 // Delete the initial value, which should be short and continuous,
396 // because the 2-addr copy must be in the same MBB as the redef.
397 interval.removeRange(DefIndex, RedefIndex);
399 // Two-address vregs should always only be redefined once. This means
400 // that at this point, there should be exactly one value number in it.
401 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
410 // Value#0 is now defined by the 2-addr instruction.
411 OldValNo->def = RedefIndex;
412 OldValNo->setCopy(0);
414 // Add the new live interval which replaces the range for the input copy.
415 LiveRange LR(DefIndex, RedefIndex, ValNo);
416 DEBUG(dbgs() << " replace range with " << LR);
417 interval.addRange(LR);
418 ValNo->addKill(RedefIndex);
420 // If this redefinition is dead, we need to add a dummy unit live
421 // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));
427 dbgs() << " RESULT: ";
428 interval.print(dbgs(), tri_);
431 assert(lv_->isPHIJoin(interval.reg) && "Multiply defined register");
432 // In the case of PHI elimination, each variable definition is only
433 // live until the end of the block. We've already taken care of the
434 // rest of the live range.
436 SlotIndex defIndex = MIIdx.getDefIndex();
437 if (MO.isEarlyClobber())
438 defIndex = MIIdx.getUseIndex();
      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
448 SlotIndex killIndex = getMBBEndIdx(mbb);
449 LiveRange LR(defIndex, killIndex, ValNo);
450 interval.addRange(LR);
451 ValNo->addKill(indexes_->getTerminatorGap(mbb));
452 ValNo->setHasPHIKill(true);
453 DEBUG(dbgs() << " phi-join +" << LR);
457 DEBUG(dbgs() << '\n');
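  // Illustrative two-address example (hypothetical indices): given
  //   4  %reg1024<def> = MOV ...
  //   8  %reg1024<def> = ADD %reg1024<kill>, ...
  // the redefinition case above splits the single value so that [4,8) is
  // carried by the new value #1 while value #0 is re-anchored at the
  // redefinition in instruction 8.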
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
469 dbgs() << "\t\tregister: ";
470 printRegName(interval.reg, tri_);
473 SlotIndex baseIndex = MIIdx;
474 SlotIndex start = baseIndex.getDefIndex();
475 // Earlyclobbers move back one.
476 if (MO.isEarlyClobber())
477 start = MIIdx.getUseIndex();
478 SlotIndex end = start;
480 // If it is not used after definition, it is considered dead at
481 // the instruction defining it. Hence its interval is:
482 // [defSlot(def), defSlot(def)+1)
483 // For earlyclobbers, the defSlot was pushed back one; the extra
484 // advance below compensates.
486 DEBUG(dbgs() << " dead");
487 end = start.getStoreIndex();
491 // If it is not dead on definition, it must be killed by a
492 // subsequent instruction. Hence its interval is:
493 // [defSlot(def), useSlot(kill)+1)
494 baseIndex = baseIndex.getNextIndex();
495 while (++mi != MBB->end()) {
497 if (mi->isDebugValue())
499 if (getInstructionFromIndex(baseIndex) == 0)
500 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
502 if (mi->killsRegister(interval.reg, tri_)) {
503 DEBUG(dbgs() << " killed");
504 end = baseIndex.getDefIndex();
507 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
509 if (mi->isRegTiedToUseOperand(DefIdx)) {
510 // Two-address instruction.
511 end = baseIndex.getDefIndex();
513 // Another instruction redefines the register before it is ever read.
514 // Then the register is essentially dead at the instruction that
515 // defines it. Hence its interval is:
516 // [defSlot(def), defSlot(def)+1)
517 DEBUG(dbgs() << " dead");
518 end = start.getStoreIndex();
524 baseIndex = baseIndex.getNextIndex();
  // The only case we should have a dead physreg here without a killing
  // instruction, or an instruction where we know it's dead, is if it is
  // live-in to the function and never used. Another possible case is that
  // the implicit use of the physical register has been deleted by the
  // two-address pass.
531 end = start.getStoreIndex();
534 assert(start < end && "did not find end of interval?");
536 // Already exists? Extend old live interval.
537 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
538 bool Extend = OldLR != interval.end();
539 VNInfo *ValNo = Extend
540 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
541 if (MO.isEarlyClobber() && Extend)
542 ValNo->setHasRedefByEC(true);
543 LiveRange LR(start, end, ValNo);
544 interval.addRange(LR);
545 LR.valno->addKill(end);
546 DEBUG(dbgs() << " +" << LR << '\n');
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx, MachineOperand &MO,
                                      unsigned MOIdx) {
554 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
555 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
556 getOrCreateInterval(MO.getReg()));
557 else if (allocatableRegs_[MO.getReg()]) {
558 MachineInstr *CopyMI = NULL;
559 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
560 if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
563 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
564 getOrCreateInterval(MO.getReg()), CopyMI);
565 // Def of a register also defines its sub-registers.
566 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
567 // If MI also modifies the sub-register explicitly, avoid processing it
568 // more than once. Do not pass in TRI here so it checks for exact match.
569 if (!MI->modifiesRegister(*AS))
570 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
571 getOrCreateInterval(*AS), 0);
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
579 dbgs() << "\t\tlivein register: ";
580 printRegName(interval.reg, tri_);
  // Look for kills; if the value reaches a def before it's killed, then it
  // shouldn't be considered live-in.
585 MachineBasicBlock::iterator mi = MBB->begin();
586 SlotIndex baseIndex = MIIdx;
587 SlotIndex start = baseIndex;
588 if (getInstructionFromIndex(baseIndex) == 0)
589 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
591 SlotIndex end = baseIndex;
592 bool SeenDefUse = false;
594 MachineBasicBlock::iterator E = MBB->end();
596 if (mi->isDebugValue()) {
598 if (mi != E && !mi->isDebugValue()) {
599 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
603 if (mi->killsRegister(interval.reg, tri_)) {
604 DEBUG(dbgs() << " killed");
605 end = baseIndex.getDefIndex();
608 } else if (mi->modifiesRegister(interval.reg, tri_)) {
609 // Another instruction redefines the register before it is ever read.
610 // Then the register is essentially dead at the instruction that defines
611 // it. Hence its interval is:
612 // [defSlot(def), defSlot(def)+1)
613 DEBUG(dbgs() << " dead");
614 end = start.getStoreIndex();
620 if (mi != E && !mi->isDebugValue()) {
621 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
625 // Live-in register might not be used at all.
628 DEBUG(dbgs() << " dead");
629 end = MIIdx.getStoreIndex();
631 DEBUG(dbgs() << " live through");
  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
638 0, false, VNInfoAllocator);
639 vni->setIsPHIDef(true);
640 LiveRange LR(start, end, vni);
642 interval.addRange(LR);
643 LR.valno->addKill(end);
644 DEBUG(dbgs() << " +" << LR << '\n');
/// computeIntervals - Compute the live intervals for virtual registers. For
/// some ordering of the machine instructions [1,N], a live interval is an
/// interval [i, j) where 1 <= i <= j < N for which a variable is live.
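/// For example, a virtual register defined at instruction 4 and last read at
/// instruction 10 gets an interval of roughly [4, 10); the exact
/// sub-instruction slots are provided by SlotIndexes.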
651 void LiveIntervals::computeIntervals() {
652 DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
653 << "********** Function: "
654 << ((Value*)mf_->getFunction())->getName() << '\n');
656 SmallVector<unsigned, 8> UndefUses;
657 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
659 MachineBasicBlock *MBB = MBBI;
663 // Track the index of the current machine instr.
664 SlotIndex MIIndex = getMBBStartIdx(MBB);
665 DEBUG(dbgs() << MBB->getName() << ":\n");
667 // Create intervals for live-ins to this BB first.
668 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
669 LE = MBB->livein_end(); LI != LE; ++LI) {
670 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
671 // Multiple live-ins can alias the same register.
672 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
673 if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
678 // Skip over empty initial indices.
679 if (getInstructionFromIndex(MIIndex) == 0)
680 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
682 for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
684 DEBUG(dbgs() << MIIndex << "\t" << *MI);
685 if (MI->isDebugValue())
689 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
690 MachineOperand &MO = MI->getOperand(i);
691 if (!MO.isReg() || !MO.getReg())
694 // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
697 else if (MO.isUndef())
698 UndefUses.push_back(MO.getReg());
701 // Move to the next instr slot.
702 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
706 // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_defs that define values which are live out of their
  // blocks).
709 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
710 unsigned UndefReg = UndefUses[i];
711 (void)getOrCreateInterval(UndefReg);
715 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
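  // Physical register intervals get an infinite spill weight so the allocator
  // never tries to spill them; virtual register weights start at zero and are
  // accumulated from use/def density later.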
716 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
717 return new LiveInterval(reg, Weight);
720 /// dupInterval - Duplicate a live interval. The caller is responsible for
721 /// managing the allocated memory.
722 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
723 LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
728 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
729 /// copy field and returns the source register that defines it.
730 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
734 if (VNI->getCopy()->isExtractSubreg()) {
735 // If it's extracting out of a physical register, return the sub-register.
736 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
737 if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
738 unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
739 unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
740 if (SrcSubReg == DstSubReg)
741 // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
        // reg1034 can still be coalesced to EDX.
        return Reg;
744 assert(DstSubReg == 0);
745 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
748 } else if (VNI->getCopy()->isInsertSubreg() ||
749 VNI->getCopy()->isSubregToReg())
750 return VNI->getCopy()->getOperand(2).getReg();
752 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
755 llvm_unreachable("Unrecognized copy instruction!");
759 //===----------------------------------------------------------------------===//
760 // Register allocator hooks.
763 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
764 /// allow one) virtual register operand, then its uses are implicitly using
765 /// the register. Returns the virtual register.
766 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
767 MachineInstr *MI) const {
769 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
770 MachineOperand &MO = MI->getOperand(i);
771 if (!MO.isReg() || !MO.isUse())
773 unsigned Reg = MO.getReg();
774 if (Reg == 0 || Reg == li.reg)
777 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
778 !allocatableRegs_[Reg])
780 // FIXME: For now, only remat MI with at most one register operand.
           "Can't rematerialize instruction with multiple register operands!");
791 /// isValNoAvailableAt - Return true if the val# of the specified interval
792 /// which reaches the given instruction also reaches the specified use index.
793 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
794 SlotIndex UseIdx) const {
795 SlotIndex Index = getInstructionIndex(MI);
796 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
797 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
798 return UI != li.end() && UI->valno == ValNo;
801 /// isReMaterializable - Returns true if the definition MI of the specified
802 /// val# of the specified interval is re-materializable.
803 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
804 const VNInfo *ValNo, MachineInstr *MI,
805 SmallVectorImpl<LiveInterval*> &SpillIs,
810 if (!tii_->isTriviallyReMaterializable(MI, aa_))
813 // Target-specific code can mark an instruction as being rematerializable
814 // if it has one virtual reg use, though it had better be something like
815 // a PIC base register which is likely to be live everywhere.
816 unsigned ImpUse = getReMatImplicitUse(li, MI);
818 const LiveInterval &ImpLi = getInterval(ImpUse);
819 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
820 re = mri_->use_end(); ri != re; ++ri) {
821 MachineInstr *UseMI = &*ri;
822 SlotIndex UseIdx = getInstructionIndex(UseMI);
823 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
825 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
829 // If a register operand of the re-materialized instruction is going to
830 // be spilled next, then it's not legal to re-materialize this instruction.
831 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
832 if (ImpUse == SpillIs[i]->reg)
838 /// isReMaterializable - Returns true if the definition MI of the specified
839 /// val# of the specified interval is re-materializable.
840 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
841 const VNInfo *ValNo, MachineInstr *MI) {
842 SmallVector<LiveInterval*, 4> Dummy1;
844 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
847 /// isReMaterializable - Returns true if every definition of MI of every
848 /// val# of the specified interval is re-materializable.
849 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
850 SmallVectorImpl<LiveInterval*> &SpillIs,
853 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
855 const VNInfo *VNI = *i;
857 continue; // Dead val#.
858 // Is the def for the val# rematerializable?
859 if (!VNI->isDefAccurate())
861 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
862 bool DefIsLoad = false;
864 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
871 /// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
874 static bool FilterFoldedOps(MachineInstr *MI,
875 SmallVector<unsigned, 2> &Ops,
877 SmallVector<unsigned, 2> &FoldOps) {
879 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
880 unsigned OpIdx = Ops[i];
881 MachineOperand &MO = MI->getOperand(OpIdx);
882 // FIXME: fold subreg use.
886 MRInfo |= (unsigned)VirtRegMap::isMod;
888 // Filter out two-address use operand(s).
889 if (MI->isRegTiedToDefOperand(OpIdx)) {
890 MRInfo = VirtRegMap::isModRef;
893 MRInfo |= (unsigned)VirtRegMap::isRef;
895 FoldOps.push_back(OpIdx);
901 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
902 /// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
905 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
906 VirtRegMap &vrm, MachineInstr *DefMI,
908 SmallVector<unsigned, 2> &Ops,
909 bool isSS, int Slot, unsigned Reg) {
910 // If it is an implicit def instruction, just delete it.
911 if (MI->isImplicitDef()) {
912 RemoveMachineInstrFromMaps(MI);
913 vrm.RemoveMachineInstrFromMaps(MI);
914 MI->eraseFromParent();
919 // Filter the list of operand indexes that are to be folded. Abort if
920 // any operand will prevent folding.
922 SmallVector<unsigned, 2> FoldOps;
923 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
926 // The only time it's safe to fold into a two address instruction is when
927 // it's folding reload and spill from / into a spill stack slot.
928 if (DefMI && (MRInfo & VirtRegMap::isMod))
931 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
932 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
934 // Remember this instruction uses the spill slot.
935 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
937 // Attempt to fold the memory reference into the instruction. If
938 // we can do this, we don't need to insert spill code.
939 MachineBasicBlock &MBB = *MI->getParent();
940 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
941 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
942 vrm.transferSpillPts(MI, fmi);
943 vrm.transferRestorePts(MI, fmi);
944 vrm.transferEmergencySpills(MI, fmi);
945 ReplaceMachineInstrInMaps(MI, fmi);
946 MI = MBB.insert(MBB.erase(MI), fmi);
953 /// canFoldMemoryOperand - Returns true if the specified load / store
954 /// folding is possible.
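/// Unlike tryFoldMemoryOperand above, this is a pure query: it asks the target
/// whether the fold would succeed but performs no rewriting and updates no
/// maps.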
955 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
956 SmallVector<unsigned, 2> &Ops,
958 // Filter the list of operand indexes that are to be folded. Abort if
959 // any operand will prevent folding.
961 SmallVector<unsigned, 2> FoldOps;
962 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
965 // It's only legal to remat for a use, not a def.
966 if (ReMat && (MRInfo & VirtRegMap::isMod))
969 return tii_->canFoldMemoryOperand(MI, FoldOps);
972 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
973 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
975 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
980 for (++itr; itr != li.ranges.end(); ++itr) {
981 MachineBasicBlock *mbb2 =
982 indexes_->getMBBCoveringRange(itr->start, itr->end);
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of the
/// interval's register on to-be-rematerialized operands of MI) with the new
/// register.
993 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
994 MachineInstr *MI, unsigned NewVReg,
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
999 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1000 MachineOperand &MO = MI->getOperand(i);
1003 unsigned Reg = MO.getReg();
1004 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1006 if (!vrm.isReMaterialized(Reg))
1008 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1009 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1011 UseMO->setReg(NewVReg);
1015 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1016 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1017 bool LiveIntervals::
1018 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1019 bool TrySplit, SlotIndex index, SlotIndex end,
1021 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1022 unsigned Slot, int LdSlot,
1023 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1025 const TargetRegisterClass* rc,
1026 SmallVector<int, 4> &ReMatIds,
1027 const MachineLoopInfo *loopInfo,
1028 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1029 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1030 std::vector<LiveInterval*> &NewLIs) {
1031 bool CanFold = false;
1033 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1034 MachineOperand& mop = MI->getOperand(i);
1037 unsigned Reg = mop.getReg();
1038 unsigned RegI = Reg;
1039 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1044 bool TryFold = !DefIsReMat;
1045 bool FoldSS = true; // Default behavior unless it's a remat.
1046 int FoldSlot = Slot;
1048 // If this is the rematerializable definition MI itself and
1049 // all of its uses are rematerialized, simply delete it.
1050 if (MI == ReMatOrigDefMI && CanDelete) {
1051 DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
1053 RemoveMachineInstrFromMaps(MI);
1054 vrm.RemoveMachineInstrFromMaps(MI);
1055 MI->eraseFromParent();
1059 // If def for this use can't be rematerialized, then try folding.
1060 // If def is rematerializable and it's a load, also try folding.
1061 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1063 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1069 // Scan all of the operands of this instruction rewriting operands
      // to use NewVReg instead of li.reg as appropriate. We do this for two
      // reasons:
1073 // 1. If the instr reads the same spilled vreg multiple times, we
1074 // want to reuse the NewVReg.
1075 // 2. If the instr is a two-addr instruction, we are required to
1076 // keep the src/dst regs pinned.
1078 // Keep track of whether we replace a use and/or def so that we can
1079 // create the spill interval with the appropriate range.
1081 HasUse = mop.isUse();
1082 HasDef = mop.isDef();
1083 SmallVector<unsigned, 2> Ops;
1085 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1086 const MachineOperand &MOj = MI->getOperand(j);
1089 unsigned RegJ = MOj.getReg();
1090 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1094 if (!MOj.isUndef()) {
1095 HasUse |= MOj.isUse();
1096 HasDef |= MOj.isDef();
1101 // Create a new virtual register for the spill interval.
1102 // Create the new register now so we can map the fold instruction
      // to the new register so when it is unfolded we get the correct
      // answer.
1105 bool CreatedNewVReg = false;
1107 NewVReg = mri_->createVirtualRegister(rc);
1109 CreatedNewVReg = true;
        // The new virtual register should get the same allocation hints as the
        // parent register.
1113 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1114 if (Hint.first || Hint.second)
1115 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1121 // Do not fold load / store here if we are splitting. We'll find an
1122 // optimal point to insert a load / store later.
1124 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1125 Ops, FoldSS, FoldSlot, NewVReg)) {
1126 // Folding the load/store can completely change the instruction in
1127 // unpredictable ways, rescan it from the beginning.
1130 // We need to give the new vreg the same stack slot as the
1131 // spilled interval.
1132 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1138 if (isNotInMIMap(MI))
1140 goto RestartInstruction;
1143 // We'll try to fold it later if it's profitable.
1144 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1148 mop.setReg(NewVReg);
1149 if (mop.isImplicit())
1150 rewriteImplicitOps(li, MI, NewVReg, vrm);
1152 // Reuse NewVReg for other reads.
1153 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1154 MachineOperand &mopj = MI->getOperand(Ops[j]);
1155 mopj.setReg(NewVReg);
1156 if (mopj.isImplicit())
1157 rewriteImplicitOps(li, MI, NewVReg, vrm);
1160 if (CreatedNewVReg) {
1162 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1163 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1164 // Each valnum may have its own remat id.
1165 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1167 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1169 if (!CanDelete || (HasUse && HasDef)) {
1170 // If this is a two-addr instruction then its use operands are
            // rematerializable but its def is not. It should be assigned a
            // stack slot.
1173 vrm.assignVirt2StackSlot(NewVReg, Slot);
1176 vrm.assignVirt2StackSlot(NewVReg, Slot);
1178 } else if (HasUse && HasDef &&
1179 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1180 // If this interval hasn't been assigned a stack slot (because earlier
1181 // def is a deleted remat def), do it now.
1182 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1183 vrm.assignVirt2StackSlot(NewVReg, Slot);
1186 // Re-matting an instruction with virtual register use. Add the
1187 // register as an implicit use on the use MI.
1188 if (DefIsReMat && ImpUse)
1189 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1191 // Create a new register interval for this spill / remat.
1192 LiveInterval &nI = getOrCreateInterval(NewVReg);
1193 if (CreatedNewVReg) {
1194 NewLIs.push_back(&nI);
1195 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1197 vrm.setIsSplitFromReg(NewVReg, li.reg);
1201 if (CreatedNewVReg) {
1202 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1203 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1204 DEBUG(dbgs() << " +" << LR);
1207 // Extend the split live interval to this def / use.
1208 SlotIndex End = index.getDefIndex();
1209 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1210 nI.getValNumInfo(nI.getNumValNums()-1));
1211 DEBUG(dbgs() << " +" << LR);
1216 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1217 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1218 DEBUG(dbgs() << " +" << LR);
1223 dbgs() << "\t\t\t\tAdded new interval: ";
1224 nI.print(dbgs(), tri_);
1230 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1232 MachineBasicBlock *MBB,
1233 SlotIndex Idx) const {
1234 SlotIndex End = getMBBEndIdx(MBB);
1235 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1236 if (VNI->kills[j].isPHI())
1239 SlotIndex KillIdx = VNI->kills[j];
1240 if (KillIdx > Idx && KillIdx <= End)
1246 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1247 /// during spilling.
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
1254 RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
1255 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1258 struct RewriteInfoCompare {
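    // Orders rewrites by instruction index so that std::sort groups all
    // operands of the same MachineInstr together, which the rewrite loop
    // below relies on.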
1259 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1260 return LHS.Index < RHS.Index;
1265 void LiveIntervals::
1266 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1267 LiveInterval::Ranges::const_iterator &I,
1268 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1269 unsigned Slot, int LdSlot,
1270 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1272 const TargetRegisterClass* rc,
1273 SmallVector<int, 4> &ReMatIds,
1274 const MachineLoopInfo *loopInfo,
1275 BitVector &SpillMBBs,
1276 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1277 BitVector &RestoreMBBs,
1278 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1279 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1280 std::vector<LiveInterval*> &NewLIs) {
1281 bool AllCanFold = true;
1282 unsigned NewVReg = 0;
1283 SlotIndex start = I->start.getBaseIndex();
1284 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
1288 std::vector<RewriteInfo> RewriteMIs;
1289 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1290 re = mri_->reg_end(); ri != re; ) {
1291 MachineInstr *MI = &*ri;
1292 MachineOperand &O = ri.getOperand();
1294 if (MI->isDebugValue()) {
1295 // Remove debug info for now.
1297 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1300 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1301 SlotIndex index = getInstructionIndex(MI);
1302 if (index < start || index >= end)
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG into target registers that would overlap.
1316 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1318 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1320 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1321 // Now rewrite the defs and uses.
1322 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1323 RewriteInfo &rwi = RewriteMIs[i];
1325 SlotIndex index = rwi.Index;
1326 bool MIHasUse = rwi.HasUse;
1327 bool MIHasDef = rwi.HasDef;
1328 MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
1331 unsigned NumUses = MIHasUse;
1332 while (i != e && RewriteMIs[i].MI == MI) {
1333 assert(RewriteMIs[i].Index == index);
1334 bool isUse = RewriteMIs[i].HasUse;
1335 if (isUse) ++NumUses;
1337 MIHasDef |= RewriteMIs[i].HasDef;
1340 MachineBasicBlock *MBB = MI->getParent();
1342 if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
1346 LiveInterval &ImpLi = getInterval(ImpUse);
1347 ImpLi.weight = HUGE_VALF;
1350 unsigned MBBId = MBB->getNumber();
1351 unsigned ThisVReg = 0;
1353 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1354 if (NVI != MBBVRegsMap.end()) {
1355 ThisVReg = NVI->second;
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
1364 if (MIHasDef && !MIHasUse) {
1365 MBBVRegsMap.erase(MBB->getNumber());
1371 bool IsNew = ThisVReg == 0;
1373 // This ends the previous live interval. If all of its def / use
1374 // can be folded, give it a low spill weight.
1375 if (NewVReg && TrySplit && AllCanFold) {
1376 LiveInterval &nI = getOrCreateInterval(NewVReg);
1383 bool HasDef = false;
1384 bool HasUse = false;
1385 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1386 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1387 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1388 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1389 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1390 if (!HasDef && !HasUse)
1393 AllCanFold &= CanFold;
1395 // Update weight of spill interval.
1396 LiveInterval &nI = getOrCreateInterval(NewVReg);
1398 // The spill weight is now infinity as it cannot be spilled again.
1399 nI.weight = HUGE_VALF;
1403 // Keep track of the last def and first use in each MBB.
1405 if (MI != ReMatOrigDefMI || !CanDelete) {
1406 bool HasKill = false;
1408 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        // If this is two-address code, then this index starts a new VNInfo.
1411 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1413 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1415 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1416 SpillIdxes.find(MBBId);
1418 if (SII == SpillIdxes.end()) {
1419 std::vector<SRInfo> S;
1420 S.push_back(SRInfo(index, NewVReg, true));
1421 SpillIdxes.insert(std::make_pair(MBBId, S));
1422 } else if (SII->second.back().vreg != NewVReg) {
1423 SII->second.push_back(SRInfo(index, NewVReg, true));
1424 } else if (index > SII->second.back().index) {
1425 // If there is an earlier def and this is a two-address
1426 // instruction, then it's not possible to fold the store (which
1427 // would also fold the load).
1428 SRInfo &Info = SII->second.back();
1430 Info.canFold = !HasUse;
1432 SpillMBBs.set(MBBId);
1433 } else if (SII != SpillIdxes.end() &&
1434 SII->second.back().vreg == NewVReg &&
1435 index > SII->second.back().index) {
1436 // There is an earlier def that's not killed (must be two-address).
1437 // The spill is no longer needed.
1438 SII->second.pop_back();
1439 if (SII->second.empty()) {
1440 SpillIdxes.erase(MBBId);
1441 SpillMBBs.reset(MBBId);
1448 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1449 SpillIdxes.find(MBBId);
1450 if (SII != SpillIdxes.end() &&
1451 SII->second.back().vreg == NewVReg &&
1452 index > SII->second.back().index)
        // There are use(s) following the last def; it's not safe to fold
        // the spill.
1454 SII->second.back().canFold = false;
1455 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1456 RestoreIdxes.find(MBBId);
1457 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1458 // If we are splitting live intervals, only fold if it's the first
1459 // use and there isn't another use later in the MBB.
1460 RII->second.back().canFold = false;
1462 // Only need a reload if there isn't an earlier def / use.
1463 if (RII == RestoreIdxes.end()) {
1464 std::vector<SRInfo> Infos;
1465 Infos.push_back(SRInfo(index, NewVReg, true));
1466 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1468 RII->second.push_back(SRInfo(index, NewVReg, true));
1470 RestoreMBBs.set(MBBId);
1474 // Update spill weight.
1475 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1476 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
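    // getSpillWeight biases future allocation: the added weight grows with
    // whether this instruction defines and/or uses the value and with the
    // loop depth of the containing block.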
1479 if (NewVReg && TrySplit && AllCanFold) {
1480 // If all of its def / use can be folded, give it a low spill weight.
1481 LiveInterval &nI = getOrCreateInterval(NewVReg);
1486 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1487 unsigned vr, BitVector &RestoreMBBs,
1488 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1489 if (!RestoreMBBs[Id])
1491 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1492 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1493 if (Restores[i].index == index &&
1494 Restores[i].vreg == vr &&
1495 Restores[i].canFold)
1500 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1501 unsigned vr, BitVector &RestoreMBBs,
1502 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1503 if (!RestoreMBBs[Id])
1505 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1506 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
1508 Restores[i].index = SlotIndex();
1511 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1512 /// spilled and create empty intervals for their uses.
1514 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1515 const TargetRegisterClass* rc,
1516 std::vector<LiveInterval*> &NewLIs) {
1517 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1518 re = mri_->reg_end(); ri != re; ) {
1519 MachineOperand &O = ri.getOperand();
1520 MachineInstr *MI = &*ri;
1523 assert(MI->isImplicitDef() &&
1524 "Register def was not rewritten?");
1525 RemoveMachineInstrFromMaps(MI);
1526 vrm.RemoveMachineInstrFromMaps(MI);
1527 MI->eraseFromParent();
    // This must be a use of an implicit_def so it's not part of the live
    // interval. Create a new empty live interval for it.
    // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1532 unsigned NewVReg = mri_->createVirtualRegister(rc);
1534 vrm.setIsImplicitlyDefined(NewVReg);
1535 NewLIs.push_back(&getOrCreateInterval(NewVReg));
1536 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1537 MachineOperand &MO = MI->getOperand(i);
1538 if (MO.isReg() && MO.getReg() == li.reg) {
1548 LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1549 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1550 normalizeSpillWeight(*NewLIs[i]);
1553 std::vector<LiveInterval*> LiveIntervals::
1554 addIntervalsForSpillsFast(const LiveInterval &li,
1555 const MachineLoopInfo *loopInfo,
1557 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1559 std::vector<LiveInterval*> added;
1561 assert(li.weight != HUGE_VALF &&
1562 "attempt to spill already spilled interval!");
1565 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1570 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1572 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1573 while (RI != mri_->reg_end()) {
1574 MachineInstr* MI = &*RI;
1576 SmallVector<unsigned, 2> Indices;
1577 bool HasUse = false;
1578 bool HasDef = false;
1580 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1581 MachineOperand& mop = MI->getOperand(i);
1582 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1584 HasUse |= MI->getOperand(i).isUse();
1585 HasDef |= MI->getOperand(i).isDef();
1587 Indices.push_back(i);
1590 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1591 Indices, true, slot, li.reg)) {
1592 unsigned NewVReg = mri_->createVirtualRegister(rc);
1594 vrm.assignVirt2StackSlot(NewVReg, slot);
1596 // create a new register for this spill
1597 LiveInterval &nI = getOrCreateInterval(NewVReg);
1599 // the spill weight is now infinity as it
1600 // cannot be spilled again
1601 nI.weight = HUGE_VALF;
1603 // Rewrite register operands to use the new vreg.
1604 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1605 E = Indices.end(); I != E; ++I) {
1606 MI->getOperand(*I).setReg(NewVReg);
1608 if (MI->getOperand(*I).isUse())
1609 MI->getOperand(*I).setIsKill(true);
1612 // Fill in the new live interval.
1613 SlotIndex index = getInstructionIndex(MI);
1615 LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1616 nI.getNextValue(SlotIndex(), 0, false,
1617 getVNInfoAllocator()));
1618 DEBUG(dbgs() << " +" << LR);
1620 vrm.addRestorePoint(NewVReg, MI);
1623 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1624 nI.getNextValue(SlotIndex(), 0, false,
1625 getVNInfoAllocator()));
1626 DEBUG(dbgs() << " +" << LR);
1628 vrm.addSpillPoint(NewVReg, true, MI);
1631 added.push_back(&nI);
1634 dbgs() << "\t\t\t\tadded new interval: ";
1641 RI = mri_->reg_begin(li.reg);
1647 std::vector<LiveInterval*> LiveIntervals::
1648 addIntervalsForSpills(const LiveInterval &li,
1649 SmallVectorImpl<LiveInterval*> &SpillIs,
1650 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1652 if (EnableFastSpilling)
1653 return addIntervalsForSpillsFast(li, loopInfo, vrm);
1655 assert(li.weight != HUGE_VALF &&
1656 "attempt to spill already spilled interval!");
1659 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1660 li.print(dbgs(), tri_);
  // Each bit specifies whether a spill is required in the MBB.
1665 BitVector SpillMBBs(mf_->getNumBlockIDs());
1666 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1667 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1668 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1669 DenseMap<unsigned,unsigned> MBBVRegsMap;
1670 std::vector<LiveInterval*> NewLIs;
1671 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1673 unsigned NumValNums = li.getNumValNums();
1674 SmallVector<MachineInstr*, 4> ReMatDefs;
1675 ReMatDefs.resize(NumValNums, NULL);
1676 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1677 ReMatOrigDefs.resize(NumValNums, NULL);
1678 SmallVector<int, 4> ReMatIds;
1679 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
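  // MAX_STACK_SLOT doubles as a sentinel: a val# whose ReMatIds entry still
  // equals it has not been assigned a rematerialization id yet (see
  // rewriteInstructionForSpills).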
1680 BitVector ReMatDelete(NumValNums);
1681 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
1685 if (vrm.getPreSplitReg(li.reg)) {
1686 vrm.setIsSplitFromReg(li.reg, 0);
1687 // Unset the split kill marker on the last use.
1688 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1689 if (KillIdx != SlotIndex()) {
1690 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1691 assert(KillMI && "Last use disappeared?");
1692 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1693 assert(KillOp != -1 && "Last use disappeared?");
1694 KillMI->getOperand(KillOp).setIsKill(false);
1696 vrm.removeKillPoint(li.reg);
1697 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1698 Slot = vrm.getStackSlot(li.reg);
1699 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1700 MachineInstr *ReMatDefMI = DefIsReMat ?
1701 vrm.getReMaterializedMI(li.reg) : NULL;
1703 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1704 bool isLoad = isLoadSS ||
1705 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1706 bool IsFirstRange = true;
1707 for (LiveInterval::Ranges::const_iterator
1708 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1709 // If this is a split live interval with multiple ranges, it means there
1710 // are two-address instructions that re-defined the value. Only the
1711 // first def can be rematerialized!
1713 // Note ReMatOrigDefMI has already been deleted.
1714 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1715 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1716 false, vrm, rc, ReMatIds, loopInfo,
1717 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1718 MBBVRegsMap, NewLIs);
1720 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1721 Slot, 0, false, false, false,
1722 false, vrm, rc, ReMatIds, loopInfo,
1723 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1724 MBBVRegsMap, NewLIs);
1726 IsFirstRange = false;
1729 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1730 normalizeSpillWeights(NewLIs);
1734 bool TrySplit = !intervalIsInOneMBB(li);
1737 bool NeedStackSlot = false;
1738 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1740 const VNInfo *VNI = *i;
1741 unsigned VN = VNI->id;
1742 if (VNI->isUnused())
1743 continue; // Dead val#.
1744 // Is the def for the val# rematerializable?
1745 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1746 ? getInstructionFromIndex(VNI->def) : 0;
1748 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1749 // Remember how to remat the def of this val#.
1750 ReMatOrigDefs[VN] = ReMatDefMI;
1751 // Original def may be modified so we have to make a copy here.
1752 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1753 CloneMIs.push_back(Clone);
1754 ReMatDefs[VN] = Clone;
1756 bool CanDelete = true;
1757 if (VNI->hasPHIKill()) {
1758 // A kill is a phi node, not all of its uses can be rematerialized.
1759 // It must not be deleted.
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
1763 NeedStackSlot = true;
1766 ReMatDelete.set(VN);
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
1770 NeedStackSlot = true;
1774 // One stack slot per live interval.
1775 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1776 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1777 Slot = vrm.assignVirt2StackSlot(li.reg);
1779 // This case only occurs when the prealloc splitter has already assigned
1780 // a stack slot to this vreg.
1782 Slot = vrm.getStackSlot(li.reg);
1785 // Create new intervals and rewrite defs and uses.
1786 for (LiveInterval::Ranges::const_iterator
1787 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1788 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1789 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1790 bool DefIsReMat = ReMatDefMI != NULL;
1791 bool CanDelete = ReMatDelete[I->valno->id];
1793 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1794 bool isLoad = isLoadSS ||
1795 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1796 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1797 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1798 CanDelete, vrm, rc, ReMatIds, loopInfo,
1799 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1800 MBBVRegsMap, NewLIs);
1803 // Insert spills / restores if we are splitting.
1805 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1806 normalizeSpillWeights(NewLIs);
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

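  // Now place the reloads / rematerializations recorded in RestoreIdxes,
  // folding them into the using instruction whenever possible.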
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with a virtual register use. Add the
              // register as an implicit use on the use MI and update the
              // register interval's spill weight to HUGE_VALF to prevent it
              // from being spilled.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.weight = HUGE_VALF;
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}

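// A rough sketch of how a spilling client might drive addIntervalsForSpills
// (the identifiers below are illustrative only, not taken from any particular
// allocator):
//
//   SmallVector<LiveInterval*, 8> SpillIs;   // other intervals spilled now
//   std::vector<LiveInterval*> Added =
//     lis->addIntervalsForSpills(*cur, SpillIs, loopInfo, vrm);
//   // Each returned interval covers a short live range around a spill or
//   // restore point and is handed back to the allocator for assignment.
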
/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

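// Note that both getNumConflictsWithPhysReg above and
// spillPhysRegAroundRegDefsUses below operate on the representative (largest)
// super-register, so conflicts are counted against the same physical interval
// that would actually be spilled.
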
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg,
                                                  VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there is a register that aliases PhysReg but is not a sub-register
    // of the chosen representative super register, assert since we can't
    // handle it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }

  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        llvm_report_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}

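/// addLiveRangeToEndOfBlock - Create a new value for the given register,
/// defined by startInst, and extend its live interval from that definition
/// to the end of startInst's basic block.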
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
  LiveRange LR(
     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
     getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);