1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
49 // Hidden options for help debugging.
// -disable-rematerialization: hidden debugging flag; defaults to false
// (i.e. rematerialization stays enabled unless explicitly disabled).
50 static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
// -fast-spill: hidden debugging flag, default off.
// NOTE(review): the use site is not visible in this chunk — presumably selects
// an alternate (faster) spilling path; confirm where EnableFastSpilling is read.
53 static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
// Pass statistics, reported with -stats.
56 STATISTIC(numIntervals , "Number of original intervals");
57 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
58 STATISTIC(numSplits , "Number of intervals split");
// Pass identity: the address of ID uniquely identifies this pass to the
// pass manager; the value itself is unused.
60 char LiveIntervals::ID = 0;
// Register the pass so it can be requested as -liveintervals.
61 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// getAnalysisUsage - Declare required/preserved analyses so the pass manager
// schedules LiveIntervals after alias analysis, live variables, PHI
// elimination, two-address lowering, implicit-def processing and slot
// indexing, and knows which of those this pass leaves intact.
63 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
65 AU.addRequired<AliasAnalysis>();
66 AU.addPreserved<AliasAnalysis>();
67 AU.addPreserved<LiveVariables>();
68 AU.addRequired<LiveVariables>();
69 AU.addPreservedID(MachineLoopInfoID);
70 AU.addPreservedID(MachineDominatorsID);
73 AU.addPreservedID(PHIEliminationID);
74 AU.addRequiredID(PHIEliminationID);
77 AU.addRequiredID(TwoAddressInstructionPassID);
78 AU.addPreserved<ProcessImplicitDefs>();
79 AU.addRequired<ProcessImplicitDefs>();
80 AU.addPreserved<SlotIndexes>();
// Transitive: SlotIndexes must stay alive for as long as this analysis is.
81 AU.addRequiredTransitive<SlotIndexes>();
82 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Drop all per-function state: the LiveInterval objects held
// in r2iMap_, the VNInfo region allocator, and any cloned machine
// instructions that were kept alive in CloneMIs.
85 void LiveIntervals::releaseMemory() {
86 // Free the live intervals themselves.
87 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
88 E = r2iMap_.end(); I != E; ++I)
93 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
94 VNInfoAllocator.DestroyAll();
// Hand every cloned instruction back to the MachineFunction for deletion.
95 while (!CloneMIs.empty()) {
96 MachineInstr *MI = CloneMIs.back();
98 mf_->DeleteMachineInstr(MI);
102 /// runOnMachineFunction - Compute live intervals for the whole function.
104 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
// Cache function/target accessors and the sibling analyses for the duration
// of this run.
106 mri_ = &mf_->getRegInfo();
107 tm_ = &fn.getTarget();
108 tri_ = tm_->getRegisterInfo();
109 tii_ = tm_->getInstrInfo();
110 aa_ = &getAnalysis<AliasAnalysis>();
111 lv_ = &getAnalysis<LiveVariables>();
112 indexes_ = &getAnalysis<SlotIndexes>();
// Bit vector of physical registers the allocator may use on this target.
113 allocatableRegs_ = tri_->getAllocatableSet(fn);
// Update the -stats counter with the intervals built for this function.
117 numIntervals += getNumIntervals();
123 /// print - Implement the dump method.
// Emits a banner followed by every interval in register-number order.
124 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
125 OS << "********** INTERVALS **********\n";
126 for (const_iterator I = begin(), E = end(); I != E; ++I) {
127 I->second->print(OS, tri_);
// printInstrs - Dump every basic block and each of its instructions prefixed
// with the instruction's slot index. DBG_VALUE instructions are special-cased
// because they carry no slot index of their own.
134 void LiveIntervals::printInstrs(raw_ostream &OS) const {
135 OS << "********** MACHINEINSTRS **********\n";
137 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
138 mbbi != mbbe; ++mbbi) {
139 OS << "BB#" << mbbi->getNumber()
140 << ":\t\t# derived from " << mbbi->getName() << "\n";
141 for (MachineBasicBlock::iterator mii = mbbi->begin(),
142 mie = mbbi->end(); mii != mie; ++mii) {
143 if (mii->isDebugValue())
146 OS << getInstructionIndex(mii) << '\t' << *mii;
// dumpInstrs - Convenience wrapper: print all instructions to the debug stream.
151 void LiveIntervals::dumpInstrs() const {
/// conflictsWithPhysReg - Scan the single live range of 'li' and report
/// whether any instruction inside it references a physical register (directly
/// or via the current VirtRegMap assignment of a virtual register) that
/// overlaps 'reg'. Copies to/from li.reg itself are tolerated.
155 bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
156 VirtRegMap &vrm, unsigned reg) {
157 // We don't handle fancy stuff crossing basic block boundaries
158 if (li.ranges.size() != 1)
160 const LiveRange &range = li.ranges.front();
// [idx, end) covers the whole-instruction (base) indices of the range.
161 SlotIndex idx = range.start.getBaseIndex();
162 SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
164 // Skip deleted instructions
165 MachineInstr *firstMI = getInstructionFromIndex(idx);
166 while (!firstMI && idx != end) {
167 idx = idx.getNextIndex();
168 firstMI = getInstructionFromIndex(idx);
173 // Find last instruction in range
174 SlotIndex lastIdx = end.getPrevIndex();
175 MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
176 while (!lastMI && lastIdx != idx) {
177 lastIdx = lastIdx.getPrevIndex();
178 lastMI = getInstructionFromIndex(lastIdx);
183 // Range cannot cross basic block boundaries or terminators
184 MachineBasicBlock *MBB = firstMI->getParent();
185 if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
188 MachineBasicBlock::const_iterator E = lastMI;
190 for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
191 const MachineInstr &MI = *I;
193 // Allow copies to and from li.reg
194 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
195 if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
196 if (SrcReg == li.reg || DstReg == li.reg)
199 // Check for operands using reg
200 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
201 const MachineOperand& mop = MI.getOperand(i);
204 unsigned PhysReg = mop.getReg();
205 if (PhysReg == 0 || PhysReg == li.reg)
// A virtual operand conflicts through the physreg it is currently mapped to.
207 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
208 if (!vrm.hasPhys(PhysReg))
210 PhysReg = vrm.getPhys(PhysReg);
212 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
217 // No conflicts found.
221 /// conflictsWithSubPhysRegRef - Similar to conflictsWithPhysRegRef except
222 /// it checks for sub-register reference and it can check use as well.
223 bool LiveIntervals::conflictsWithSubPhysRegRef(LiveInterval &li,
224 unsigned Reg, bool CheckUse,
225 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
// Walk every instruction index covered by each of li's live ranges.
226 for (LiveInterval::Ranges::const_iterator
227 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
228 for (SlotIndex index = I->start.getBaseIndex(),
229 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
231 index = index.getNextIndex()) {
232 MachineInstr *MI = getInstructionFromIndex(index);
234 continue; // skip deleted instructions
// Copies already scheduled for coalescing don't count as references.
236 if (JoinedCopies.count(MI))
238 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
239 MachineOperand& MO = MI->getOperand(i);
// Uses only matter when the caller asked for them (CheckUse).
242 if (MO.isUse() && !CheckUse)
244 unsigned PhysReg = MO.getReg();
245 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
247 if (tri_->isSubRegister(Reg, PhysReg))
// printRegName - Debug helper: physical registers print by target name,
// virtual registers as "%regN".
257 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
258 if (TargetRegisterInfo::isPhysicalRegister(reg))
259 dbgs() << tri_->getName(reg);
261 dbgs() << "%reg" << reg;
/// MultipleDefsBySameMI - Return true if the instruction defines the register
/// at operand MOIdx and again in a later operand. The assert documents the
/// only expected shape: distinct sub-register defs of the same register.
266 bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
267 unsigned Reg = MI.getOperand(MOIdx).getReg();
268 for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
269 const MachineOperand &MO = MI.getOperand(i);
272 if (MO.getReg() == Reg && MO.isDef()) {
273 assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
274 MI.getOperand(MOIdx).getSubReg() &&
282 /// isPartialRedef - Return true if the specified def at the specific index is
283 /// partially re-defining the specified live interval. A common case of this is
284 /// a definition of the sub-register.
285 bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
286 LiveInterval &interval) {
// Only sub-register defs can be partial; early-clobbers are handled elsewhere.
287 if (!MO.getSubReg() || MO.isEarlyClobber())
290 SlotIndex RedefIndex = MIIdx.getDefIndex();
// Look at the value live just before this def.
291 const LiveRange *OldLR =
292 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
293 if (OldLR->valno->isDefAccurate()) {
294 MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
// Partial redef iff the previous value was also defined by writing this reg.
295 return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
// handleVirtualRegisterDef - Extend 'interval' for a def of a virtual
// register. The first def of a vreg creates its value number(s) from
// LiveVariables kill information; subsequent defs are two-address/partial
// redefs or phi-join defs.
300 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
301 MachineBasicBlock::iterator mi,
305 LiveInterval &interval) {
307 dbgs() << "\t\tregister: ";
308 printRegName(interval.reg, tri_);
311 // Virtual registers may be defined multiple times (due to phi
312 // elimination and 2-addr elimination). Much of what we do only has to be
313 // done once for the vreg. We use an empty interval to detect the first
314 // time we see a vreg.
315 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
316 if (interval.empty()) {
317 // Get the Idx of the defining instructions.
318 SlotIndex defIndex = MIIdx.getDefIndex();
319 // Earlyclobbers move back one, so that they overlap the live range
321 if (MO.isEarlyClobber())
322 defIndex = MIIdx.getUseIndex();
// Record the defining copy (subreg ops or a target move) for coalescing.
323 MachineInstr *CopyMI = NULL;
324 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
325 if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
326 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
329 VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
331 assert(ValNo->id == 0 && "First value in interval is not 0?");
333 // Loop over all of the blocks that the vreg is defined in. There are
334 // two cases we have to handle here. The most common case is a vreg
335 // whose lifetime is contained within a basic block. In this case there
336 // will be a single kill, in MBB, which comes after the definition.
337 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
338 // FIXME: what about dead vars?
340 if (vi.Kills[0] != mi)
341 killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
343 killIdx = defIndex.getStoreIndex();
345 // If the kill happens after the definition, we have an intra-block
347 if (killIdx > defIndex) {
348 assert(vi.AliveBlocks.empty() &&
349 "Shouldn't be alive across any blocks!");
350 LiveRange LR(defIndex, killIdx, ValNo);
351 interval.addRange(LR);
352 DEBUG(dbgs() << " +" << LR << "\n");
353 ValNo->addKill(killIdx);
358 // The other case we handle is when a virtual register lives to the end
359 // of the defining block, potentially live across some blocks, then is
360 // live into some number of blocks, but gets killed. Start by adding a
361 // range that goes from this definition to the end of the defining block.
362 LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
363 DEBUG(dbgs() << " +" << NewLR);
364 interval.addRange(NewLR);
366 bool PHIJoin = lv_->isPHIJoin(interval.reg);
369 // A phi join register is killed at the end of the MBB and revived as a new
370 // valno in the killing blocks.
371 assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
372 DEBUG(dbgs() << " phi-join");
373 ValNo->addKill(indexes_->getTerminatorGap(mbb));
374 ValNo->setHasPHIKill(true);
376 // Iterate over all of the blocks that the variable is completely
377 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
379 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
380 E = vi.AliveBlocks.end(); I != E; ++I) {
381 MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
382 LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
383 interval.addRange(LR);
384 DEBUG(dbgs() << " +" << LR);
388 // Finally, this virtual register is live from the start of any killing
389 // block to the 'use' slot of the killing instruction.
390 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
391 MachineInstr *Kill = vi.Kills[i];
392 SlotIndex Start = getMBBStartIdx(Kill->getParent());
393 SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();
395 // Create interval with one of a NEW value number. Note that this value
396 // number isn't actually defined by an instruction, weird huh? :)
398 ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
400 ValNo->setIsPHIDef(true);
402 LiveRange LR(Start, killIdx, ValNo);
403 interval.addRange(LR);
404 ValNo->addKill(killIdx);
405 DEBUG(dbgs() << " +" << LR);
// --- Non-first def of this vreg from here on. ---
409 if (MultipleDefsBySameMI(*mi, MOIdx))
410 // Multiple defs of the same virtual register by the same instruction. e.g.
411 // %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
412 // This is likely due to elimination of REG_SEQUENCE instructions. Return
413 // here since there is nothing to do.
416 // If this is the second time we see a virtual register definition, it
417 // must be due to phi elimination or two addr elimination. If this is
418 // the result of two address elimination, then the vreg is one of the
419 // def-and-use register operand.
421 // It may also be partial redef like this:
422 // 80 %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
423 // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
424 bool PartReDef = isPartialRedef(MIIdx, MO, interval);
425 if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
426 // If this is a two-address definition, then we have already processed
427 // the live range. The only problem is that we didn't realize there
428 // are actually two values in the live interval. Because of this we
429 // need to take the LiveRegion that defines this register and split it
431 // Two-address vregs should always only be redefined once. This means
432 // that at this point, there should be exactly one value number in it.
433 assert((PartReDef || interval.containsOneValue()) &&
434 "Unexpected 2-addr liveint!");
435 SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
436 SlotIndex RedefIndex = MIIdx.getDefIndex();
437 if (MO.isEarlyClobber())
438 RedefIndex = MIIdx.getUseIndex();
440 const LiveRange *OldLR =
441 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
442 VNInfo *OldValNo = OldLR->valno;
444 // Delete the initial value, which should be short and continuous,
445 // because the 2-addr copy must be in the same MBB as the redef.
446 interval.removeRange(DefIndex, RedefIndex);
448 // The new value number (#1) is defined by the instruction we claimed
450 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
451 false, // update at *
453 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
455 // Value#0 is now defined by the 2-addr instruction.
456 OldValNo->def = RedefIndex;
458 OldValNo->setCopy(0);
460 // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
461 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
462 if (tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
463 OldValNo->setCopy(&*mi);
466 // Add the new live interval which replaces the range for the input copy.
467 LiveRange LR(DefIndex, RedefIndex, ValNo);
468 DEBUG(dbgs() << " replace range with " << LR);
469 interval.addRange(LR);
470 ValNo->addKill(RedefIndex);
472 // If this redefinition is dead, we need to add a dummy unit live
473 // range covering the def slot.
475 interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
479 dbgs() << " RESULT: ";
480 interval.print(dbgs(), tri_);
482 } else if (lv_->isPHIJoin(interval.reg)) {
483 // In the case of PHI elimination, each variable definition is only
484 // live until the end of the block. We've already taken care of the
485 // rest of the live range.
487 SlotIndex defIndex = MIIdx.getDefIndex();
488 if (MO.isEarlyClobber())
489 defIndex = MIIdx.getUseIndex();
492 MachineInstr *CopyMI = NULL;
493 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
494 if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
495 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
497 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
// Phi-join defs live exactly to the end of the defining block.
499 SlotIndex killIndex = getMBBEndIdx(mbb);
500 LiveRange LR(defIndex, killIndex, ValNo);
501 interval.addRange(LR);
502 ValNo->addKill(indexes_->getTerminatorGap(mbb));
503 ValNo->setHasPHIKill(true);
504 DEBUG(dbgs() << " phi-join +" << LR);
// Any other multiple-def of a vreg is a bug in an earlier pass.
506 llvm_unreachable("Multiply defined register");
510 DEBUG(dbgs() << '\n');
// handlePhysicalRegisterDef - Extend 'interval' for a def of a physical
// register: scan forward in the defining block for the kill or redef that
// ends the range, or mark the def dead.
513 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
514 MachineBasicBlock::iterator mi,
517 LiveInterval &interval,
518 MachineInstr *CopyMI) {
519 // A physical register cannot be live across basic blocks, so its
520 // lifetime must end somewhere in its defining basic block.
522 dbgs() << "\t\tregister: ";
523 printRegName(interval.reg, tri_);
526 SlotIndex baseIndex = MIIdx;
527 SlotIndex start = baseIndex.getDefIndex();
528 // Earlyclobbers move back one.
529 if (MO.isEarlyClobber())
530 start = MIIdx.getUseIndex();
531 SlotIndex end = start;
533 // If it is not used after definition, it is considered dead at
534 // the instruction defining it. Hence its interval is:
535 // [defSlot(def), defSlot(def)+1)
536 // For earlyclobbers, the defSlot was pushed back one; the extra
537 // advance below compensates.
539 DEBUG(dbgs() << " dead");
540 end = start.getStoreIndex();
544 // If it is not dead on definition, it must be killed by a
545 // subsequent instruction. Hence its interval is:
546 // [defSlot(def), useSlot(kill)+1)
547 baseIndex = baseIndex.getNextIndex();
548 while (++mi != MBB->end()) {
// DBG_VALUE instructions occupy no slot and are skipped.
550 if (mi->isDebugValue())
552 if (getInstructionFromIndex(baseIndex) == 0)
553 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
555 if (mi->killsRegister(interval.reg, tri_)) {
556 DEBUG(dbgs() << " killed");
557 end = baseIndex.getDefIndex();
560 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
562 if (mi->isRegTiedToUseOperand(DefIdx)) {
563 // Two-address instruction.
564 end = baseIndex.getDefIndex();
566 // Another instruction redefines the register before it is ever read.
567 // Then the register is essentially dead at the instruction that
568 // defines it. Hence its interval is:
569 // [defSlot(def), defSlot(def)+1)
570 DEBUG(dbgs() << " dead");
571 end = start.getStoreIndex();
577 baseIndex = baseIndex.getNextIndex();
580 // The only case we should have a dead physreg here without a killing or
581 // instruction where we know it's dead is if it is live-in to the function
582 // and never used. Another possible case is the implicit use of the
583 // physical register has been deleted by two-address pass.
584 end = start.getStoreIndex();
587 assert(start < end && "did not find end of interval?");
589 // Already exists? Extend old live interval.
590 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
591 bool Extend = OldLR != interval.end();
592 VNInfo *ValNo = Extend
593 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
// Record that an early-clobber redef overlaps the extended value.
594 if (MO.isEarlyClobber() && Extend)
595 ValNo->setHasRedefByEC(true);
596 LiveRange LR(start, end, ValNo);
597 interval.addRange(LR);
598 LR.valno->addKill(end);
599 DEBUG(dbgs() << " +" << LR << '\n');
// handleRegisterDef - Dispatch a register def to the virtual or physical
// handler; for allocatable physregs, also create ranges for every
// sub-register the def implicitly clobbers.
602 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
603 MachineBasicBlock::iterator MI,
607 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
608 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
609 getOrCreateInterval(MO.getReg()));
// Non-allocatable physregs (e.g. reserved registers) get no intervals.
610 else if (allocatableRegs_[MO.getReg()]) {
611 MachineInstr *CopyMI = NULL;
612 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
613 if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
614 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
616 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
617 getOrCreateInterval(MO.getReg()), CopyMI);
618 // Def of a register also defines its sub-registers.
619 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
620 // If MI also modifies the sub-register explicitly, avoid processing it
621 // more than once. Do not pass in TRI here so it checks for exact match.
622 if (!MI->modifiesRegister(*AS))
623 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
624 getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Build the range for a register that is live into
// MBB: from the block start to its first kill/redef, or through the whole
// block if never referenced. The resulting value is marked as a PHI-def
// since it has no visible defining instruction.
628 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
630 LiveInterval &interval, bool isAlias) {
632 dbgs() << "\t\tlivein register: ";
633 printRegName(interval.reg, tri_);
636 // Look for kills, if it reaches a def before it's killed, then it shouldn't
637 // be considered a livein.
638 MachineBasicBlock::iterator mi = MBB->begin();
639 MachineBasicBlock::iterator E = MBB->end();
640 // Skip over DBG_VALUE at the start of the MBB.
641 if (mi != E && mi->isDebugValue()) {
642 while (++mi != E && mi->isDebugValue())
645 // MBB is empty except for DBG_VALUE's.
649 SlotIndex baseIndex = MIIdx;
650 SlotIndex start = baseIndex;
651 if (getInstructionFromIndex(baseIndex) == 0)
652 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
654 SlotIndex end = baseIndex;
655 bool SeenDefUse = false;
658 if (mi->killsRegister(interval.reg, tri_)) {
659 DEBUG(dbgs() << " killed");
660 end = baseIndex.getDefIndex();
663 } else if (mi->modifiesRegister(interval.reg, tri_)) {
664 // Another instruction redefines the register before it is ever read.
665 // Then the register is essentially dead at the instruction that defines
666 // it. Hence its interval is:
667 // [defSlot(def), defSlot(def)+1)
668 DEBUG(dbgs() << " dead");
669 end = start.getStoreIndex();
674 while (++mi != E && mi->isDebugValue())
675 // Skip over DBG_VALUE.
678 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
681 // Live-in register might not be used at all.
684 DEBUG(dbgs() << " dead");
685 end = MIIdx.getStoreIndex();
687 DEBUG(dbgs() << " live through");
// The value has no defining instruction: create it at the block start and
// flag it as a PHI-def.
693 interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
694 0, false, VNInfoAllocator);
695 vni->setIsPHIDef(true);
696 LiveRange LR(start, end, vni);
698 interval.addRange(LR);
699 LR.valno->addKill(end);
700 DEBUG(dbgs() << " +" << LR << '\n');
703 /// computeIntervals - computes the live intervals for virtual
704 /// registers. for some ordering of the machine instructions [1,N] a
705 /// live interval is an interval [i, j) where 1 <= i <= j < N for
706 /// which a variable is live
707 void LiveIntervals::computeIntervals() {
708 DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
709 << "********** Function: "
710 << ((Value*)mf_->getFunction())->getName() << '\n');
// Registers whose only references are <undef> uses; handled after the scan.
712 SmallVector<unsigned, 8> UndefUses;
713 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
715 MachineBasicBlock *MBB = MBBI;
719 // Track the index of the current machine instr.
720 SlotIndex MIIndex = getMBBStartIdx(MBB);
721 DEBUG(dbgs() << "BB#" << MBB->getNumber()
722 << ":\t\t# derived from " << MBB->getName() << "\n");
724 // Create intervals for live-ins to this BB first.
725 for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
726 LE = MBB->livein_end(); LI != LE; ++LI) {
727 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
728 // Multiple live-ins can alias the same register.
729 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
730 if (!hasInterval(*AS))
731 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
735 // Skip over empty initial indices.
736 if (getInstructionFromIndex(MIIndex) == 0)
737 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
739 for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
741 DEBUG(dbgs() << MIIndex << "\t" << *MI);
// DBG_VALUE has no slot index and contributes no live range.
742 if (MI->isDebugValue())
// Walk operands in reverse; only register operands matter here.
746 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
747 MachineOperand &MO = MI->getOperand(i);
748 if (!MO.isReg() || !MO.getReg())
751 // handle register defs - build intervals
753 handleRegisterDef(MBB, MI, MIIndex, MO, i);
754 else if (MO.isUndef())
755 UndefUses.push_back(MO.getReg());
758 // Move to the next instr slot.
759 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
763 // Create empty intervals for registers defined by implicit_def's (except
764 // for those implicit_def that define values which are liveout of their
766 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
767 unsigned UndefReg = UndefUses[i];
768 (void)getOrCreateInterval(UndefReg);
// createInterval - Allocate a fresh LiveInterval for 'reg'. Physical
// registers get an infinite (HUGE_VALF) spill weight; virtual registers
// start at zero.
772 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
773 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
774 return new LiveInterval(reg, Weight);
777 /// dupInterval - Duplicate a live interval. The caller is responsible for
778 /// managing the allocated memory.
779 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
780 LiveInterval *NewLI = createInterval(li->reg);
// Deep-copy the ranges/value numbers using this pass's VNInfo allocator.
781 NewLI->Copy(*li, mri_, getVNInfoAllocator());
785 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
786 /// copy field and returns the source register that defines it.
787 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
791 if (VNI->getCopy()->isExtractSubreg()) {
792 // If it's extracting out of a physical register, return the sub-register.
793 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
794 if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
795 unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
796 unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
797 if (SrcSubReg == DstSubReg)
798 // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
799 // reg1034 can still be coalesced to EDX.
801 assert(DstSubReg == 0);
// Resolve the physical sub-register being extracted.
802 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
// INSERT_SUBREG / SUBREG_TO_REG: the inserted value is operand 2.
805 } else if (VNI->getCopy()->isInsertSubreg() ||
806 VNI->getCopy()->isSubregToReg())
807 return VNI->getCopy()->getOperand(2).getReg();
// Plain register move: the target hook extracts the source.
809 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
810 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
812 llvm_unreachable("Unrecognized copy instruction!");
816 //===----------------------------------------------------------------------===//
817 // Register allocator hooks.
820 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
821 /// allow one) virtual register operand, then its uses are implicitly using
822 /// the register. Returns the virtual register.
823 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
824 MachineInstr *MI) const {
826 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
827 MachineOperand &MO = MI->getOperand(i);
// Only register uses are of interest.
828 if (!MO.isReg() || !MO.isUse())
830 unsigned Reg = MO.getReg();
// Skip the interval's own register.
831 if (Reg == 0 || Reg == li.reg)
// Unallocatable physreg uses (e.g. reserved regs) don't block remat.
834 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
835 !allocatableRegs_[Reg])
837 // FIXME: For now, only remat MI with at most one register operand.
839 "Can't rematerialize instruction with multiple register operand!");
848 /// isValNoAvailableAt - Return true if the val# of the specified interval
849 /// which reaches the given instruction also reaches the specified use index.
850 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
851 SlotIndex UseIdx) const {
// Value live at MI's own index...
852 SlotIndex Index = getInstructionIndex(MI);
853 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
// ...must be the same value live at UseIdx.
854 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
855 return UI != li.end() && UI->valno == ValNo;
858 /// isReMaterializable - Returns true if the definition MI of the specified
859 /// val# of the specified interval is re-materializable.
860 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
861 const VNInfo *ValNo, MachineInstr *MI,
862 SmallVectorImpl<LiveInterval*> &SpillIs,
// The target decides whether MI is trivially rematerializable at all.
867 if (!tii_->isTriviallyReMaterializable(MI, aa_))
870 // Target-specific code can mark an instruction as being rematerializable
871 // if it has one virtual reg use, though it had better be something like
872 // a PIC base register which is likely to be live everywhere.
873 unsigned ImpUse = getReMatImplicitUse(li, MI);
875 const LiveInterval &ImpLi = getInterval(ImpUse);
// The implicit-use value must reach every (non-debug) use of li.reg that
// this val# covers, or remat at those points would read the wrong value.
876 for (MachineRegisterInfo::use_nodbg_iterator
877 ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
879 MachineInstr *UseMI = &*ri;
880 SlotIndex UseIdx = getInstructionIndex(UseMI);
881 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
883 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
887 // If a register operand of the re-materialized instruction is going to
888 // be spilled next, then it's not legal to re-materialize this instruction.
889 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
890 if (ImpUse == SpillIs[i]->reg)
896 /// isReMaterializable - Returns true if the definition MI of the specified
897 /// val# of the specified interval is re-materializable.
// Convenience overload: no pending spills and the isLoad result is discarded.
898 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
899 const VNInfo *ValNo, MachineInstr *MI) {
900 SmallVector<LiveInterval*, 4> Dummy1;
902 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
905 /// isReMaterializable - Returns true if every definition of MI of every
906 /// val# of the specified interval is re-materializable.
907 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
908 SmallVectorImpl<LiveInterval*> &SpillIs,
// Check every value number in the interval; one failure rejects the lot.
911 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
913 const VNInfo *VNI = *i;
915 continue; // Dead val#.
916 // Is the def for the val# rematerializable?
// Without an accurate def location we cannot find the defining MI.
917 if (!VNI->isDefAccurate())
919 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
920 bool DefIsLoad = false;
922 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
929 /// FilterFoldedOps - Filter out two-address use operands. Return
930 /// true if it finds any issue with the operands that ought to prevent
// folding. Accumulates mod/ref information into MRInfo and the surviving
// operand indexes into FoldOps.
932 static bool FilterFoldedOps(MachineInstr *MI,
933 SmallVector<unsigned, 2> &Ops,
935 SmallVector<unsigned, 2> &FoldOps) {
937 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
938 unsigned OpIdx = Ops[i];
939 MachineOperand &MO = MI->getOperand(OpIdx);
940 // FIXME: fold subreg use.
// A def operand marks the fold as modifying memory.
944 MRInfo |= (unsigned)VirtRegMap::isMod;
946 // Filter out two-address use operand(s).
947 if (MI->isRegTiedToDefOperand(OpIdx)) {
// A tied use both reads and writes; record it but don't fold the operand.
948 MRInfo = VirtRegMap::isModRef;
951 MRInfo |= (unsigned)VirtRegMap::isRef;
953 FoldOps.push_back(OpIdx);
959 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
960 /// slot / to reg or any rematerialized load into ith operand of specified
961 /// MI. If it is successful, MI is updated with the newly created MI and
963 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
964 VirtRegMap &vrm, MachineInstr *DefMI,
966 SmallVector<unsigned, 2> &Ops,
967 bool isSS, int Slot, unsigned Reg) {
968 // If it is an implicit def instruction, just delete it.
969 if (MI->isImplicitDef()) {
// Drop it from both this analysis' and the VirtRegMap's instruction maps
// before erasing, so no stale pointers remain.
970 RemoveMachineInstrFromMaps(MI);
971 vrm.RemoveMachineInstrFromMaps(MI);
972 MI->eraseFromParent();
977 // Filter the list of operand indexes that are to be folded. Abort if
978 // any operand will prevent folding.
980 SmallVector<unsigned, 2> FoldOps;
981 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
984 // The only time it's safe to fold into a two address instruction is when
985 // it's folding reload and spill from / into a spill stack slot.
986 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Fold either the stack slot (spill/restore) or the remat def instruction.
989 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
990 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
992 // Remember this instruction uses the spill slot.
993 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
995 // Attempt to fold the memory reference into the instruction. If
996 // we can do this, we don't need to insert spill code.
997 MachineBasicBlock &MBB = *MI->getParent();
998 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
999 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Migrate all VirtRegMap bookkeeping from the old instruction to the new one.
1000 vrm.transferSpillPts(MI, fmi);
1001 vrm.transferRestorePts(MI, fmi);
1002 vrm.transferEmergencySpills(MI, fmi);
1003 ReplaceMachineInstrInMaps(MI, fmi);
// Splice the folded instruction in place of the original; MI is an in/out
// reference parameter, so the caller sees the replacement.
1004 MI = MBB.insert(MBB.erase(MI), fmi);
1011 /// canFoldMemoryOperand - Returns true if the specified load / store
1012 /// folding is possible.
1013 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1014 SmallVector<unsigned, 2> &Ops,
1016 // Filter the list of operand indexes that are to be folded. Abort if
1017 // any operand will prevent folding.
1018 unsigned MRInfo = 0;
1019 SmallVector<unsigned, 2> FoldOps;
1020 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1023 // It's only legal to remat for a use, not a def.
1024 if (ReMat && (MRInfo & VirtRegMap::isMod))
1027 return tii_->canFoldMemoryOperand(MI, FoldOps);
1030 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1031 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
1033 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
1038 for (++itr; itr != li.ranges.end(); ++itr) {
1039 MachineBasicBlock *mbb2 =
1040 indexes_->getMBBCoveringRange(itr->start, itr->end);
1049 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1050 /// interval on to-be re-materialized operands of MI) with new register.
1051 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1052 MachineInstr *MI, unsigned NewVReg,
1054 // There is an implicit use. That means one of the other operand is
1055 // being remat'ed and the remat'ed instruction has li.reg as an
1056 // use operand. Make sure we rewrite that as well.
1057 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1058 MachineOperand &MO = MI->getOperand(i);
1061 unsigned Reg = MO.getReg();
1062 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1064 if (!vrm.isReMaterialized(Reg))
1066 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1067 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1069 UseMO->setReg(NewVReg);
1073 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1074 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1075 bool LiveIntervals::
1076 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1077 bool TrySplit, SlotIndex index, SlotIndex end,
1079 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1080 unsigned Slot, int LdSlot,
1081 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1083 const TargetRegisterClass* rc,
1084 SmallVector<int, 4> &ReMatIds,
1085 const MachineLoopInfo *loopInfo,
1086 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1087 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1088 std::vector<LiveInterval*> &NewLIs) {
1089 bool CanFold = false;
1091 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1092 MachineOperand& mop = MI->getOperand(i);
1095 unsigned Reg = mop.getReg();
1096 unsigned RegI = Reg;
1097 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1102 bool TryFold = !DefIsReMat;
1103 bool FoldSS = true; // Default behavior unless it's a remat.
1104 int FoldSlot = Slot;
1106 // If this is the rematerializable definition MI itself and
1107 // all of its uses are rematerialized, simply delete it.
1108 if (MI == ReMatOrigDefMI && CanDelete) {
1109 DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
1111 RemoveMachineInstrFromMaps(MI);
1112 vrm.RemoveMachineInstrFromMaps(MI);
1113 MI->eraseFromParent();
1117 // If def for this use can't be rematerialized, then try folding.
1118 // If def is rematerializable and it's a load, also try folding.
1119 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1121 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1127 // Scan all of the operands of this instruction rewriting operands
1128 // to use NewVReg instead of li.reg as appropriate. We do this for
1131 // 1. If the instr reads the same spilled vreg multiple times, we
1132 // want to reuse the NewVReg.
1133 // 2. If the instr is a two-addr instruction, we are required to
1134 // keep the src/dst regs pinned.
1136 // Keep track of whether we replace a use and/or def so that we can
1137 // create the spill interval with the appropriate range.
1139 HasUse = mop.isUse();
1140 HasDef = mop.isDef();
1141 SmallVector<unsigned, 2> Ops;
1143 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1144 const MachineOperand &MOj = MI->getOperand(j);
1147 unsigned RegJ = MOj.getReg();
1148 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1152 if (!MOj.isUndef()) {
1153 HasUse |= MOj.isUse();
1154 HasDef |= MOj.isDef();
1159 // Create a new virtual register for the spill interval.
1160 // Create the new register now so we can map the fold instruction
1161 // to the new register so when it is unfolded we get the correct
1163 bool CreatedNewVReg = false;
1165 NewVReg = mri_->createVirtualRegister(rc);
1167 CreatedNewVReg = true;
1169 // The new virtual register should get the same allocation hints as the
1171 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1172 if (Hint.first || Hint.second)
1173 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1179 // Do not fold load / store here if we are splitting. We'll find an
1180 // optimal point to insert a load / store later.
1182 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1183 Ops, FoldSS, FoldSlot, NewVReg)) {
1184 // Folding the load/store can completely change the instruction in
1185 // unpredictable ways, rescan it from the beginning.
1188 // We need to give the new vreg the same stack slot as the
1189 // spilled interval.
1190 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1196 if (isNotInMIMap(MI))
1198 goto RestartInstruction;
1201 // We'll try to fold it later if it's profitable.
1202 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1206 mop.setReg(NewVReg);
1207 if (mop.isImplicit())
1208 rewriteImplicitOps(li, MI, NewVReg, vrm);
1210 // Reuse NewVReg for other reads.
1211 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1212 MachineOperand &mopj = MI->getOperand(Ops[j]);
1213 mopj.setReg(NewVReg);
1214 if (mopj.isImplicit())
1215 rewriteImplicitOps(li, MI, NewVReg, vrm);
1218 if (CreatedNewVReg) {
1220 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1221 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1222 // Each valnum may have its own remat id.
1223 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1225 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1227 if (!CanDelete || (HasUse && HasDef)) {
1228 // If this is a two-addr instruction then its use operands are
1229 // rematerializable but its def is not. It should be assigned a
1231 vrm.assignVirt2StackSlot(NewVReg, Slot);
1234 vrm.assignVirt2StackSlot(NewVReg, Slot);
1236 } else if (HasUse && HasDef &&
1237 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1238 // If this interval hasn't been assigned a stack slot (because earlier
1239 // def is a deleted remat def), do it now.
1240 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1241 vrm.assignVirt2StackSlot(NewVReg, Slot);
1244 // Re-matting an instruction with virtual register use. Add the
1245 // register as an implicit use on the use MI.
1246 if (DefIsReMat && ImpUse)
1247 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1249 // Create a new register interval for this spill / remat.
1250 LiveInterval &nI = getOrCreateInterval(NewVReg);
1251 if (CreatedNewVReg) {
1252 NewLIs.push_back(&nI);
1253 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1255 vrm.setIsSplitFromReg(NewVReg, li.reg);
1259 if (CreatedNewVReg) {
1260 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1261 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1262 DEBUG(dbgs() << " +" << LR);
1265 // Extend the split live interval to this def / use.
1266 SlotIndex End = index.getDefIndex();
1267 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1268 nI.getValNumInfo(nI.getNumValNums()-1));
1269 DEBUG(dbgs() << " +" << LR);
1274 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1275 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1276 DEBUG(dbgs() << " +" << LR);
1281 dbgs() << "\t\t\t\tAdded new interval: ";
1282 nI.print(dbgs(), tri_);
1288 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1290 MachineBasicBlock *MBB,
1291 SlotIndex Idx) const {
1292 SlotIndex End = getMBBEndIdx(MBB);
1293 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1294 if (VNI->kills[j].isPHI())
1297 SlotIndex KillIdx = VNI->kills[j];
1298 if (KillIdx > Idx && KillIdx <= End)
1304 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1305 /// during spilling.
1307 struct RewriteInfo {
1312 RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
1313 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1316 struct RewriteInfoCompare {
1317 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1318 return LHS.Index < RHS.Index;
1323 void LiveIntervals::
1324 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1325 LiveInterval::Ranges::const_iterator &I,
1326 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1327 unsigned Slot, int LdSlot,
1328 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1330 const TargetRegisterClass* rc,
1331 SmallVector<int, 4> &ReMatIds,
1332 const MachineLoopInfo *loopInfo,
1333 BitVector &SpillMBBs,
1334 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1335 BitVector &RestoreMBBs,
1336 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1337 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1338 std::vector<LiveInterval*> &NewLIs) {
1339 bool AllCanFold = true;
1340 unsigned NewVReg = 0;
1341 SlotIndex start = I->start.getBaseIndex();
1342 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1344 // First collect all the def / use in this live range that will be rewritten.
1345 // Make sure they are sorted according to instruction index.
1346 std::vector<RewriteInfo> RewriteMIs;
1347 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1348 re = mri_->reg_end(); ri != re; ) {
1349 MachineInstr *MI = &*ri;
1350 MachineOperand &O = ri.getOperand();
1352 if (MI->isDebugValue()) {
1353 // Modify DBG_VALUE now that the value is in a spill slot.
1354 if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
1355 uint64_t Offset = MI->getOperand(1).getImm();
1356 const MDNode *MDPtr = MI->getOperand(2).getMetadata();
1357 DebugLoc DL = MI->getDebugLoc();
1358 int FI = isLoadSS ? LdSlot : (int)Slot;
1359 if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
1360 Offset, MDPtr, DL)) {
1361 DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
1362 ReplaceMachineInstrInMaps(MI, NewDV);
1363 MachineBasicBlock *MBB = MI->getParent();
1364 MBB->insert(MBB->erase(MI), NewDV);
1369 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1370 RemoveMachineInstrFromMaps(MI);
1371 vrm.RemoveMachineInstrFromMaps(MI);
1372 MI->eraseFromParent();
1375 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1376 SlotIndex index = getInstructionIndex(MI);
1377 if (index < start || index >= end)
1381 // Must be defined by an implicit def. It should not be spilled. Note,
1382 // this is for correctness reason. e.g.
1383 // 8 %reg1024<def> = IMPLICIT_DEF
1384 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1385 // The live range [12, 14) are not part of the r1024 live interval since
1386 // it's defined by an implicit def. It will not conflicts with live
1387 // interval of r1025. Now suppose both registers are spilled, you can
1388 // easily see a situation where both registers are reloaded before
1389 // the INSERT_SUBREG and both target registers that would overlap.
1391 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1393 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1395 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1396 // Now rewrite the defs and uses.
1397 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1398 RewriteInfo &rwi = RewriteMIs[i];
1400 SlotIndex index = rwi.Index;
1401 bool MIHasUse = rwi.HasUse;
1402 bool MIHasDef = rwi.HasDef;
1403 MachineInstr *MI = rwi.MI;
1404 // If MI def and/or use the same register multiple times, then there
1405 // are multiple entries.
1406 unsigned NumUses = MIHasUse;
1407 while (i != e && RewriteMIs[i].MI == MI) {
1408 assert(RewriteMIs[i].Index == index);
1409 bool isUse = RewriteMIs[i].HasUse;
1410 if (isUse) ++NumUses;
1412 MIHasDef |= RewriteMIs[i].HasDef;
1415 MachineBasicBlock *MBB = MI->getParent();
1417 if (ImpUse && MI != ReMatDefMI) {
1418 // Re-matting an instruction with virtual register use. Prevent interval
1419 // from being spilled.
1420 getInterval(ImpUse).markNotSpillable();
1423 unsigned MBBId = MBB->getNumber();
1424 unsigned ThisVReg = 0;
1426 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1427 if (NVI != MBBVRegsMap.end()) {
1428 ThisVReg = NVI->second;
1435 // It's better to start a new interval to avoid artifically
1436 // extend the new interval.
1437 if (MIHasDef && !MIHasUse) {
1438 MBBVRegsMap.erase(MBB->getNumber());
1444 bool IsNew = ThisVReg == 0;
1446 // This ends the previous live interval. If all of its def / use
1447 // can be folded, give it a low spill weight.
1448 if (NewVReg && TrySplit && AllCanFold) {
1449 LiveInterval &nI = getOrCreateInterval(NewVReg);
1456 bool HasDef = false;
1457 bool HasUse = false;
1458 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1459 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1460 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1461 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1462 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1463 if (!HasDef && !HasUse)
1466 AllCanFold &= CanFold;
1468 // Update weight of spill interval.
1469 LiveInterval &nI = getOrCreateInterval(NewVReg);
1471 // The spill weight is now infinity as it cannot be spilled again.
1472 nI.markNotSpillable();
1476 // Keep track of the last def and first use in each MBB.
1478 if (MI != ReMatOrigDefMI || !CanDelete) {
1479 bool HasKill = false;
1481 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1483 // If this is a two-address code, then this index starts a new VNInfo.
1484 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1486 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1488 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1489 SpillIdxes.find(MBBId);
1491 if (SII == SpillIdxes.end()) {
1492 std::vector<SRInfo> S;
1493 S.push_back(SRInfo(index, NewVReg, true));
1494 SpillIdxes.insert(std::make_pair(MBBId, S));
1495 } else if (SII->second.back().vreg != NewVReg) {
1496 SII->second.push_back(SRInfo(index, NewVReg, true));
1497 } else if (index > SII->second.back().index) {
1498 // If there is an earlier def and this is a two-address
1499 // instruction, then it's not possible to fold the store (which
1500 // would also fold the load).
1501 SRInfo &Info = SII->second.back();
1503 Info.canFold = !HasUse;
1505 SpillMBBs.set(MBBId);
1506 } else if (SII != SpillIdxes.end() &&
1507 SII->second.back().vreg == NewVReg &&
1508 index > SII->second.back().index) {
1509 // There is an earlier def that's not killed (must be two-address).
1510 // The spill is no longer needed.
1511 SII->second.pop_back();
1512 if (SII->second.empty()) {
1513 SpillIdxes.erase(MBBId);
1514 SpillMBBs.reset(MBBId);
1521 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1522 SpillIdxes.find(MBBId);
1523 if (SII != SpillIdxes.end() &&
1524 SII->second.back().vreg == NewVReg &&
1525 index > SII->second.back().index)
1526 // Use(s) following the last def, it's not safe to fold the spill.
1527 SII->second.back().canFold = false;
1528 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1529 RestoreIdxes.find(MBBId);
1530 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1531 // If we are splitting live intervals, only fold if it's the first
1532 // use and there isn't another use later in the MBB.
1533 RII->second.back().canFold = false;
1535 // Only need a reload if there isn't an earlier def / use.
1536 if (RII == RestoreIdxes.end()) {
1537 std::vector<SRInfo> Infos;
1538 Infos.push_back(SRInfo(index, NewVReg, true));
1539 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1541 RII->second.push_back(SRInfo(index, NewVReg, true));
1543 RestoreMBBs.set(MBBId);
1547 // Update spill weight.
1548 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1549 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1552 if (NewVReg && TrySplit && AllCanFold) {
1553 // If all of its def / use can be folded, give it a low spill weight.
1554 LiveInterval &nI = getOrCreateInterval(NewVReg);
1559 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1560 unsigned vr, BitVector &RestoreMBBs,
1561 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1562 if (!RestoreMBBs[Id])
1564 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1565 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1566 if (Restores[i].index == index &&
1567 Restores[i].vreg == vr &&
1568 Restores[i].canFold)
1573 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1574 unsigned vr, BitVector &RestoreMBBs,
1575 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1576 if (!RestoreMBBs[Id])
1578 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1579 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1580 if (Restores[i].index == index && Restores[i].vreg)
1581 Restores[i].index = SlotIndex();
1584 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1585 /// spilled and create empty intervals for their uses.
1587 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1588 const TargetRegisterClass* rc,
1589 std::vector<LiveInterval*> &NewLIs) {
1590 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1591 re = mri_->reg_end(); ri != re; ) {
1592 MachineOperand &O = ri.getOperand();
1593 MachineInstr *MI = &*ri;
1595 if (MI->isDebugValue()) {
1596 // Remove debug info for now.
1598 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1602 assert(MI->isImplicitDef() &&
1603 "Register def was not rewritten?");
1604 RemoveMachineInstrFromMaps(MI);
1605 vrm.RemoveMachineInstrFromMaps(MI);
1606 MI->eraseFromParent();
1608 // This must be an use of an implicit_def so it's not part of the live
1609 // interval. Create a new empty live interval for it.
1610 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1611 unsigned NewVReg = mri_->createVirtualRegister(rc);
1613 vrm.setIsImplicitlyDefined(NewVReg);
1614 NewLIs.push_back(&getOrCreateInterval(NewVReg));
1615 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1616 MachineOperand &MO = MI->getOperand(i);
1617 if (MO.isReg() && MO.getReg() == li.reg) {
1627 LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1628 // Limit the loop depth ridiculousness.
1629 if (loopDepth > 200)
1632 // The loop depth is used to roughly estimate the number of times the
1633 // instruction is executed. Something like 10^d is simple, but will quickly
1634 // overflow a float. This expression behaves like 10^d for small d, but is
1635 // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1636 // headroom before overflow.
1637 float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
1639 return (isDef + isUse) * lc;
1643 LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1644 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1645 normalizeSpillWeight(*NewLIs[i]);
1648 std::vector<LiveInterval*> LiveIntervals::
1649 addIntervalsForSpillsFast(const LiveInterval &li,
1650 const MachineLoopInfo *loopInfo,
1652 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1654 std::vector<LiveInterval*> added;
1656 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1659 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1664 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1666 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1667 while (RI != mri_->reg_end()) {
1668 MachineInstr* MI = &*RI;
1670 SmallVector<unsigned, 2> Indices;
1671 bool HasUse = false;
1672 bool HasDef = false;
1674 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1675 MachineOperand& mop = MI->getOperand(i);
1676 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1678 HasUse |= MI->getOperand(i).isUse();
1679 HasDef |= MI->getOperand(i).isDef();
1681 Indices.push_back(i);
1684 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1685 Indices, true, slot, li.reg)) {
1686 unsigned NewVReg = mri_->createVirtualRegister(rc);
1688 vrm.assignVirt2StackSlot(NewVReg, slot);
1690 // create a new register for this spill
1691 LiveInterval &nI = getOrCreateInterval(NewVReg);
1692 nI.markNotSpillable();
1694 // Rewrite register operands to use the new vreg.
1695 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1696 E = Indices.end(); I != E; ++I) {
1697 MI->getOperand(*I).setReg(NewVReg);
1699 if (MI->getOperand(*I).isUse())
1700 MI->getOperand(*I).setIsKill(true);
1703 // Fill in the new live interval.
1704 SlotIndex index = getInstructionIndex(MI);
1706 LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1707 nI.getNextValue(SlotIndex(), 0, false,
1708 getVNInfoAllocator()));
1709 DEBUG(dbgs() << " +" << LR);
1711 vrm.addRestorePoint(NewVReg, MI);
1714 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1715 nI.getNextValue(SlotIndex(), 0, false,
1716 getVNInfoAllocator()));
1717 DEBUG(dbgs() << " +" << LR);
1719 vrm.addSpillPoint(NewVReg, true, MI);
1722 added.push_back(&nI);
1725 dbgs() << "\t\t\t\tadded new interval: ";
1732 RI = mri_->reg_begin(li.reg);
1738 std::vector<LiveInterval*> LiveIntervals::
1739 addIntervalsForSpills(const LiveInterval &li,
1740 SmallVectorImpl<LiveInterval*> &SpillIs,
1741 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1743 if (EnableFastSpilling)
1744 return addIntervalsForSpillsFast(li, loopInfo, vrm);
1746 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1749 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1750 li.print(dbgs(), tri_);
1754 // Each bit specify whether a spill is required in the MBB.
1755 BitVector SpillMBBs(mf_->getNumBlockIDs());
1756 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1757 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1758 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1759 DenseMap<unsigned,unsigned> MBBVRegsMap;
1760 std::vector<LiveInterval*> NewLIs;
1761 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1763 unsigned NumValNums = li.getNumValNums();
1764 SmallVector<MachineInstr*, 4> ReMatDefs;
1765 ReMatDefs.resize(NumValNums, NULL);
1766 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1767 ReMatOrigDefs.resize(NumValNums, NULL);
1768 SmallVector<int, 4> ReMatIds;
1769 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1770 BitVector ReMatDelete(NumValNums);
1771 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1773 // Spilling a split live interval. It cannot be split any further. Also,
1774 // it's also guaranteed to be a single val# / range interval.
1775 if (vrm.getPreSplitReg(li.reg)) {
1776 vrm.setIsSplitFromReg(li.reg, 0);
1777 // Unset the split kill marker on the last use.
1778 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1779 if (KillIdx != SlotIndex()) {
1780 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1781 assert(KillMI && "Last use disappeared?");
1782 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1783 assert(KillOp != -1 && "Last use disappeared?");
1784 KillMI->getOperand(KillOp).setIsKill(false);
1786 vrm.removeKillPoint(li.reg);
1787 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1788 Slot = vrm.getStackSlot(li.reg);
1789 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1790 MachineInstr *ReMatDefMI = DefIsReMat ?
1791 vrm.getReMaterializedMI(li.reg) : NULL;
1793 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1794 bool isLoad = isLoadSS ||
1795 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1796 bool IsFirstRange = true;
1797 for (LiveInterval::Ranges::const_iterator
1798 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1799 // If this is a split live interval with multiple ranges, it means there
1800 // are two-address instructions that re-defined the value. Only the
1801 // first def can be rematerialized!
1803 // Note ReMatOrigDefMI has already been deleted.
1804 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1805 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1806 false, vrm, rc, ReMatIds, loopInfo,
1807 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1808 MBBVRegsMap, NewLIs);
1810 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1811 Slot, 0, false, false, false,
1812 false, vrm, rc, ReMatIds, loopInfo,
1813 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1814 MBBVRegsMap, NewLIs);
1816 IsFirstRange = false;
1819 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1820 normalizeSpillWeights(NewLIs);
1824 bool TrySplit = !intervalIsInOneMBB(li);
1827 bool NeedStackSlot = false;
1828 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1830 const VNInfo *VNI = *i;
1831 unsigned VN = VNI->id;
1832 if (VNI->isUnused())
1833 continue; // Dead val#.
1834 // Is the def for the val# rematerializable?
1835 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1836 ? getInstructionFromIndex(VNI->def) : 0;
1838 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1839 // Remember how to remat the def of this val#.
1840 ReMatOrigDefs[VN] = ReMatDefMI;
1841 // Original def may be modified so we have to make a copy here.
1842 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1843 CloneMIs.push_back(Clone);
1844 ReMatDefs[VN] = Clone;
1846 bool CanDelete = true;
1847 if (VNI->hasPHIKill()) {
1848 // A kill is a phi node, not all of its uses can be rematerialized.
1849 // It must not be deleted.
1851 // Need a stack slot if there is any live range where uses cannot be
1853 NeedStackSlot = true;
1856 ReMatDelete.set(VN);
1858 // Need a stack slot if there is any live range where uses cannot be
1860 NeedStackSlot = true;
1864 // One stack slot per live interval.
1865 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1866 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1867 Slot = vrm.assignVirt2StackSlot(li.reg);
1869 // This case only occurs when the prealloc splitter has already assigned
1870 // a stack slot to this vreg.
1872 Slot = vrm.getStackSlot(li.reg);
1875 // Create new intervals and rewrite defs and uses.
1876 for (LiveInterval::Ranges::const_iterator
1877 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1878 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1879 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1880 bool DefIsReMat = ReMatDefMI != NULL;
1881 bool CanDelete = ReMatDelete[I->valno->id];
1883 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1884 bool isLoad = isLoadSS ||
1885 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1886 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1887 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1888 CanDelete, vrm, rc, ReMatIds, loopInfo,
1889 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1890 MBBVRegsMap, NewLIs);
1893 // Insert spills / restores if we are splitting.
1895 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1896 normalizeSpillWeights(NewLIs);
1900 SmallPtrSet<LiveInterval*, 4> AddedKill;
1901 SmallVector<unsigned, 2> Ops;
1902 if (NeedStackSlot) {
1903 int Id = SpillMBBs.find_first();
1905 std::vector<SRInfo> &spills = SpillIdxes[Id];
1906 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1907 SlotIndex index = spills[i].index;
1908 unsigned VReg = spills[i].vreg;
1909 LiveInterval &nI = getOrCreateInterval(VReg);
1910 bool isReMat = vrm.isReMaterialized(VReg);
1911 MachineInstr *MI = getInstructionFromIndex(index);
1912 bool CanFold = false;
1913 bool FoundUse = false;
1915 if (spills[i].canFold) {
1917 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1918 MachineOperand &MO = MI->getOperand(j);
1919 if (!MO.isReg() || MO.getReg() != VReg)
1926 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1927 RestoreMBBs, RestoreIdxes))) {
1928 // MI has two-address uses of the same register. If the use
1929 // isn't the first and only use in the BB, then we can't fold
1930 // it. FIXME: Move this to rewriteInstructionsForSpills.
1937 // Fold the store into the def if possible.
1938 bool Folded = false;
1939 if (CanFold && !Ops.empty()) {
1940 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1943 // Also folded uses, do not issue a load.
1944 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1945 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1947 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1951 // Otherwise tell the spiller to issue a spill.
1953 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1954 bool isKill = LR->end == index.getStoreIndex();
1955 if (!MI->registerDefIsDead(nI.reg))
1956 // No need to spill a dead def.
1957 vrm.addSpillPoint(VReg, isKill, MI);
1959 AddedKill.insert(&nI);
1962 Id = SpillMBBs.find_next(Id);
1966 int Id = RestoreMBBs.find_first();
1968 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1969 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1970 SlotIndex index = restores[i].index;
1971 if (index == SlotIndex())
1973 unsigned VReg = restores[i].vreg;
1974 LiveInterval &nI = getOrCreateInterval(VReg);
1975 bool isReMat = vrm.isReMaterialized(VReg);
1976 MachineInstr *MI = getInstructionFromIndex(index);
1977 bool CanFold = false;
// --- Tail of addIntervalsForSpills (excerpt; intervening lines elided). ---
// Phase 1: process the queued restore points. For each use of VReg that needs
// a restore, try to fold the reload (or the rematerialized def, when the value
// is remat'able) directly into the using instruction MI; if folding fails,
// record an explicit restore point so the spiller emits a load / remat.
1979 if (restores[i].canFold) {
1981 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1982 MachineOperand &MO = MI->getOperand(j);
// Only operands that actually reference the virtual register being restored
// are candidates for folding.
1983 if (!MO.isReg() || MO.getReg() != VReg)
1987 // If this restore were to be folded, it would have been folded
1996 // Fold the load into the use if possible.
1997 bool Folded = false;
1998 if (CanFold && !Ops.empty()) {
// First attempt: fold a plain reload from the spill slot into MI.
2000 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
// Rematerialization path: fetch the instruction that re-computes the value.
2002 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2004 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2005 // If the rematerializable def is a load, also try to fold it.
2006 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2007 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2008 Ops, isLoadSS, LdSlot, VReg);
// A remat'ed def may itself read a virtual register; getReMatImplicitUse
// returns it (0 if none).
2010 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2012 // Re-matting an instruction with virtual register use. Add the
2013 // register as an implicit use on the use MI and mark the register
2014 // interval as unspillable.
2015 LiveInterval &ImpLi = getInterval(ImpUse);
2016 ImpLi.markNotSpillable();
2017 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2022 // If folding is not possible / failed, then tell the spiller to issue a
2023 // load / rematerialization for us.
// Folding succeeded: the new interval no longer needs to be live across the
// (now folded) reload, so trim that slice.
2025 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
// Folding failed: record an explicit restore point for the spiller.
2027 vrm.addRestorePoint(VReg, MI);
2029 Id = RestoreMBBs.find_next(Id);
// Phase 2: finalize the new intervals — normalize spill weights, mark the
// last use of each interval as a kill, and collect the survivors.
2032 // Finalize intervals: add kills, finalize spill weights, and filter out
2034 std::vector<LiveInterval*> RetNewLIs;
2035 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2036 LiveInterval *LI = NewLIs[i];
// Scale the weight down by interval size so short intervals are preferred.
2038 LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
// If no kill was added for this interval yet, mark its last use as a kill
// (unless that use is tied to a def, in which case the value lives on).
2039 if (!AddedKill.count(LI)) {
2040 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2041 SlotIndex LastUseIdx = LR->end.getBaseIndex();
2042 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2043 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2044 assert(UseIdx != -1);
2045 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2046 LastUse->getOperand(UseIdx).setIsKill();
2047 vrm.addKillPoint(LI->reg, LastUseIdx);
2050 RetNewLIs.push_back(LI);
// Deal with any implicit defs that were spilled, then normalize weights of
// the returned intervals.
2054 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2055 normalizeSpillWeights(RetNewLIs);
2059 /// hasAllocatableSuperReg - Return true if the specified physical register has
2060 /// any super register that's allocatable.
2061 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// Walk Reg's null-terminated super-register list; a super-register counts
// only if it is both allocatable and has a live interval of its own.
2062 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2063 if (allocatableRegs_[*AS] && hasInterval(*AS))
2068 /// getRepresentativeReg - Find the largest super register of the specified
2069 /// physical register.
2070 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2071 // Find the largest super-register that is allocatable.
// Default to Reg itself if no better super-register is found.
2072 unsigned BestReg = Reg;
2073 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2074 unsigned SuperReg = *AS;
// A super-register with an interval but no allocatable super-register of
// its own is the topmost (largest) candidate.
2075 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2083 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2084 /// specified interval that conflicts with the specified physical register.
2085 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2086 unsigned PhysReg) const {
2087 unsigned NumConflicts = 0;
// Conflicts are measured against the representative (largest allocatable
// super-register) interval of PhysReg, not PhysReg itself.
2088 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
// Visit every operand (use or def) of li.reg in the function.
2089 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2090 E = mri_->reg_end(); I != E; ++I) {
2091 MachineOperand &O = I.getOperand();
2092 MachineInstr *MI = O.getParent();
// DBG_VALUE instructions don't constrain allocation; skip them.
2093 if (MI->isDebugValue())
2095 SlotIndex Index = getInstructionIndex(MI);
// A conflict is an operand whose instruction lies inside pli's live range.
2096 if (pli.liveAt(Index))
2099 return NumConflicts;
2102 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2103 /// around all defs and uses of the specified interval. Return true if it
2104 /// was able to cut its interval.
2105 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2106 unsigned PhysReg, VirtRegMap &vrm) {
// Spill the representative (largest) super-register so all aliases of
// PhysReg are covered at once.
2107 unsigned SpillReg = getRepresentativeReg(PhysReg);
2109 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2110 // If there are registers which alias PhysReg, but which are not a
2111 // sub-register of the chosen representative super register. Assert
2112 // since we can't handle it yet.
2113 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2114 tri_->isSuperRegister(*AS, SpillReg));
// Collect SpillReg plus every sub-register that has its own interval; these
// are the physreg intervals we must cut around li's defs/uses.
2117 SmallVector<unsigned, 4> PRegs;
2118 if (hasInterval(SpillReg))
2119 PRegs.push_back(SpillReg);
2121 SmallSet<unsigned, 4> Added;
2122 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
2123 if (Added.insert(*AS) && hasInterval(*AS)) {
2124 PRegs.push_back(*AS);
2125 for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
// Visit each instruction that uses or defines li.reg exactly once.
2130 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2131 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2132 E = mri_->reg_end(); I != E; ++I) {
2133 MachineOperand &O = I.getOperand();
2134 MachineInstr *MI = O.getParent();
// Skip debug info and instructions already handled via another operand.
2135 if (MI->isDebugValue() || SeenMIs.count(MI))
2138 SlotIndex Index = getInstructionIndex(MI);
2139 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2140 unsigned PReg = PRegs[i];
2141 LiveInterval &pli = getInterval(PReg);
// Only physreg intervals live across this instruction need cutting.
2142 if (!pli.liveAt(Index))
// Ask the rewriter to spill/restore PReg around MI.
2144 vrm.addEmergencySpill(PReg, MI);
2145 SlotIndex StartIdx = Index.getLoadIndex();
2146 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
// The slice around MI can only be removed if it sits inside a single live
// range; otherwise we cannot cut the interval and must give up fatally
// (the error path below — note some lines are elided in this excerpt).
2147 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2148 pli.removeRange(StartIdx, EndIdx);
2152 raw_string_ostream Msg(msg);
2153 Msg << "Ran out of registers during register allocation!";
// Inline asm is the common way users force this situation; point them at it.
2154 if (MI->isInlineAsm()) {
2155 Msg << "\nPlease check your inline asm statement for invalid "
2156 << "constraints:\n";
2157 MI->print(Msg, tm_);
2159 report_fatal_error(Msg.str());
// Also trim any sub-register intervals that are live at this instruction.
2161 for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
2162 if (!hasInterval(*AS))
2164 LiveInterval &spli = getInterval(*AS);
2165 if (spli.liveAt(Index))
2166 spli.removeRange(Index.getLoadIndex(),
2167 Index.getNextIndex().getBaseIndex());
// addLiveRangeToEndOfBlock - Extend reg's live interval with a new value
// defined at startInst and live out to the end of startInst's basic block.
// (Excerpt: the function's tail continues past this view.)
2174 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2175 MachineInstr* startInst) {
2176 LiveInterval& Interval = getOrCreateInterval(reg);
// Create a fresh value number defined at startInst's def slot.
2177 VNInfo* VN = Interval.getNextValue(
2178 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2179 startInst, true, getVNInfoAllocator());
// The value is killed by a PHI at the terminator gap of this block.
2180 VN->setHasPHIKill(true);
2181 VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
// Range: from the def slot to the end index of the enclosing block.
2183 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2184 getMBBEndIdx(startInst->getParent()), VN);
2185 Interval.addRange(LR);