1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
49 // Hidden options for help debugging.
50 static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
53 static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
56 STATISTIC(numIntervals , "Number of original intervals");
57 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
58 STATISTIC(numSplits , "Number of intervals split");
60 char LiveIntervals::ID = 0;
61 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  // Alias analysis is consulted for rematerialization legality checks
  // (see isReMaterializable); it is required and kept up to date.
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  // LiveVariables supplies the per-vreg kill lists and alive-block sets
  // consumed by handleVirtualRegisterDef().
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  // PHI elimination and two-address lowering must run before this pass so
  // the machine code is out of SSA form when intervals are built.
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  // SlotIndexes provides the instruction numbering; required transitively
  // so it stays alive as long as the intervals are in use.
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  // NOTE(review): the loop body (deleting I->second) is elided in this
  // listing — confirm against the full source.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.DestroyAll();
  // Delete instructions that were cloned for rematerialization and never
  // inserted into a basic block (so nothing else owns them).
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    mf_->DeleteMachineInstr(MI);
102 /// runOnMachineFunction - Register allocate the whole function
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // Cache pointers to the subsystems used throughout the analysis.
  // NOTE(review): mf_ is used below but its assignment (presumably
  // mf_ = &fn;) is elided in this listing — confirm.
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);
  // Update the statistic counting intervals built for this function.
  numIntervals += getNumIntervals();
123 /// print - Implement the dump method.
/// print - Implement the dump method.  Prints every interval in the map,
/// using the target register info for symbolic register names.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
/// printInstrs - Dump every machine instruction in the function, prefixed
/// with its slot index, one basic block at a time.
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      // DBG_VALUE instructions have no slot index.
      // NOTE(review): the DBG_VALUE printing branch appears truncated in
      // this listing — confirm against the full source.
      if (mii->isDebugValue())
      OS << getInstructionIndex(mii) << '\t' << *mii;
/// dumpInstrs - Convenience wrapper that prints all instructions to the
/// debug stream (body elided in this listing — presumably printInstrs(dbgs())).
void LiveIntervals::dumpInstrs() const {
/// conflictsWithPhysReg - Check whether any instruction inside li's (single)
/// live range references a physical register that overlaps 'reg' (after
/// mapping virtual operands through the VirtRegMap).  Copies to/from li.reg
/// itself are tolerated.  Only single-range, single-block intervals are
/// analyzed; anything fancier is rejected up front.
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
  MachineBasicBlock::const_iterator E = lastMI;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;
    // Allow copies to and from li.reg
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)
    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
      // Map a virtual operand to its assigned physreg before testing
      // for overlap with 'reg'.
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
        PhysReg = vrm.getPhys(PhysReg);
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
  // No conflicts found.
/// conflictsWithSubPhysRegRef - Similar to conflictsWithPhysRegRef except
/// it checks for sub-register reference and it can check use as well.
bool LiveIntervals::conflictsWithSubPhysRegRef(LiveInterval &li,
                                          unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  // Walk every base slot covered by each live range of li, visiting the
  // instruction (if any) at that slot.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
        continue;               // skip deleted instructions
      // Copies that are being coalesced away don't count as conflicts.
      if (JoinedCopies.count(MI))
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        // Uses only conflict when the caller asked for them to be checked.
        if (MO.isUse() && !CheckUse)
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
        // A reference to a sub-register of Reg is a conflict.
        if (tri_->isSubRegister(Reg, PhysReg))
/// printRegName - Print a register to the debug stream: the symbolic target
/// name for physical registers, "%regN" otherwise.
/// NOTE(review): the 'else' line between the two prints is elided in this
/// listing — confirm against the full source.
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
    dbgs() << "%reg" << reg;
/// MultipleDefsBySameMI - Check whether the instruction defines the same
/// virtual register again in an operand after MOIdx (e.g. several
/// sub-register defs by one instruction); the assert enforces that such
/// repeated defs must target distinct, non-zero sub-registers.
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  // Scan only the operands following MOIdx for a second def of Reg.
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (MO.getReg() == Reg && MO.isDef()) {
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
/// isPartialRedef - Return true if the specified def at the specific index is
/// partially re-defining the specified live interval. A common case of this is
/// a definition of the sub-register.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  // Only a sub-register def can be a partial redef; early-clobber defs are
  // excluded here.
  if (!MO.getSubReg() || MO.isEarlyClobber())
  SlotIndex RedefIndex = MIIdx.getDefIndex();
  // Find the value live just before this def (at the use slot).
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  if (OldLR->valno->isDefAccurate()) {
    // It's a partial redef iff the previous value's defining instruction
    // also defines this register (i.e. we are overwriting part of it).
    MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
/// handleVirtualRegisterDef - Build/extend the live interval for a virtual
/// register definition.  First definitions get value number 0 and ranges
/// derived from LiveVariables kill info; later definitions are either
/// two-address/partial redefs (split the existing value) or PHI-join defs
/// (live only to the end of the block).
/// NOTE(review): parts of the signature (MIIdx, MO, MOIdx parameters) and
/// several condition/else lines are elided in this listing — confirm
/// against the full source.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             LiveInterval &interval) {
  dbgs() << "\t\tregister: ";
  printRegName(interval.reg, tri_);
  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
      mi->addRegisterDefined(interval.reg);
    // Record the instruction as the value's copy if it is a copy-like def.
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
    assert(ValNo->id == 0 && "First value in interval is not 0?");
    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
        killIdx = defIndex.getStoreIndex();
      // If the kill happens after the definition, we have an intra-block
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);
    bool PHIJoin = lv_->isPHIJoin(interval.reg);
      // A phi join register is killed at the end of the MBB and revived as a new
      // valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
           E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();
      // Create interval with one of a NEW value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
      ValNo->setIsPHIDef(true);
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(dbgs() << " +" << LR);
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.
    // It may also be partial redef like this:
    // 80 %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();
      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();
      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);
      // The new value number (#1) is defined by the instruction we claimed
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);
      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        OldValNo->setCopy(&*mi);
      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);
      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
      dbgs() << " RESULT: ";
      interval.print(dbgs(), tri_);
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
      // The PHI-join value lives from its def to the end of this block only.
      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
      llvm_unreachable("Multiply defined register");
  DEBUG(dbgs() << '\n');
/// handlePhysicalRegisterDef - Build the live range for a physical register
/// def.  Since a physreg cannot be live across blocks, the range ends at the
/// first kill, the next redef, or (if neither is found) immediately after the
/// def (dead def).
/// NOTE(review): part of the signature (MIIdx, MO) and several condition
/// lines are elided in this listing — confirm against the full source.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  dbgs() << "\t\tregister: ";
  printRegName(interval.reg, tri_);
  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;
  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  DEBUG(dbgs() << " dead");
  end = start.getStoreIndex();
  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {
    if (mi->isDebugValue())
    // Keep baseIndex in sync with mi, skipping slots whose instructions
    // have been deleted.
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
    int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
    if (mi->isRegTiedToUseOperand(DefIdx)) {
      // Two-address instruction.
      end = baseIndex.getDefIndex();
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that
      // defines it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
    baseIndex = baseIndex.getNextIndex();
  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used. Another possible case is the implicit use of the
  // physical register has been deleted by two-address pass.
  end = start.getStoreIndex();
  assert(start < end && "did not find end of interval?");
  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
/// handleRegisterDef - Dispatch a register def to the virtual- or
/// physical-register handler.  Allocatable physregs also get ranges built
/// for each of their sub-registers.
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    // Track the copy source if this def is a copy-like instruction.
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
/// handleLiveInRegister - Build a live range for a register that is live-in
/// to MBB.  The range starts at the block entry (a PHI-def value with no
/// defining instruction) and ends at the first kill/redef, or covers the
/// whole block if the register is live through.
/// NOTE(review): part of the signature (MIIdx) and several control-flow
/// lines are elided in this listing — confirm against the full source.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         LiveInterval &interval, bool isAlias) {
  dbgs() << "\t\tlivein register: ";
  printRegName(interval.reg, tri_);
  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
    // MBB is empty except for DBG_VALUE's.
  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  SlotIndex end = baseIndex;
  bool SeenDefUse = false;
  if (mi->killsRegister(interval.reg, tri_)) {
    DEBUG(dbgs() << " killed");
    end = baseIndex.getDefIndex();
  } else if (mi->definesRegister(interval.reg, tri_)) {
    // Another instruction redefines the register before it is ever read.
    // Then the register is essentially dead at the instruction that defines
    // it. Hence its interval is:
    // [defSlot(def), defSlot(def)+1)
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
  while (++mi != E && mi->isDebugValue())
    // Skip over DBG_VALUE.
  baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  // Live-in register might not be used at all.
  DEBUG(dbgs() << " dead");
  end = MIIdx.getStoreIndex();
  DEBUG(dbgs() << " live through");
  // The live-in value is defined "at the block start" by a PHI-def value
  // number with no defining instruction.
  interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                        0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');
  // Registers only referenced by <undef> uses; they get empty intervals
  // created at the end.
  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");
    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
      // Walk operands in reverse so earlier operand indices are processed
      // last.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
        // handle register defs - build intervals
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
/// createInterval - Allocate a fresh LiveInterval for the given register.
/// Physical registers get an effectively-infinite spill weight (HUGE_VALF)
/// so the allocator never chooses to spill them; virtual registers start
/// at weight 0.
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  // Copy ranges and value numbers into a freshly created interval.
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (VNI->getCopy()->isExtractSubreg()) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
      unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
      if (SrcSubReg == DstSubReg)
        // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
        // reg1034 can still be coalesced to EDX.
      assert(DstSubReg == 0);
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
  } else if (VNI->getCopy()->isInsertSubreg() ||
             VNI->getCopy()->isSubregToReg())
    // For INSERT_SUBREG / SUBREG_TO_REG, operand 2 is the inserted source.
    return VNI->getCopy()->getOperand(2).getReg();
  // Plain register move: the target hook decodes source/dest.
  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
  llvm_unreachable("Unrecognized copy instruction!");
817 //===----------------------------------------------------------------------===//
818 // Register allocator hooks.
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  // Scan register use operands, ignoring li.reg itself and unallocatable
  // physical registers (e.g. reserved regs).
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
    // FIXME: For now, only remat MI with at most one register operand.
           "Can't rematerialize instruction with multiple register operand!");
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  // Value live at MI's own index...
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  // ...must be the same value that is live at UseIdx.
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
  // The target decides whether MI is trivially rematerializable; alias
  // analysis helps it reason about load safety.
  if (!tii_->isTriviallyReMaterializable(MI, aa_))
  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  const LiveInterval &ImpLi = getInterval(ImpUse);
  // Every (non-debug) use of li.reg that sees this val# must also have the
  // implicitly-used value available at that point.
  for (MachineRegisterInfo::use_nodbg_iterator
         ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
    MachineInstr *UseMI = &*ri;
    SlotIndex UseIdx = getInstructionIndex(UseMI);
    if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
    if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
  // If a register operand of the re-materialized instruction is going to
  // be spilled next, then it's not legal to re-materialize this instruction.
  for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
    if (ImpUse == SpillIs[i]->reg)
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
/// Convenience overload: forwards to the full version with dummy
/// spill-list / load-flag arguments.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
  // Check each value number in turn; all of them must be rematerializable.
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
    const VNInfo *VNI = *i;
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding; the foldable operand indices are accumulated in FoldOps and
/// the mod/ref summary in MRInfo.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            SmallVector<unsigned, 2> &FoldOps) {
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    // Defs mark the access as a modification.
    MRInfo |= (unsigned)VirtRegMap::isMod;
    // Filter out two-address use operand(s).
    if (MI->isRegTiedToDefOperand(OpIdx)) {
      MRInfo = VirtRegMap::isModRef;
    // Plain uses mark the access as a reference.
    MRInfo |= (unsigned)VirtRegMap::isRef;
    FoldOps.push_back(OpIdx);
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
  // Ask the target to fold either the stack slot (spill/reload) or the
  // rematerialized def instruction into MI.
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  // Remember this instruction uses the spill slot.
  if (isSS) vrm.addSpillSlotUse(Slot, fmi);
  // Attempt to fold the memory reference into the instruction. If
  // we can do this, we don't need to insert spill code.
  MachineBasicBlock &MBB = *MI->getParent();
  if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
    vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
  // Transfer VirtRegMap bookkeeping from the old instruction to the folded
  // one, then splice the folded instruction in place of the original.
  vrm.transferSpillPts(MI, fmi);
  vrm.transferRestorePts(MI, fmi);
  vrm.transferEmergencySpills(MI, fmi);
  ReplaceMachineInstrInMaps(MI, fmi);
  MI = MBB.insert(MBB.erase(MI), fmi);
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
/// Unlike tryFoldMemoryOperand, this is a pure query: MI is not modified.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
  // Delegate the final decision to the target hook.
  return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return true when every range of the given live
// interval is covered by the same machine basic block (compared against
// the block covering the first range).
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
  // Block covering the first range; all other ranges must match it.
  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
  // There is an implicit use. That means one of the other operand is
  // being remat'ed and the remat'ed instruction has li.reg as an
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    unsigned Reg = MO.getReg();
    // Skip operands that are not virtual registers.
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
    if (!vrm.isReMaterialized(Reg))
    // This operand's def is being rematerialized; redirect the remat'ed
    // instruction's use of li.reg to the replacement vreg.
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    UseMO->setReg(NewVReg);
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
/// Outputs: NewVReg is (re)assigned via createVirtualRegister when a new
/// interval is started; HasDef / HasUse report whether this instruction
/// writes / reads the register being spilled; NewLIs collects the freshly
/// created spill intervals. Returns whether the rewritten operands could
/// all be folded (per canFoldMemoryOperand).
/// NOTE(review): portions of this function are elided in this view.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                            bool TrySplit, SlotIndex index, SlotIndex end,
                            MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                            unsigned Slot, int LdSlot,
                            bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                            const TargetRegisterClass* rc,
                            SmallVector<int, 4> &ReMatIds,
                            const MachineLoopInfo *loopInfo,
                            unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                            DenseMap<unsigned,unsigned> &MBBVRegsMap,
                            std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
  // Walk the operands looking for references to the spilled register.
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    unsigned Reg = mop.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    // If this is the rematerializable definition MI itself and
    // all of its uses are rematerialized, simply delete it.
    if (MI == ReMatOrigDefMI && CanDelete) {
      DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    // If def for this use can't be rematerialized, then try folding.
    // If def is rematerializable and it's a load, also try folding.
    TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
    // Try fold loads (from stack slot, constant pool, etc.) into uses.
    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // 1. If the instr reads the same spilled vreg multiple times, we
    // want to reuse the NewVReg.
    // 2. If the instr is a two-addr instruction, we are required to
    // keep the src/dst regs pinned.
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    SmallVector<unsigned, 2> Ops;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);
    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    bool CreatedNewVReg = false;
    NewVReg = mri_->createVirtualRegister(rc);
    CreatedNewVReg = true;
    // The new virtual register should get the same allocation hints as the
    std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
    if (Hint.first || Hint.second)
      mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    // Do not fold load / store here if we are splitting. We'll find an
    // optimal point to insert a load / store later.
    if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                             Ops, FoldSS, FoldSlot, NewVReg)) {
      // Folding the load/store can completely change the instruction in
      // unpredictable ways, rescan it from the beginning.
      // We need to give the new vreg the same stack slot as the
      // spilled interval.
      vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
      if (isNotInMIMap(MI))
      goto RestartInstruction;
    // We'll try to fold it later if it's profitable.
    CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);
    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    if (CreatedNewVReg) {
      vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
      if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
        // Each valnum may have its own remat id.
        ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
      vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
      if (!CanDelete || (HasUse && HasDef)) {
        // If this is a two-addr instruction then its use operands are
        // rematerializable but its def is not. It should be assigned a
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      vrm.setIsSplitFromReg(NewVReg, li.reg);
    if (CreatedNewVReg) {
      // Fresh interval: live range covers load through def slot with a new
      // value number.
      LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
    // Extend the split live interval to this def / use.
    SlotIndex End = index.getDefIndex();
    LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                 nI.getValNumInfo(nI.getNumValNums()-1));
    DEBUG(dbgs() << " +" << LR);
    // Def case: the new interval covers the def through the store slot.
    LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
    DEBUG(dbgs() << " +" << LR);
    dbgs() << "\t\t\t\tAdded new interval: ";
    nI.print(dbgs(), tri_);
// anyKillInMBBAfterIdx - Return whether the given value number has a kill
// inside MBB that occurs strictly after Idx (and at or before the block's
// end index). PHI kills are skipped.
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  SlotIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    // PHI kills do not correspond to a real instruction in this block.
    if (VNI->kills[j].isPHI())
    SlotIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx <= End)
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
struct RewriteInfo {
  // Index: slot index of MI; used for sorting rewrites in program order.
  RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}

// RewriteInfoCompare - Strict weak ordering by slot index, for std::sort.
struct RewriteInfoCompare {
  bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
    return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Rewrite all defs / uses of li within the
// live range [I->start, I->end): collects the instructions to rewrite in
// index order, rewrites each via rewriteInstructionForSpills, and records
// per-MBB spill points (SpillMBBs / SpillIdxes) and restore points
// (RestoreMBBs / RestoreIdxes) for the caller.
// NOTE(review): portions of this function are elided in this view.
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                             LiveInterval::Ranges::const_iterator &I,
                             MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                             unsigned Slot, int LdSlot,
                             bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                             const TargetRegisterClass* rc,
                             SmallVector<int, 4> &ReMatIds,
                             const MachineLoopInfo *loopInfo,
                             BitVector &SpillMBBs,
                             DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                             BitVector &RestoreMBBs,
                             DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                             DenseMap<unsigned,unsigned> &MBBVRegsMap,
                             std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  // Normalize the range to whole-instruction (base index) boundaries.
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
       re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                                 Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
      // Could not preserve the DBG_VALUE; drop it entirely.
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    assert(!(O.isImplicit() && O.isUse()) &&
           "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    // Only rewrite references that fall inside this live range.
    if (index < start || index >= end)
    // Must be defined by an implicit def. It should not be spilled. Note,
    // this is for correctness reason. e.g.
    // 8   %reg1024<def> = IMPLICIT_DEF
    // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
    // The live range [12, 14) are not part of the r1024 live interval since
    // it's defined by an implicit def. It will not conflicts with live
    // interval of r1025. Now suppose both registers are spilled, you can
    // easily see a situation where both registers are reloaded before
    // the INSERT_SUBREG and both target registers that would overlap.
    RewriteMIs.push_back(RewriteInfo(index, MI));
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    SlotIndex index = rwi.Index;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
    MachineBasicBlock *MBB = MI->getParent();
    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    // Reuse the replacement vreg already chosen for this MBB, if any.
    DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
    if (NVI != MBBVRegsMap.end()) {
      ThisVReg = NVI->second;
    // It's better to start a new interval to avoid artifically
    // extend the new interval.
    if (MI->readsWritesVirtualRegister(li.reg) ==
        std::make_pair(false,true)) {
      MBBVRegsMap.erase(MBB->getNumber());
    bool IsNew = ThisVReg == 0;
    // This ends the previous live interval. If all of its def / use
    // can be folded, give it a low spill weight.
    if (NewVReg && TrySplit && AllCanFold) {
      LiveInterval &nI = getOrCreateInterval(NewVReg);
    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                                               index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                               CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                                               ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
    AllCanFold &= CanFold;
    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    // The spill weight is now infinity as it cannot be spilled again.
    nI.markNotSpillable();
    // Keep track of the last def and first use in each MBB.
    if (MI != ReMatOrigDefMI || !CanDelete) {
      bool HasKill = false;
      HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
      // If this is a two-address code, then this index starts a new VNInfo.
      const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
      HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII == SpillIdxes.end()) {
        // First spill candidate recorded for this MBB.
        std::vector<SRInfo> S;
        S.push_back(SRInfo(index, NewVReg, true));
        SpillIdxes.insert(std::make_pair(MBBId, S));
      } else if (SII->second.back().vreg != NewVReg) {
        SII->second.push_back(SRInfo(index, NewVReg, true));
      } else if (index > SII->second.back().index) {
        // If there is an earlier def and this is a two-address
        // instruction, then it's not possible to fold the store (which
        // would also fold the load).
        SRInfo &Info = SII->second.back();
        Info.canFold = !HasUse;
      SpillMBBs.set(MBBId);
    } else if (SII != SpillIdxes.end() &&
               SII->second.back().vreg == NewVReg &&
               index > SII->second.back().index) {
      // There is an earlier def that's not killed (must be two-address).
      // The spill is no longer needed.
      SII->second.pop_back();
      if (SII->second.empty()) {
        SpillIdxes.erase(MBBId);
        SpillMBBs.reset(MBBId);
    DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
      SpillIdxes.find(MBBId);
    if (SII != SpillIdxes.end() &&
        SII->second.back().vreg == NewVReg &&
        index > SII->second.back().index)
      // Use(s) following the last def, it's not safe to fold the spill.
      SII->second.back().canFold = false;
    DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
      RestoreIdxes.find(MBBId);
    if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
      // If we are splitting live intervals, only fold if it's the first
      // use and there isn't another use later in the MBB.
      RII->second.back().canFold = false;
    // Only need a reload if there isn't an earlier def / use.
    if (RII == RestoreIdxes.end()) {
      std::vector<SRInfo> Infos;
      Infos.push_back(SRInfo(index, NewVReg, true));
      RestoreIdxes.insert(std::make_pair(MBBId, Infos));
    RII->second.push_back(SRInfo(index, NewVReg, true));
    RestoreMBBs.set(MBBId);
    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
// alsoFoldARestore - Return whether a foldable restore of vreg vr is
// recorded at the given instruction index in basic block Id.
bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                                     DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  // No restores recorded for this block at all.
  if (!RestoreMBBs[Id])
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    // Match on index, vreg, and foldability.
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
1547 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1548 unsigned vr, BitVector &RestoreMBBs,
1549 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1550 if (!RestoreMBBs[Id])
1552 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1553 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1554 if (Restores[i].index == index && Restores[i].vreg)
1555 Restores[i].index = SlotIndex();
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
/// NOTE(review): portions of this function are elided in this view.
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
       re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
    assert(MI->isImplicitDef() &&
           "Register def was not rewritten?");
    // Drop the IMPLICIT_DEF from both index maps and the block.
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    // This must be an use of an implicit_def so it's not part of the live
    // interval. Create a new empty live interval for it.
    // FIXME: Can we simply erase some of the instructions? e.g. Stores?
    unsigned NewVReg = mri_->createVirtualRegister(rc);
    vrm.setIsImplicitlyDefined(NewVReg);
    NewLIs.push_back(&getOrCreateInterval(NewVReg));
    // Redirect every operand of this user that referenced li.reg.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.getReg() == li.reg) {
// getSpillWeight - Compute a spill weight contribution for one def and/or
// use at the given loop depth. (Return type is on the preceding, elided
// line; presumably float — TODO confirm.)
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
  // headroom before overflow.
  float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
  // isDef/isUse are bools; the sum counts how many roles this reference has.
  return (isDef + isUse) * lc;
// normalizeSpillWeights - Apply normalizeSpillWeight to every newly created
// spill interval. (Return type is on the preceding, elided line.)
LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
    normalizeSpillWeight(*NewLIs[i]);
// addIntervalsForSpillsFast - Simpler spilling path (enabled by the
// -fast-spill flag): assign one stack slot to the whole interval, then for
// every referencing instruction either fold the slot into it or create a
// tiny one-instruction interval with a reload before and/or a spill after.
// NOTE(review): portions of this function are elided in this view.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
  std::vector<LiveInterval*> added;
  assert(li.isSpillable() && "attempt to spill already spilled interval!");
  dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;
    SmallVector<unsigned, 2> Indices;
    bool HasUse, HasDef;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(li.reg, &Indices);
    // Prefer folding the stack slot directly into MI; only if that fails
    // do we materialize a new vreg with explicit reload / spill points.
    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.assignVirt2StackSlot(NewVReg, slot);
      // create a new register for this spill
      LiveInterval &nI = getOrCreateInterval(NewVReg);
      nI.markNotSpillable();
      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
           E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);
        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      // Fill in the new live interval.
      SlotIndex index = getInstructionIndex(MI);
      // Use side: range from the load slot to the use slot, plus a
      // restore point so the spiller emits the reload.
      LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
                   nI.getNextValue(SlotIndex(), 0, false,
                                   getVNInfoAllocator()));
      DEBUG(dbgs() << " +" << LR);
      vrm.addRestorePoint(NewVReg, MI);
      // Def side: range from the def slot to the store slot, plus a
      // spill point so the spiller emits the store.
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false,
                                   getVNInfoAllocator()));
      DEBUG(dbgs() << " +" << LR);
      vrm.addSpillPoint(NewVReg, true, MI);
      added.push_back(&nI);
      dbgs() << "\t\t\t\tadded new interval: ";
    // Folding may have invalidated the iterator; restart the scan.
    RI = mri_->reg_begin(li.reg);
// addIntervalsForSpills - Main spilling entry point. Decides per-val# whether
// the def is rematerializable, assigns a stack slot if needed, rewrites all
// defs / uses via rewriteInstructionsForSpills, then walks the recorded
// spill / restore points attempting to fold them before handing the
// remainder to the spiller. Returns the newly created intervals.
// NOTE(review): portions of this function are elided in this view.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  // Honor the -fast-spill flag with the simplified algorithm.
  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.isSpillable() && "attempt to spill already spilled interval!");
  dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(dbgs(), tri_);

  // Each bit specify whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  // Per-val# rematerialization state, indexed by VNInfo id.
  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      // Note ReMatOrigDefMI has already been deleted.
      rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                                   Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                   false, vrm, rc, ReMatIds, loopInfo,
                                   SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                   MBBVRegsMap, NewLIs);
      // Later ranges: no rematerialization possible.
      rewriteInstructionsForSpills(li, false, I, NULL, 0,
                                   Slot, 0, false, false, false,
                                   false, vrm, rc, ReMatIds, loopInfo,
                                   SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                   MBBVRegsMap, NewLIs);
      IsFirstRange = false;
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);

  // Splitting is only attempted for intervals spanning multiple blocks.
  bool TrySplit = !intervalIsInOneMBB(li);
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        // Need a stack slot if there is any live range where uses cannot be
        NeedStackSlot = true;
      ReMatDelete.set(VN);
      // Need a stack slot if there is any live range where uses cannot be
      NeedStackSlot = true;

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);
    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    Slot = vrm.getStackSlot(li.reg);

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                                 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                 CanDelete, vrm, rc, ReMatIds, loopInfo,
                                 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                 MBBVRegsMap, NewLIs);

  // Insert spills / restores if we are splitting.
  handleSpilledImpDefs(li, vrm, rc, NewLIs);
  normalizeSpillWeights(NewLIs);

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    // Walk every block that recorded a spill candidate.
    int Id = SpillMBBs.find_first();
    std::vector<SRInfo> &spills = SpillIdxes[Id];
    for (unsigned i = 0, e = spills.size(); i != e; ++i) {
      SlotIndex index = spills[i].index;
      unsigned VReg = spills[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      bool FoundUse = false;
      if (spills[i].canFold) {
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
          (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                          RestoreMBBs, RestoreIdxes))) {
            // MI has two-address uses of the same register. If the use
            // isn't the first and only use in the BB, then we can't fold
            // it. FIXME: Move this to rewriteInstructionsForSpills.
      // Fold the store into the def if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
          // Also folded uses, do not issue a load.
          eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
          nI.removeRange(index.getLoadIndex(), index.getDefIndex());
        nI.removeRange(index.getDefIndex(), index.getStoreIndex());
      // Otherwise tell the spiller to issue a spill.
      LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
      bool isKill = LR->end == index.getStoreIndex();
      if (!MI->registerDefIsDead(nI.reg))
        // No need to spill a dead def.
        vrm.addSpillPoint(VReg, isKill, MI);
      AddedKill.insert(&nI);
      Id = SpillMBBs.find_next(Id);

  // Walk every block that recorded a restore candidate.
  int Id = RestoreMBBs.find_first();
  std::vector<SRInfo> &restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = restores.size(); i != e; ++i) {
    SlotIndex index = restores[i].index;
    // SlotIndex() marks an entry erased by eraseRestoreInfo; skip it.
    if (index == SlotIndex())
    unsigned VReg = restores[i].vreg;
    LiveInterval &nI = getOrCreateInterval(VReg);
    bool isReMat = vrm.isReMaterialized(VReg);
    MachineInstr *MI = getInstructionFromIndex(index);
    bool CanFold = false;
    if (restores[i].canFold) {
      for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
        MachineOperand &MO = MI->getOperand(j);
        if (!MO.isReg() || MO.getReg() != VReg)
        // If this restore were to be folded, it would have been folded
    // Fold the load into the use if possible.
    bool Folded = false;
    if (CanFold && !Ops.empty()) {
      Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
      MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
      bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
      // If the rematerializable def is a load, also try to fold it.
      if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
        Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                      Ops, isLoadSS, LdSlot, VReg);
      unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
      // Re-matting an instruction with virtual register use. Add the
      // register as an implicit use on the use MI and mark the register
      // interval as unspillable.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.markNotSpillable();
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
    // If folding is not possible / failed, then tell the spiller to issue a
    // load / rematerialization for us.
    nI.removeRange(index.getLoadIndex(), index.getDefIndex());
    vrm.addRestorePoint(VReg, MI);
    Id = RestoreMBBs.find_next(Id);

  // Finalize intervals: add kills, finalize spill weights, and filter out
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
    if (!AddedKill.count(LI)) {
      // Mark the last use of this new interval as a kill.
      LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
      SlotIndex LastUseIdx = LR->end.getBaseIndex();
      MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
      int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
      assert(UseIdx != -1);
      if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
        LastUse->getOperand(UseIdx).setIsKill();
        vrm.addKillPoint(LI->reg, LastUseIdx);
    RetNewLIs.push_back(LI);

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  // A super-register only counts if it is allocatable AND tracked here.
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    // A super-register with an interval but no allocatable super of its
    // own is the topmost candidate.
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2047 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2048 /// specified interval that conflicts with the specified physical register.
// NOTE(review): this extract is missing interior lines (numbering jumps
// 2057 -> 2059 and 2060 -> 2063); the "continue" for debug values and the
// NumConflicts increment are not visible here -- confirm against the full file.
2049 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2050 unsigned PhysReg) const {
2051 unsigned NumConflicts = 0;
// Conflicts are tested against the interval of the representative (largest)
// super-register of PhysReg, not PhysReg itself.
2052 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
// Visit every operand (defs and uses) of li's virtual register.
2053 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2054 E = mri_->reg_end(); I != E; ++I) {
2055 MachineOperand &O = I.getOperand();
2056 MachineInstr *MI = O.getParent();
// DBG_VALUE instructions carry no real register pressure; they are skipped.
2057 if (MI->isDebugValue())
// A conflict is an instruction of li whose slot index lies inside pli's
// live range.
2059 SlotIndex Index = getInstructionIndex(MI);
2060 if (pli.liveAt(Index))
2063 return NumConflicts;
2066 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2067 /// around all defs and uses of the specified interval. Return true if it
2068 /// was able to cut its interval.
// NOTE(review): many interior lines are absent from this extract (numbering
// jumps at 2072, 2079-80, 2090-93, 2100-01, 2107, 2113-15, 2122, 2124, 2127,
// 2132-37). In particular the "continue" statements after the guard
// conditions, the "else {" that introduces the fatal-error path, and the
// function's return are not visible -- confirm all of them against the
// full file.
2069 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2070 unsigned PhysReg, VirtRegMap &vrm) {
// Spill the representative (largest) super-register so all aliases are
// covered by a single spill.
2071 unsigned SpillReg = getRepresentativeReg(PhysReg);
2073 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2074 // If there are registers which alias PhysReg, but which are not a
2075 // sub-register of the chosen representative super register. Assert
2076 // since we can't handle it yet.
2077 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2078 tri_->isSuperRegister(*AS, SpillReg));
// Collect SpillReg plus every sub-register of it that has a live interval;
// these are the physical intervals that must be cut around li's
// instructions.
2081 SmallVector<unsigned, 4> PRegs;
2082 if (hasInterval(SpillReg))
2083 PRegs.push_back(SpillReg);
// Added de-duplicates sub-registers reachable through multiple paths.
2085 SmallSet<unsigned, 4> Added;
2086 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
2087 if (Added.insert(*AS) && hasInterval(*AS)) {
2088 PRegs.push_back(*AS);
// Presumably the inner loop marks *AS's own sub-registers as Added so
// they are not pushed twice -- its body is not visible here (line 2090+).
2089 for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
// Visit each instruction that defs or uses li's register exactly once.
2094 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2095 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2096 E = mri_->reg_end(); I != E; ++I) {
2097 MachineOperand &O = I.getOperand();
2098 MachineInstr *MI = O.getParent();
// Skip debug values and instructions already processed via another operand.
2099 if (MI->isDebugValue() || SeenMIs.count(MI))
2102 SlotIndex Index = getInstructionIndex(MI);
2103 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2104 unsigned PReg = PRegs[i];
2105 LiveInterval &pli = getInterval(PReg);
// Only physical intervals live across this instruction need a spill.
2106 if (!pli.liveAt(Index))
// Record an emergency spill of PReg around MI for the spiller.
2108 vrm.addEmergencySpill(PReg, MI);
// Carve the slot range [load-of-MI, base-of-next-instruction) out of the
// physical interval so li can occupy PReg across MI.
2109 SlotIndex StartIdx = Index.getLoadIndex();
2110 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2111 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2112 pli.removeRange(StartIdx, EndIdx);
// Otherwise (else-branch header not visible in this extract) the range
// spans a join point we cannot cut: report a fatal allocation failure,
// with extra diagnostics when the culprit is an inline asm.
2116 raw_string_ostream Msg(msg);
2117 Msg << "Ran out of registers during register allocation!";
2118 if (MI->isInlineAsm()) {
2119 Msg << "\nPlease check your inline asm statement for invalid "
2120 << "constraints:\n";
2121 MI->print(Msg, tm_);
2123 report_fatal_error(Msg.str());
// Also trim the sub-register intervals of PReg over the same slot range
// so they stay consistent with the cut made above.
2125 for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
2126 if (!hasInterval(*AS))
2128 LiveInterval &spli = getInterval(*AS);
2129 if (spli.liveAt(Index))
2130 spli.removeRange(Index.getLoadIndex(),
2131 Index.getNextIndex().getBaseIndex());
2138 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2139 MachineInstr* startInst) {
2140 LiveInterval& Interval = getOrCreateInterval(reg);
2141 VNInfo* VN = Interval.getNextValue(
2142 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2143 startInst, true, getVNInfoAllocator());
2144 VN->setHasPHIKill(true);
2145 VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
2147 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2148 getMBBEndIdx(startInst->getParent()), VN);
2149 Interval.addRange(LR);