1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
49 // Hidden options for help debugging.
// -disable-rematerialization: turn off remat entirely (debугging aid is the
// intent per the flag name; defaults to enabled remat).
50 static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
// Pass-level statistics, reported with -stats.
53 STATISTIC(numIntervals , "Number of original intervals");
54 STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
55 STATISTIC(numSplits    , "Number of intervals split");
// Pass identification and registration with the legacy pass manager.
57 char LiveIntervals::ID = 0;
58 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// Declare the analyses this pass requires and the ones it preserves, so the
// pass manager schedules AliasAnalysis, LiveVariables, PHIElimination,
// TwoAddressInstruction, ProcessImplicitDefs and SlotIndexes before us.
// NOTE(review): interior lines and the closing brace are absent from this
// excerpt (embedded line numbers skip 61, 68-69, 72-73, 80).
60 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
62   AU.addRequired<AliasAnalysis>();
63   AU.addPreserved<AliasAnalysis>();
64   AU.addPreserved<LiveVariables>();
65   AU.addRequired<LiveVariables>();
66   AU.addPreservedID(MachineLoopInfoID);
67   AU.addPreservedID(MachineDominatorsID);
70   AU.addPreservedID(PHIEliminationID);
71   AU.addRequiredID(PHIEliminationID);
74   AU.addRequiredID(TwoAddressInstructionPassID);
75   AU.addPreserved<ProcessImplicitDefs>();
76   AU.addRequired<ProcessImplicitDefs>();
77   AU.addPreserved<SlotIndexes>();
    // Transitive: anything that uses LiveIntervals also needs live SlotIndexes.
78   AU.addRequiredTransitive<SlotIndexes>();
79   MachineFunctionPass::getAnalysisUsage(AU);
// Drop all per-function state: the reg -> LiveInterval map, the VNInfo
// allocator, and any cloned MachineInstrs created during splitting.
82 void LiveIntervals::releaseMemory() {
83   // Free the live intervals themselves.
84   for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
85        E = r2iMap_.end(); I != E; ++I)
90   // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
91   VNInfoAllocator.Reset();
    // Delete leftover cloned instructions (owned by this pass, not any MBB).
92   while (!CloneMIs.empty()) {
93     MachineInstr *MI = CloneMIs.back();
95     mf_->DeleteMachineInstr(MI);
99 /// runOnMachineFunction - Register allocate the whole function
// Entry point: caches target/analysis handles, then (in lines elided from
// this excerpt) computes the live intervals and updates the statistic.
101 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
103   mri_ = &mf_->getRegInfo();
104   tm_ = &fn.getTarget();
105   tri_ = tm_->getRegisterInfo();
106   tii_ = tm_->getInstrInfo();
107   aa_ = &getAnalysis<AliasAnalysis>();
108   lv_ = &getAnalysis<LiveVariables>();
109   indexes_ = &getAnalysis<SlotIndexes>();
    // Bitvector of physregs the target allows the allocator to use.
110   allocatableRegs_ = tri_->getAllocatableSet(fn);
114   numIntervals += getNumIntervals();
120 /// print - Implement the dump method.
// Prints every computed interval; the Module* argument is unused.
121 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
122   OS << "********** INTERVALS **********\n";
123   for (const_iterator I = begin(), E = end(); I != E; ++I) {
124     I->second->print(OS, tri_);
// Dump every machine instruction in the function, prefixed with its
// SlotIndex. DBG_VALUE instructions have no index and are handled specially
// (the branch body is elided in this excerpt).
131 void LiveIntervals::printInstrs(raw_ostream &OS) const {
132   OS << "********** MACHINEINSTRS **********\n";
134   for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
135        mbbi != mbbe; ++mbbi) {
136     OS << "BB#" << mbbi->getNumber()
137        << ":\t\t# derived from " << mbbi->getName() << "\n";
138     for (MachineBasicBlock::iterator mii = mbbi->begin(),
139            mie = mbbi->end(); mii != mie; ++mii) {
140       if (mii->isDebugValue())
143         OS << getInstructionIndex(mii) << '\t' << *mii;
// Debug helper; body (presumably printInstrs(dbgs()) — elided here) at 149+.
148 void LiveIntervals::dumpInstrs() const {
// conflictsWithPhysReg - Return true if assigning physreg 'reg' would
// conflict with interval 'li' given the current virtual->physical mapping in
// 'vrm'. Only handles single-range intervals confined to one basic block;
// scans each instruction in the range for operands that overlap 'reg'.
152 bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
153                                          VirtRegMap &vrm, unsigned reg) {
154   // We don't handle fancy stuff crossing basic block boundaries
155   if (li.ranges.size() != 1)
157   const LiveRange &range = li.ranges.front();
158   SlotIndex idx = range.start.getBaseIndex();
159   SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
161   // Skip deleted instructions
162   MachineInstr *firstMI = getInstructionFromIndex(idx);
163   while (!firstMI && idx != end) {
164     idx = idx.getNextIndex();
165     firstMI = getInstructionFromIndex(idx);
170   // Find last instruction in range
171   SlotIndex lastIdx = end.getPrevIndex();
172   MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
173   while (!lastMI && lastIdx != idx) {
174     lastIdx = lastIdx.getPrevIndex();
175     lastMI = getInstructionFromIndex(lastIdx);
180   // Range cannot cross basic block boundaries or terminators
181   MachineBasicBlock *MBB = firstMI->getParent();
182   if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
185   MachineBasicBlock::const_iterator E = lastMI;
187   for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
188     const MachineInstr &MI = *I;
190     // Allow copies to and from li.reg
191     unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
192     if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
193       if (SrcReg == li.reg || DstReg == li.reg)
195     if (MI.isCopy() && MI.getOperand(0).getReg() == li.reg &&
196         MI.getOperand(1).getReg() == li.reg)
199     // Check for operands using reg
200     for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
201       const MachineOperand& mop = MI.getOperand(i);
204       unsigned PhysReg = mop.getReg();
205       if (PhysReg == 0 || PhysReg == li.reg)
        // Map a virtual operand to its assigned physreg before the overlap
        // check; unassigned vregs cannot conflict.
207       if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
208         if (!vrm.hasPhys(PhysReg))
210         PhysReg = vrm.getPhys(PhysReg);
212       if (PhysReg && tri_->regsOverlap(PhysReg, reg))
217   // No conflicts found.
// conflictsWithAliasRef - Walk every instruction covered by 'li' (skipping
// deleted instructions and coalesced copies in JoinedCopies) looking for an
// explicit physical-register operand that overlaps 'Reg'.
221 bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
222                                   SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
223   for (LiveInterval::Ranges::const_iterator
224          I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
225     for (SlotIndex index = I->start.getBaseIndex(),
226            end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
228          index = index.getNextIndex()) {
229       MachineInstr *MI = getInstructionFromIndex(index);
231         continue;               // skip deleted instructions
233       if (JoinedCopies.count(MI))
235       for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
236         MachineOperand& MO = MI->getOperand(i);
239         unsigned PhysReg = MO.getReg();
        // Only physical registers other than Reg itself can alias-conflict.
240         if (PhysReg == 0 || PhysReg == Reg ||
241             TargetRegisterInfo::isVirtualRegister(PhysReg))
243         if (tri_->regsOverlap(Reg, PhysReg))
// Debug helper: print a physreg by target name, a vreg as "%reg<N>".
253 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
254   if (TargetRegisterInfo::isPhysicalRegister(reg))
255     dbgs() << tri_->getName(reg);
257     dbgs() << "%reg" << reg;
// MultipleDefsBySameMI - Return true if the instruction defines the register
// at operand MOIdx again in a later operand (distinct sub-registers), e.g.
// two subreg defs left over from REG_SEQUENCE elimination.
262 bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
263   unsigned Reg = MI.getOperand(MOIdx).getReg();
264   for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
265     const MachineOperand &MO = MI.getOperand(i);
268     if (MO.getReg() == Reg && MO.isDef()) {
      // Sanity check: the two defs must name different sub-registers.
269       assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
270              MI.getOperand(MOIdx).getSubReg() &&
271              (MO.getSubReg() || MO.isImplicit()));
278 /// isPartialRedef - Return true if the specified def at the specific index is
279 /// partially re-defining the specified live interval. A common case of this is
280 /// a definition of the sub-register.
281 bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
282                                    LiveInterval &interval) {
  // Only subreg defs (and not earlyclobbers) can be partial redefs.
283   if (!MO.getSubReg() || MO.isEarlyClobber())
286   SlotIndex RedefIndex = MIIdx.getDefIndex();
287   const LiveRange *OldLR =
288     interval.getLiveRangeContaining(RedefIndex.getUseIndex());
289   if (OldLR->valno->isDefAccurate()) {
290     MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
    // Partial redef iff the previous value's def instruction also defines
    // this register (i.e. the live range is already carrying a value).
291     return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
// handleVirtualRegisterDef - Extend 'interval' for a def of a virtual
// register at MIIdx. First def of a vreg builds the whole live range from
// LiveVariables kill info; later defs handle two-address redefs, partial
// (subreg) redefs, and phi-join revivals.
296 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
297                                              MachineBasicBlock::iterator mi,
301                                              LiveInterval &interval) {
303       dbgs() << "\t\tregister: ";
304       printRegName(interval.reg, tri_);
307   // Virtual registers may be defined multiple times (due to phi
308   // elimination and 2-addr elimination). Much of what we do only has to be
309   // done once for the vreg. We use an empty interval to detect the first
310   // time we see a vreg.
311   LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
312   if (interval.empty()) {
313     // Get the Idx of the defining instructions.
314     SlotIndex defIndex = MIIdx.getDefIndex();
315     // Earlyclobbers move back one, so that they overlap the live range
317     if (MO.isEarlyClobber())
318       defIndex = MIIdx.getUseIndex();
320     // Make sure the first definition is not a partial redefinition. Add an
321     // <imp-def> of the full register.
323       mi->addRegisterDefined(interval.reg);
325     MachineInstr *CopyMI = NULL;
326     unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    // Record the defining copy (if any) on the value number for coalescing.
327     if (mi->isCopyLike() ||
328         tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg)) {
332     VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
334     assert(ValNo->id == 0 && "First value in interval is not 0?");
336     // Loop over all of the blocks that the vreg is defined in.  There are
337     // two cases we have to handle here.  The most common case is a vreg
338     // whose lifetime is contained within a basic block. In this case there
339     // will be a single kill, in MBB, which comes after the definition.
340     if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
341       // FIXME: what about dead vars?
343       if (vi.Kills[0] != mi)
344         killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
346         killIdx = defIndex.getStoreIndex();
348       // If the kill happens after the definition, we have an intra-block
350       if (killIdx > defIndex) {
351         assert(vi.AliveBlocks.empty() &&
352                "Shouldn't be alive across any blocks!");
353         LiveRange LR(defIndex, killIdx, ValNo);
354         interval.addRange(LR);
355         DEBUG(dbgs() << " +" << LR << "\n");
360     // The other case we handle is when a virtual register lives to the end
361     // of the defining block, potentially live across some blocks, then is
362     // live into some number of blocks, but gets killed.  Start by adding a
363     // range that goes from this definition to the end of the defining block.
364     LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
365     DEBUG(dbgs() << " +" << NewLR);
366     interval.addRange(NewLR);
368     bool PHIJoin = lv_->isPHIJoin(interval.reg);
371       // A phi join register is killed at the end of the MBB and revived as a new
372       // valno in the killing blocks.
373       assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
374       DEBUG(dbgs() << " phi-join");
375       ValNo->setHasPHIKill(true);
377       // Iterate over all of the blocks that the variable is completely
378       // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
380       for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
381            E = vi.AliveBlocks.end(); I != E; ++I) {
382         MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
383         LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
384         interval.addRange(LR);
385         DEBUG(dbgs() << " +" << LR);
389     // Finally, this virtual register is live from the start of any killing
390     // block to the 'use' slot of the killing instruction.
391     for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
392       MachineInstr *Kill = vi.Kills[i];
393       SlotIndex Start = getMBBStartIdx(Kill->getParent());
394       SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();
396       // Create interval with one of a NEW value number.  Note that this value
397       // number isn't actually defined by an instruction, weird huh? :)
399         ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
401         ValNo->setIsPHIDef(true);
403       LiveRange LR(Start, killIdx, ValNo);
404       interval.addRange(LR);
405       DEBUG(dbgs() << " +" << LR);
409     if (MultipleDefsBySameMI(*mi, MOIdx))
410       // Multiple defs of the same virtual register by the same instruction.
411       // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
412       // This is likely due to elimination of REG_SEQUENCE instructions. Return
413       // here since there is nothing to do.
416     // If this is the second time we see a virtual register definition, it
417     // must be due to phi elimination or two addr elimination.  If this is
418     // the result of two address elimination, then the vreg is one of the
419     // def-and-use register operand.
421     // It may also be partial redef like this:
422     // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
423     // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
424     bool PartReDef = isPartialRedef(MIIdx, MO, interval);
425     if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
426       // If this is a two-address definition, then we have already processed
427       // the live range.  The only problem is that we didn't realize there
428       // are actually two values in the live interval.  Because of this we
429       // need to take the LiveRegion that defines this register and split it
431       SlotIndex RedefIndex = MIIdx.getDefIndex();
432       if (MO.isEarlyClobber())
433         RedefIndex = MIIdx.getUseIndex();
435       const LiveRange *OldLR =
436         interval.getLiveRangeContaining(RedefIndex.getUseIndex());
437       VNInfo *OldValNo = OldLR->valno;
438       SlotIndex DefIndex = OldValNo->def.getDefIndex();
440       // Delete the previous value, which should be short and continuous,
441       // because the 2-addr copy must be in the same MBB as the redef.
442       interval.removeRange(DefIndex, RedefIndex);
444       // The new value number (#1) is defined by the instruction we claimed
446       VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
447                                             false, // update at *
449       ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
451       // Value#0 is now defined by the 2-addr instruction.
452       OldValNo->def  = RedefIndex;
453       OldValNo->setCopy(0);
455       // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
456       unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
457       if (PartReDef && (mi->isCopyLike() ||
458           tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg)))
459         OldValNo->setCopy(&*mi);
461       // Add the new live interval which replaces the range for the input copy.
462       LiveRange LR(DefIndex, RedefIndex, ValNo);
463       DEBUG(dbgs() << " replace range with " << LR);
464       interval.addRange(LR);
466       // If this redefinition is dead, we need to add a dummy unit live
467       // range covering the def slot.
469         interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
473           dbgs() << " RESULT: ";
474           interval.print(dbgs(), tri_);
476     } else if (lv_->isPHIJoin(interval.reg)) {
477       // In the case of PHI elimination, each variable definition is only
478       // live until the end of the block.  We've already taken care of the
479       // rest of the live range.
481       SlotIndex defIndex = MIIdx.getDefIndex();
482       if (MO.isEarlyClobber())
483         defIndex = MIIdx.getUseIndex();
486       MachineInstr *CopyMI = NULL;
487       unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
488       if (mi->isCopyLike() ||
489           tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
491       ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
      // Phi-join def lives only to the end of its block; revived in killers.
493       SlotIndex killIndex = getMBBEndIdx(mbb);
494       LiveRange LR(defIndex, killIndex, ValNo);
495       interval.addRange(LR);
496       ValNo->setHasPHIKill(true);
497       DEBUG(dbgs() << " phi-join +" << LR);
499       llvm_unreachable("Multiply defined register");
503   DEBUG(dbgs() << '\n');
// handlePhysicalRegisterDef - Add a live range for a physreg def at MIIdx.
// A physreg range never crosses a block boundary: scan forward from the def
// within MBB for a kill, a tied redef, or an unrelated redef; otherwise the
// def is dead (or live-in-never-used) and gets a one-slot range.
506 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
507                                               MachineBasicBlock::iterator mi,
510                                               LiveInterval &interval,
511                                               MachineInstr *CopyMI) {
512   // A physical register cannot be live across basic block, so its
513   // lifetime must end somewhere in its defining basic block.
515       dbgs() << "\t\tregister: ";
516       printRegName(interval.reg, tri_);
519   SlotIndex baseIndex = MIIdx;
520   SlotIndex start = baseIndex.getDefIndex();
521   // Earlyclobbers move back one.
522   if (MO.isEarlyClobber())
523     start = MIIdx.getUseIndex();
524   SlotIndex end = start;
526   // If it is not used after definition, it is considered dead at
527   // the instruction defining it. Hence its interval is:
528   // [defSlot(def), defSlot(def)+1)
529   // For earlyclobbers, the defSlot was pushed back one; the extra
530   // advance below compensates.
532     DEBUG(dbgs() << " dead");
533     end = start.getStoreIndex();
537   // If it is not dead on definition, it must be killed by a
538   // subsequent instruction. Hence its interval is:
539   // [defSlot(def), useSlot(kill)+1)
540   baseIndex = baseIndex.getNextIndex();
541   while (++mi != MBB->end()) {
543     if (mi->isDebugValue())
    // Resync baseIndex past slots whose instructions were deleted.
545     if (getInstructionFromIndex(baseIndex) == 0)
546       baseIndex = indexes_->getNextNonNullIndex(baseIndex);
548     if (mi->killsRegister(interval.reg, tri_)) {
549       DEBUG(dbgs() << " killed");
550       end = baseIndex.getDefIndex();
553       int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
555         if (mi->isRegTiedToUseOperand(DefIdx)) {
556           // Two-address instruction.
557           end = baseIndex.getDefIndex();
559           // Another instruction redefines the register before it is ever read.
560           // Then the register is essentially dead at the instruction that
561           // defines it. Hence its interval is:
562           // [defSlot(def), defSlot(def)+1)
563           DEBUG(dbgs() << " dead");
564           end = start.getStoreIndex();
570     baseIndex = baseIndex.getNextIndex();
573   // The only case we should have a dead physreg here without a killing or
574   // instruction where we know it's dead is if it is live-in to the function
575   // and never used. Another possible case is the implicit use of the
576   // physical register has been deleted by two-address pass.
577   end = start.getStoreIndex();
580   assert(start < end && "did not find end of interval?");
582   // Already exists? Extend old live interval.
583   LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
584   bool Extend = OldLR != interval.end();
585   VNInfo *ValNo = Extend
586     ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
587   if (MO.isEarlyClobber() && Extend)
588     ValNo->setHasRedefByEC(true);
589   LiveRange LR(start, end, ValNo);
590   interval.addRange(LR);
591   DEBUG(dbgs() << " +" << LR << '\n');
// handleRegisterDef - Dispatch a register def: virtual regs go to
// handleVirtualRegisterDef; allocatable physregs go to
// handlePhysicalRegisterDef, once for the reg itself and once for each
// sub-register not explicitly defined by the same instruction.
594 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
595                                       MachineBasicBlock::iterator MI,
599   if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
600     handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
601                              getOrCreateInterval(MO.getReg()));
602   else if (allocatableRegs_[MO.getReg()]) {
603     MachineInstr *CopyMI = NULL;
604     unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
605     if (MI->isCopyLike() ||
606         tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
608     handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
609                               getOrCreateInterval(MO.getReg()), CopyMI);
610     // Def of a register also defines its sub-registers.
611     for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
612       // If MI also modifies the sub-register explicitly, avoid processing it
613       // more than once. Do not pass in TRI here so it checks for exact match.
614       if (!MI->definesRegister(*AS))
615         handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
616                                   getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Create the range for a register that is live-in to
// MBB: from the block start (a phi-def value number) to its first kill, its
// first redef, or (if never used) conservatively through the block.
620 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
622                                          LiveInterval &interval, bool isAlias) {
624       dbgs() << "\t\tlivein register: ";
625       printRegName(interval.reg, tri_);
628   // Look for kills, if it reaches a def before it's killed, then it shouldn't
629   // be considered a livein.
630   MachineBasicBlock::iterator mi = MBB->begin();
631   MachineBasicBlock::iterator E = MBB->end();
632   // Skip over DBG_VALUE at the start of the MBB.
633   if (mi != E && mi->isDebugValue()) {
634     while (++mi != E && mi->isDebugValue())
637       // MBB is empty except for DBG_VALUE's.
641   SlotIndex baseIndex = MIIdx;
642   SlotIndex start = baseIndex;
643   if (getInstructionFromIndex(baseIndex) == 0)
644     baseIndex = indexes_->getNextNonNullIndex(baseIndex);
646   SlotIndex end = baseIndex;
647   bool SeenDefUse = false;
650     if (mi->killsRegister(interval.reg, tri_)) {
651       DEBUG(dbgs() << " killed");
652       end = baseIndex.getDefIndex();
655     } else if (mi->definesRegister(interval.reg, tri_)) {
656       // Another instruction redefines the register before it is ever read.
657       // Then the register is essentially dead at the instruction that defines
658       // it. Hence its interval is:
659       // [defSlot(def), defSlot(def)+1)
660       DEBUG(dbgs() << " dead");
661       end = start.getStoreIndex();
666     while (++mi != E && mi->isDebugValue())
667       // Skip over DBG_VALUE.
670       baseIndex = indexes_->getNextNonNullIndex(baseIndex);
673   // Live-in register might not be used at all.
676       DEBUG(dbgs() << " dead");
677       end = MIIdx.getStoreIndex();
679       DEBUG(dbgs() << " live through");
  // The live-in value is defined "at the block boundary": mark it PHIDef.
685     interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
686                           0, false, VNInfoAllocator);
687   vni->setIsPHIDef(true);
688   LiveRange LR(start, end, vni);
690   interval.addRange(LR);
691   DEBUG(dbgs() << " +" << LR << '\n');
694 /// computeIntervals - computes the live intervals for virtual
695 /// registers. for some ordering of the machine instructions [1,N] a
696 /// live interval is an interval [i, j) where 1 <= i <= j < N for
697 /// which a variable is live
698 void LiveIntervals::computeIntervals() {
699   DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
700                << "********** Function: "
701                << ((Value*)mf_->getFunction())->getName() << '\n');
703   SmallVector<unsigned, 8> UndefUses;
704   for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
706     MachineBasicBlock *MBB = MBBI;
710     // Track the index of the current machine instr.
711     SlotIndex MIIndex = getMBBStartIdx(MBB);
712     DEBUG(dbgs() << "BB#" << MBB->getNumber()
713           << ":\t\t# derived from " << MBB->getName() << "\n");
715     // Create intervals for live-ins to this BB first.
716     for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
717            LE = MBB->livein_end(); LI != LE; ++LI) {
718       handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
719       // Multiple live-ins can alias the same register.
720       for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
721         if (!hasInterval(*AS))
722           handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
726     // Skip over empty initial indices.
727     if (getInstructionFromIndex(MIIndex) == 0)
728       MIIndex = indexes_->getNextNonNullIndex(MIIndex);
730     for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
732       DEBUG(dbgs() << MIIndex << "\t" << *MI);
      // DBG_VALUEs occupy no slot; they are skipped.
733       if (MI->isDebugValue())
      // Walk operands in reverse so defs are seen before earlier operands.
737       for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
738         MachineOperand &MO = MI->getOperand(i);
739         if (!MO.isReg() || !MO.getReg())
742         // handle register defs - build intervals
744           handleRegisterDef(MBB, MI, MIIndex, MO, i);
745         else if (MO.isUndef())
746           UndefUses.push_back(MO.getReg());
749       // Move to the next instr slot.
750       MIIndex = indexes_->getNextNonNullIndex(MIIndex);
754   // Create empty intervals for registers defined by implicit_def's (except
755   // for those implicit_def that define values which are liveout of their
757   for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
758     unsigned UndefReg = UndefUses[i];
759     (void)getOrCreateInterval(UndefReg);
// Allocate a fresh LiveInterval; physregs get infinite spill weight so the
// allocator never tries to spill them.
763 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
764   float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
765   return new LiveInterval(reg, Weight);
768 /// dupInterval - Duplicate a live interval. The caller is responsible for
769 /// managing the allocated memory.
770 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
771   LiveInterval *NewLI = createInterval(li->reg);
772   NewLI->Copy(*li, mri_, getVNInfoAllocator());
776 //===----------------------------------------------------------------------===//
777 // Register allocator hooks.
780 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
781 /// allow one) virtual register operand, then its uses are implicitly using
782 /// the register. Returns the virtual register.
783 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
784                                             MachineInstr *MI) const {
786   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
787     MachineOperand &MO = MI->getOperand(i);
788     if (!MO.isReg() || !MO.isUse())
790     unsigned Reg = MO.getReg();
791     if (Reg == 0 || Reg == li.reg)
    // Reserved (non-allocatable) physreg uses (e.g. a PIC base) don't count.
794     if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
795         !allocatableRegs_[Reg])
797     // FIXME: For now, only remat MI with at most one register operand.
799            "Can't rematerialize instruction with multiple register operand!");
808 /// isValNoAvailableAt - Return true if the val# of the specified interval
809 /// which reaches the given instruction also reaches the specified use index.
810 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
811                                        SlotIndex UseIdx) const {
812   SlotIndex Index = getInstructionIndex(MI);
  // NOTE(review): assumes MI's index lies inside some range of li.
813   VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
814   LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
815   return UI != li.end() && UI->valno == ValNo;
818 /// isReMaterializable - Returns true if the definition MI of the specified
819 /// val# of the specified interval is re-materializable.
820 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
821                                        const VNInfo *ValNo, MachineInstr *MI,
822                                        SmallVectorImpl<LiveInterval*> &SpillIs,
827   if (!tii_->isTriviallyReMaterializable(MI, aa_))
830   // Target-specific code can mark an instruction as being rematerializable
831   // if it has one virtual reg use, though it had better be something like
832   // a PIC base register which is likely to be live everywhere.
833   unsigned ImpUse = getReMatImplicitUse(li, MI);
835     const LiveInterval &ImpLi = getInterval(ImpUse);
    // Every use of li reached by this val# must also see the implicit-use
    // register's same value, or remat would read a different value there.
836     for (MachineRegisterInfo::use_nodbg_iterator
837            ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
839       MachineInstr *UseMI = &*ri;
840       SlotIndex UseIdx = getInstructionIndex(UseMI);
841       if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
843       if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
847     // If a register operand of the re-materialized instruction is going to
848     // be spilled next, then it's not legal to re-materialize this instruction.
849     for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
850       if (ImpUse == SpillIs[i]->reg)
856 /// isReMaterializable - Returns true if the definition MI of the specified
857 /// val# of the specified interval is re-materializable.
// Convenience overload: no spill set, caller doesn't need the isLoad flag.
858 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
859                                        const VNInfo *ValNo, MachineInstr *MI) {
860   SmallVector<LiveInterval*, 4> Dummy1;
862   return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
865 /// isReMaterializable - Returns true if every definition of MI of every
866 /// val# of the specified interval is re-materializable.
867 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
868                                        SmallVectorImpl<LiveInterval*> &SpillIs,
871   for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
873     const VNInfo *VNI = *i;
875       continue; // Dead val#.
876     // Is the def for the val# rematerializable?
    // Without an accurate def location we can't inspect the defining MI.
877     if (!VNI->isDefAccurate())
879     MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
880     bool DefIsLoad = false;
882         !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
889 /// FilterFoldedOps - Filter out two-address use operands. Return
890 /// true if it finds any issue with the operands that ought to prevent
892 static bool FilterFoldedOps(MachineInstr *MI,
893 SmallVector<unsigned, 2> &Ops,
895 SmallVector<unsigned, 2> &FoldOps) {
897 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
898 unsigned OpIdx = Ops[i];
899 MachineOperand &MO = MI->getOperand(OpIdx);
900 // FIXME: fold subreg use.
904 MRInfo |= (unsigned)VirtRegMap::isMod;
906 // Filter out two-address use operand(s).
907 if (MI->isRegTiedToDefOperand(OpIdx)) {
908 MRInfo = VirtRegMap::isModRef;
911 MRInfo |= (unsigned)VirtRegMap::isRef;
913 FoldOps.push_back(OpIdx);
919 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
920 /// slot / to reg or any rematerialized load into ith operand of specified
921 /// MI. If it is successful, MI is updated with the newly created MI and
// the VirtRegMap / slot-index maps are transferred to the folded instruction.
923 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
924                                          VirtRegMap &vrm, MachineInstr *DefMI,
926                                          SmallVector<unsigned, 2> &Ops,
927                                          bool isSS, int Slot, unsigned Reg) {
928   // If it is an implicit def instruction, just delete it.
929   if (MI->isImplicitDef()) {
930     RemoveMachineInstrFromMaps(MI);
931     vrm.RemoveMachineInstrFromMaps(MI);
932     MI->eraseFromParent();
937   // Filter the list of operand indexes that are to be folded. Abort if
938   // any operand will prevent folding.
940   SmallVector<unsigned, 2> FoldOps;
941   if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
944   // The only time it's safe to fold into a two address instruction is when
945   // it's folding reload and spill from / into a spill stack slot.
946   if (DefMI && (MRInfo & VirtRegMap::isMod))
  // Fold either the stack slot (spill/reload) or the remat def instruction.
949   MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
950                            : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
952     // Remember this instruction uses the spill slot.
953     if (isSS) vrm.addSpillSlotUse(Slot, fmi);
955     // Attempt to fold the memory reference into the instruction. If
956     // we can do this, we don't need to insert spill code.
957     MachineBasicBlock &MBB = *MI->getParent();
958     if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
959       vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    // Migrate all bookkeeping from the old MI to the folded replacement.
960     vrm.transferSpillPts(MI, fmi);
961     vrm.transferRestorePts(MI, fmi);
962     vrm.transferEmergencySpills(MI, fmi);
963     ReplaceMachineInstrInMaps(MI, fmi);
964     MI = MBB.insert(MBB.erase(MI), fmi);
971 /// canFoldMemoryOperand - Returns true if the specified load / store
972 /// folding is possible.
973 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
974                                          SmallVector<unsigned, 2> &Ops,
976   // Filter the list of operand indexes that are to be folded. Abort if
977   // any operand will prevent folding.
979   SmallVector<unsigned, 2> FoldOps;
980   if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
983   // It's only legal to remat for a use, not a def.
984   if (ReMat && (MRInfo & VirtRegMap::isMod))
  // Final say belongs to the target hook.
987   return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return true if every range of 'li' is covered by the
// same single basic block (compares the MBB covering each range).
990 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
991   LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
993   MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
998   for (++itr; itr != li.ranges.end(); ++itr) {
999     MachineBasicBlock *mbb2 =
1000       indexes_->getMBBCoveringRange(itr->start, itr->end);
1009 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1010 /// interval on to-be re-materialized operands of MI) with new register.
1011 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1012                                        MachineInstr *MI, unsigned NewVReg,
1014   // There is an implicit use. That means one of the other operand is
1015   // being remat'ed and the remat'ed instruction has li.reg as an
1016   // use operand. Make sure we rewrite that as well.
1017   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1018     MachineOperand &MO = MI->getOperand(i);
1021     unsigned Reg = MO.getReg();
1022     if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1024     if (!vrm.isReMaterialized(Reg))
    // Patch the cached remat clone so it reads NewVReg instead of li.reg.
1026     MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1027     MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1029       UseMO->setReg(NewVReg);
1033 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1034 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
///
/// Rewrites a single instruction's operands that reference li.reg to a fresh
/// (or reused) virtual register NewVReg, attempting load/store folding and
/// rematerialization along the way, and extends/creates the spill interval
/// for NewVReg accordingly. HasDef/HasUse report whether a def and/or use
/// was rewritten. NOTE(review): the returned flag appears to be CanFold
/// ("all rewritten operands could be folded") -- confirm at the elided
/// return site.
1035 bool LiveIntervals::
1036 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1037 bool TrySplit, SlotIndex index, SlotIndex end,
1039 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1040 unsigned Slot, int LdSlot,
1041 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1043 const TargetRegisterClass* rc,
1044 SmallVector<int, 4> &ReMatIds,
1045 const MachineLoopInfo *loopInfo,
1046 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1047 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1048 std::vector<LiveInterval*> &NewLIs) {
1049 bool CanFold = false;
// Note: the operand count is re-read every iteration because folding below
// can replace MI (see the goto RestartInstruction path).
1051 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1052 MachineOperand& mop = MI->getOperand(i);
1055 unsigned Reg = mop.getReg();
1056 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1061 bool TryFold = !DefIsReMat;
1062 bool FoldSS = true; // Default behavior unless it's a remat.
1063 int FoldSlot = Slot;
1065 // If this is the rematerializable definition MI itself and
1066 // all of its uses are rematerialized, simply delete it.
1067 if (MI == ReMatOrigDefMI && CanDelete) {
1068 DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
1070 RemoveMachineInstrFromMaps(MI);
1071 vrm.RemoveMachineInstrFromMaps(MI);
1072 MI->eraseFromParent();
1076 // If def for this use can't be rematerialized, then try folding.
1077 // If def is rematerializable and it's a load, also try folding.
1078 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1080 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1086 // Scan all of the operands of this instruction rewriting operands
1087 // to use NewVReg instead of li.reg as appropriate. We do this for
1090 // 1. If the instr reads the same spilled vreg multiple times, we
1091 // want to reuse the NewVReg.
1092 // 2. If the instr is a two-addr instruction, we are required to
1093 // keep the src/dst regs pinned.
1095 // Keep track of whether we replace a use and/or def so that we can
1096 // create the spill interval with the appropriate range.
1097 SmallVector<unsigned, 2> Ops;
1098 tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);
1100 // Create a new virtual register for the spill interval.
1101 // Create the new register now so we can map the fold instruction
1102 // to the new register so when it is unfolded we get the correct
1104 bool CreatedNewVReg = false;
1106 NewVReg = mri_->createVirtualRegister(rc);
1108 CreatedNewVReg = true;
1110 // The new virtual register should get the same allocation hints as the
1112 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1113 if (Hint.first || Hint.second)
1114 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1120 // Do not fold load / store here if we are splitting. We'll find an
1121 // optimal point to insert a load / store later.
1123 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1124 Ops, FoldSS, FoldSlot, NewVReg)) {
1125 // Folding the load/store can completely change the instruction in
1126 // unpredictable ways, rescan it from the beginning.
1129 // We need to give the new vreg the same stack slot as the
1130 // spilled interval.
1131 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1137 if (isNotInMIMap(MI))
// Folding replaced MI with a new instruction; restart the operand scan
// on the (possibly completely different) folded instruction.
1139 goto RestartInstruction;
1142 // We'll try to fold it later if it's profitable.
1143 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1147 mop.setReg(NewVReg);
1148 if (mop.isImplicit())
1149 rewriteImplicitOps(li, MI, NewVReg, vrm);
1151 // Reuse NewVReg for other reads.
1152 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1153 MachineOperand &mopj = MI->getOperand(Ops[j]);
1154 mopj.setReg(NewVReg);
1155 if (mopj.isImplicit())
1156 rewriteImplicitOps(li, MI, NewVReg, vrm);
1159 if (CreatedNewVReg) {
1161 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
// ReMatIds is indexed by value number; MAX_STACK_SLOT is the "not yet
// assigned" sentinel, so all spill vregs remat'ing the same val# share
// one remat id.
1162 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1163 // Each valnum may have its own remat id.
1164 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1166 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1168 if (!CanDelete || (HasUse && HasDef)) {
1169 // If this is a two-addr instruction then its use operands are
1170 // rematerializable but its def is not. It should be assigned a
1172 vrm.assignVirt2StackSlot(NewVReg, Slot);
1175 vrm.assignVirt2StackSlot(NewVReg, Slot);
1177 } else if (HasUse && HasDef &&
1178 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1179 // If this interval hasn't been assigned a stack slot (because earlier
1180 // def is a deleted remat def), do it now.
1181 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1182 vrm.assignVirt2StackSlot(NewVReg, Slot);
1185 // Re-matting an instruction with virtual register use. Add the
1186 // register as an implicit use on the use MI.
1187 if (DefIsReMat && ImpUse)
1188 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1190 // Create a new register interval for this spill / remat.
1191 LiveInterval &nI = getOrCreateInterval(NewVReg);
1192 if (CreatedNewVReg) {
1193 NewLIs.push_back(&nI);
1194 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1196 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Build the live range(s) for the spill interval: a reload range
// [load, def) for a use, and/or a store range [def, store) for a def.
1200 if (CreatedNewVReg) {
1201 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1202 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1203 DEBUG(dbgs() << " +" << LR);
1206 // Extend the split live interval to this def / use.
1207 SlotIndex End = index.getDefIndex();
1208 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1209 nI.getValNumInfo(nI.getNumValNums()-1));
1210 DEBUG(dbgs() << " +" << LR);
1215 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1216 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1217 DEBUG(dbgs() << " +" << LR);
1222 dbgs() << "\t\t\t\tAdded new interval: ";
1223 nI.print(dbgs(), tri_);
/// anyKillInMBBAfterIdx - Return true if the live interval is killed inside
/// MBB strictly after Idx (i.e. somewhere in (Idx, end-of-block]). Used to
/// decide whether a spill store at Idx is the last touch in the block.
1229 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1231 MachineBasicBlock *MBB,
1232 SlotIndex Idx) const {
1233 return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
1236 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1237 /// during spilling.
// Pairs an instruction with its slot index so the rewrite list can be
// sorted into instruction order (see RewriteInfoCompare below).
1239 struct RewriteInfo {
1242 RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
// RewriteInfoCompare - Strict weak ordering of RewriteInfo entries by slot
// index; used with std::sort to process defs/uses in program order.
1245 struct RewriteInfoCompare {
1246 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1247 return LHS.Index < RHS.Index;
/// rewriteInstructionsForSpills - Rewrite every def/use of li.reg inside the
/// live range *I: collect the instructions touching the range, sort them by
/// slot index, rewrite each via rewriteInstructionForSpills, and record the
/// per-block spill (last def) and restore (first use) points in
/// SpillIdxes/RestoreIdxes so addIntervalsForSpills can later insert the
/// actual load/store instructions.
1252 void LiveIntervals::
1253 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1254 LiveInterval::Ranges::const_iterator &I,
1255 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1256 unsigned Slot, int LdSlot,
1257 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1259 const TargetRegisterClass* rc,
1260 SmallVector<int, 4> &ReMatIds,
1261 const MachineLoopInfo *loopInfo,
1262 BitVector &SpillMBBs,
1263 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1264 BitVector &RestoreMBBs,
1265 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1266 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1267 std::vector<LiveInterval*> &NewLIs) {
1268 bool AllCanFold = true;
1269 unsigned NewVReg = 0;
1270 SlotIndex start = I->start.getBaseIndex();
// Round the (exclusive) range end up to the next base index so the
// half-open [start, end) test below covers the last instruction.
1271 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1273 // First collect all the def / use in this live range that will be rewritten.
1274 // Make sure they are sorted according to instruction index.
1275 std::vector<RewriteInfo> RewriteMIs;
1276 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1277 re = mri_->reg_end(); ri != re; ) {
1278 MachineInstr *MI = &*ri;
1279 MachineOperand &O = ri.getOperand();
1281 if (MI->isDebugValue()) {
1282 // Modify DBG_VALUE now that the value is in a spill slot.
1283 if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
1284 uint64_t Offset = MI->getOperand(1).getImm();
1285 const MDNode *MDPtr = MI->getOperand(2).getMetadata();
1286 DebugLoc DL = MI->getDebugLoc();
1287 int FI = isLoadSS ? LdSlot : (int)Slot;
1288 if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
1289 Offset, MDPtr, DL)) {
1290 DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
1291 ReplaceMachineInstrInMaps(MI, NewDV);
1292 MachineBasicBlock *MBB = MI->getParent();
1293 MBB->insert(MBB->erase(MI), NewDV);
// If the target cannot describe the spilled value, drop the DBG_VALUE
// rather than keep stale debug info.
1298 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1299 RemoveMachineInstrFromMaps(MI);
1300 vrm.RemoveMachineInstrFromMaps(MI);
1301 MI->eraseFromParent();
1304 assert(!(O.isImplicit() && O.isUse()) &&
1305 "Spilling register that's used as implicit use?");
1306 SlotIndex index = getInstructionIndex(MI);
1307 if (index < start || index >= end)
1311 // Must be defined by an implicit def. It should not be spilled. Note,
1312 // this is for correctness reason. e.g.
1313 // 8 %reg1024<def> = IMPLICIT_DEF
1314 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1315 // The live range [12, 14) are not part of the r1024 live interval since
1316 // it's defined by an implicit def. It will not conflicts with live
1317 // interval of r1025. Now suppose both registers are spilled, you can
1318 // easily see a situation where both registers are reloaded before
1319 // the INSERT_SUBREG and both target registers that would overlap.
1321 RewriteMIs.push_back(RewriteInfo(index, MI));
1323 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
// If the remat'ed def itself reads a virtual register, that register must
// stay live (and unspillable) at every remat site.
1325 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1326 // Now rewrite the defs and uses.
1327 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1328 RewriteInfo &rwi = RewriteMIs[i];
1330 SlotIndex index = rwi.Index;
1331 MachineInstr *MI = rwi.MI;
1332 // If MI def and/or use the same register multiple times, then there
1333 // are multiple entries.
1334 while (i != e && RewriteMIs[i].MI == MI) {
1335 assert(RewriteMIs[i].Index == index);
1338 MachineBasicBlock *MBB = MI->getParent();
1340 if (ImpUse && MI != ReMatDefMI) {
1341 // Re-matting an instruction with virtual register use. Prevent interval
1342 // from being spilled.
1343 getInterval(ImpUse).markNotSpillable();
// MBBVRegsMap remembers the spill vreg already used in this block so
// multiple defs/uses in one block share a single interval.
1346 unsigned MBBId = MBB->getNumber();
1347 unsigned ThisVReg = 0;
1349 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1350 if (NVI != MBBVRegsMap.end()) {
1351 ThisVReg = NVI->second;
1358 // It's better to start a new interval to avoid artifically
1359 // extend the new interval.
1360 if (MI->readsWritesVirtualRegister(li.reg) ==
1361 std::make_pair(false,true)) {
1362 MBBVRegsMap.erase(MBB->getNumber());
1368 bool IsNew = ThisVReg == 0;
1370 // This ends the previous live interval. If all of its def / use
1371 // can be folded, give it a low spill weight.
1372 if (NewVReg && TrySplit && AllCanFold) {
1373 LiveInterval &nI = getOrCreateInterval(NewVReg);
1380 bool HasDef = false;
1381 bool HasUse = false;
1382 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1383 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1384 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1385 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1386 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1387 if (!HasDef && !HasUse)
1390 AllCanFold &= CanFold;
1392 // Update weight of spill interval.
1393 LiveInterval &nI = getOrCreateInterval(NewVReg);
1395 // The spill weight is now infinity as it cannot be spilled again.
1396 nI.markNotSpillable();
1400 // Keep track of the last def and first use in each MBB.
1402 if (MI != ReMatOrigDefMI || !CanDelete) {
1403 bool HasKill = false;
1405 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1407 // If this is a two-address code, then this index starts a new VNInfo.
1408 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1410 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1412 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1413 SpillIdxes.find(MBBId);
// Record (or update) the spill point for this block; only the last def
// of each spill vreg in a block needs a store.
1415 if (SII == SpillIdxes.end()) {
1416 std::vector<SRInfo> S;
1417 S.push_back(SRInfo(index, NewVReg, true));
1418 SpillIdxes.insert(std::make_pair(MBBId, S));
1419 } else if (SII->second.back().vreg != NewVReg) {
1420 SII->second.push_back(SRInfo(index, NewVReg, true));
1421 } else if (index > SII->second.back().index) {
1422 // If there is an earlier def and this is a two-address
1423 // instruction, then it's not possible to fold the store (which
1424 // would also fold the load).
1425 SRInfo &Info = SII->second.back();
1427 Info.canFold = !HasUse;
1429 SpillMBBs.set(MBBId);
1430 } else if (SII != SpillIdxes.end() &&
1431 SII->second.back().vreg == NewVReg &&
1432 index > SII->second.back().index) {
1433 // There is an earlier def that's not killed (must be two-address).
1434 // The spill is no longer needed.
1435 SII->second.pop_back();
1436 if (SII->second.empty()) {
1437 SpillIdxes.erase(MBBId);
1438 SpillMBBs.reset(MBBId);
// Symmetric bookkeeping for restores: only the first use of a spill
// vreg in a block needs a reload.
1445 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1446 SpillIdxes.find(MBBId);
1447 if (SII != SpillIdxes.end() &&
1448 SII->second.back().vreg == NewVReg &&
1449 index > SII->second.back().index)
1450 // Use(s) following the last def, it's not safe to fold the spill.
1451 SII->second.back().canFold = false;
1452 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1453 RestoreIdxes.find(MBBId);
1454 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1455 // If we are splitting live intervals, only fold if it's the first
1456 // use and there isn't another use later in the MBB.
1457 RII->second.back().canFold = false;
1459 // Only need a reload if there isn't an earlier def / use.
1460 if (RII == RestoreIdxes.end()) {
1461 std::vector<SRInfo> Infos;
1462 Infos.push_back(SRInfo(index, NewVReg, true));
1463 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1465 RII->second.push_back(SRInfo(index, NewVReg, true));
1467 RestoreMBBs.set(MBBId);
1471 // Update spill weight.
1472 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1473 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1476 if (NewVReg && TrySplit && AllCanFold) {
1477 // If all of its def / use can be folded, give it a low spill weight.
1478 LiveInterval &nI = getOrCreateInterval(NewVReg);
/// alsoFoldARestore - Return true if block Id has a pending restore of vr at
/// exactly this index that is still marked foldable. Used when folding a
/// spill store to check whether the matching reload can be folded too.
1483 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1484 unsigned vr, BitVector &RestoreMBBs,
1485 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1486 if (!RestoreMBBs[Id])
1488 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1489 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1490 if (Restores[i].index == index &&
1491 Restores[i].vreg == vr &&
1492 Restores[i].canFold)
/// eraseRestoreInfo - Invalidate the recorded restore point for vr at index
/// in block Id (its index is reset to the sentinel SlotIndex()) so no reload
/// is issued for it later, e.g. after the restore has been folded away.
1497 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1498 unsigned vr, BitVector &RestoreMBBs,
1499 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1500 if (!RestoreMBBs[Id])
1502 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1503 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
// NOTE(review): the condition tests Restores[i].vreg for nonzero; the
// symmetric check in alsoFoldARestore compares against vr -- confirm
// this is intentional and not a truncated "== vr".
1504 if (Restores[i].index == index && Restores[i].vreg)
1505 Restores[i].index = SlotIndex();
1508 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1509 /// spilled and create empty intervals for their uses.
1511 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1512 const TargetRegisterClass* rc,
1513 std::vector<LiveInterval*> &NewLIs) {
// Walk every remaining def/use of li.reg. At this point ordinary operands
// have already been rewritten; what is left is implicit-def related.
1514 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1515 re = mri_->reg_end(); ri != re; ) {
1516 MachineOperand &O = ri.getOperand();
1517 MachineInstr *MI = &*ri;
1519 if (MI->isDebugValue()) {
1520 // Remove debug info for now.
1522 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
// A leftover def must be the IMPLICIT_DEF itself; erase it outright.
1526 assert(MI->isImplicitDef() &&
1527 "Register def was not rewritten?");
1528 RemoveMachineInstrFromMaps(MI);
1529 vrm.RemoveMachineInstrFromMaps(MI);
1530 MI->eraseFromParent();
1532 // This must be an use of an implicit_def so it's not part of the live
1533 // interval. Create a new empty live interval for it.
1534 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1535 unsigned NewVReg = mri_->createVirtualRegister(rc);
1537 vrm.setIsImplicitlyDefined(NewVReg);
1538 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Rewrite all operands of this user that still reference li.reg.
1539 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1540 MachineOperand &MO = MI->getOperand(i);
1541 if (MO.isReg() && MO.getReg() == li.reg) {
/// getSpillWeight - Compute the additional spill weight contributed by a
/// single def and/or use at the given loop depth: (isDef + isUse) scaled by
/// a tempered exponential of the depth.
1551 LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1552 // Limit the loop depth ridiculousness.
1553 if (loopDepth > 200)
1556 // The loop depth is used to roughly estimate the number of times the
1557 // instruction is executed. Something like 10^d is simple, but will quickly
1558 // overflow a float. This expression behaves like 10^d for small d, but is
1559 // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1560 // headroom before overflow.
1561 float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
// bools promote to 0/1, so this counts how many of def/use apply.
1563 return (isDef + isUse) * lc;
/// normalizeSpillWeights - Normalize the spill weight of each newly created
/// spill interval in NewLIs (delegates per-interval to normalizeSpillWeight).
1567 LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1568 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1569 normalizeSpillWeight(*NewLIs[i]);
/// addIntervalsForSpills - Spill the live interval li, creating one new
/// (short) live interval per rewritten def/use cluster. Handles the
/// already-split case, rematerialization analysis, stack slot assignment,
/// rewriting, and finally spill/restore insertion and kill-flag fixup.
/// Returns the new intervals the register allocator must allocate.
1572 std::vector<LiveInterval*> LiveIntervals::
1573 addIntervalsForSpills(const LiveInterval &li,
1574 SmallVectorImpl<LiveInterval*> &SpillIs,
1575 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1576 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1579 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1580 li.print(dbgs(), tri_);
1584 // Each bit specify whether a spill is required in the MBB.
1585 BitVector SpillMBBs(mf_->getNumBlockIDs());
1586 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1587 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1588 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1589 DenseMap<unsigned,unsigned> MBBVRegsMap;
1590 std::vector<LiveInterval*> NewLIs;
1591 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-value-number rematerialization state, indexed by VNInfo::id.
1593 unsigned NumValNums = li.getNumValNums();
1594 SmallVector<MachineInstr*, 4> ReMatDefs;
1595 ReMatDefs.resize(NumValNums, NULL);
1596 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1597 ReMatOrigDefs.resize(NumValNums, NULL);
1598 SmallVector<int, 4> ReMatIds;
1599 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1600 BitVector ReMatDelete(NumValNums);
1601 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1603 // Spilling a split live interval. It cannot be split any further. Also,
1604 // it's also guaranteed to be a single val# / range interval.
1605 if (vrm.getPreSplitReg(li.reg)) {
1606 vrm.setIsSplitFromReg(li.reg, 0);
1607 // Unset the split kill marker on the last use.
1608 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1609 if (KillIdx != SlotIndex()) {
1610 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1611 assert(KillMI && "Last use disappeared?");
1612 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1613 assert(KillOp != -1 && "Last use disappeared?");
1614 KillMI->getOperand(KillOp).setIsKill(false);
1616 vrm.removeKillPoint(li.reg);
1617 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1618 Slot = vrm.getStackSlot(li.reg);
1619 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1620 MachineInstr *ReMatDefMI = DefIsReMat ?
1621 vrm.getReMaterializedMI(li.reg) : NULL;
1623 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1624 bool isLoad = isLoadSS ||
1625 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1626 bool IsFirstRange = true;
1627 for (LiveInterval::Ranges::const_iterator
1628 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1629 // If this is a split live interval with multiple ranges, it means there
1630 // are two-address instructions that re-defined the value. Only the
1631 // first def can be rematerialized!
1633 // Note ReMatOrigDefMI has already been deleted.
1634 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1635 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1636 false, vrm, rc, ReMatIds, loopInfo,
1637 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1638 MBBVRegsMap, NewLIs);
1640 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1641 Slot, 0, false, false, false,
1642 false, vrm, rc, ReMatIds, loopInfo,
1643 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1644 MBBVRegsMap, NewLIs);
1646 IsFirstRange = false;
// Split intervals take the early-return path here; the rest of the
// function handles a not-yet-split interval.
1649 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1650 normalizeSpillWeights(NewLIs);
// Splitting only pays off when the interval spans multiple blocks.
1654 bool TrySplit = !intervalIsInOneMBB(li);
1657 bool NeedStackSlot = false;
1658 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1660 const VNInfo *VNI = *i;
1661 unsigned VN = VNI->id;
1662 if (VNI->isUnused())
1663 continue; // Dead val#.
1664 // Is the def for the val# rematerializable?
1665 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1666 ? getInstructionFromIndex(VNI->def) : 0;
1668 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1669 // Remember how to remat the def of this val#.
1670 ReMatOrigDefs[VN] = ReMatDefMI;
1671 // Original def may be modified so we have to make a copy here.
1672 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1673 CloneMIs.push_back(Clone);
1674 ReMatDefs[VN] = Clone;
1676 bool CanDelete = true;
1677 if (VNI->hasPHIKill()) {
1678 // A kill is a phi node, not all of its uses can be rematerialized.
1679 // It must not be deleted.
1681 // Need a stack slot if there is any live range where uses cannot be
1683 NeedStackSlot = true;
1686 ReMatDelete.set(VN);
1688 // Need a stack slot if there is any live range where uses cannot be
1690 NeedStackSlot = true;
1694 // One stack slot per live interval.
1695 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1696 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1697 Slot = vrm.assignVirt2StackSlot(li.reg);
1699 // This case only occurs when the prealloc splitter has already assigned
1700 // a stack slot to this vreg.
1702 Slot = vrm.getStackSlot(li.reg);
1705 // Create new intervals and rewrite defs and uses.
1706 for (LiveInterval::Ranges::const_iterator
1707 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1708 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1709 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1710 bool DefIsReMat = ReMatDefMI != NULL;
1711 bool CanDelete = ReMatDelete[I->valno->id];
1713 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1714 bool isLoad = isLoadSS ||
1715 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1716 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1717 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1718 CanDelete, vrm, rc, ReMatIds, loopInfo,
1719 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1720 MBBVRegsMap, NewLIs);
1723 // Insert spills / restores if we are splitting.
1725 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1726 normalizeSpillWeights(NewLIs);
// Walk the recorded per-block spill points: try to fold each store into
// its defining instruction, otherwise ask the spiller to insert one.
1730 SmallPtrSet<LiveInterval*, 4> AddedKill;
1731 SmallVector<unsigned, 2> Ops;
1732 if (NeedStackSlot) {
1733 int Id = SpillMBBs.find_first();
1735 std::vector<SRInfo> &spills = SpillIdxes[Id];
1736 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1737 SlotIndex index = spills[i].index;
1738 unsigned VReg = spills[i].vreg;
1739 LiveInterval &nI = getOrCreateInterval(VReg);
1740 bool isReMat = vrm.isReMaterialized(VReg);
1741 MachineInstr *MI = getInstructionFromIndex(index);
1742 bool CanFold = false;
1743 bool FoundUse = false;
1745 if (spills[i].canFold) {
1747 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1748 MachineOperand &MO = MI->getOperand(j);
1749 if (!MO.isReg() || MO.getReg() != VReg)
1756 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1757 RestoreMBBs, RestoreIdxes))) {
1758 // MI has two-address uses of the same register. If the use
1759 // isn't the first and only use in the BB, then we can't fold
1760 // it. FIXME: Move this to rewriteInstructionsForSpills.
1767 // Fold the store into the def if possible.
1768 bool Folded = false;
1769 if (CanFold && !Ops.empty()) {
1770 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1773 // Also folded uses, do not issue a load.
1774 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1775 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1777 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1781 // Otherwise tell the spiller to issue a spill.
1783 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1784 bool isKill = LR->end == index.getStoreIndex();
1785 if (!MI->registerDefIsDead(nI.reg))
1786 // No need to spill a dead def.
1787 vrm.addSpillPoint(VReg, isKill, MI);
1789 AddedKill.insert(&nI);
1792 Id = SpillMBBs.find_next(Id);
// Walk the recorded per-block restore points: try to fold each reload
// (or remat) into its user, otherwise record a restore point.
1796 int Id = RestoreMBBs.find_first();
1798 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1799 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1800 SlotIndex index = restores[i].index;
// SlotIndex() is the sentinel set by eraseRestoreInfo: restore was
// already handled by a folded spill.
1801 if (index == SlotIndex())
1803 unsigned VReg = restores[i].vreg;
1804 LiveInterval &nI = getOrCreateInterval(VReg);
1805 bool isReMat = vrm.isReMaterialized(VReg);
1806 MachineInstr *MI = getInstructionFromIndex(index);
1807 bool CanFold = false;
1809 if (restores[i].canFold) {
1811 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1812 MachineOperand &MO = MI->getOperand(j);
1813 if (!MO.isReg() || MO.getReg() != VReg)
1817 // If this restore were to be folded, it would have been folded
1826 // Fold the load into the use if possible.
1827 bool Folded = false;
1828 if (CanFold && !Ops.empty()) {
1830 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1832 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1834 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1835 // If the rematerializable def is a load, also try to fold it.
1836 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1837 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1838 Ops, isLoadSS, LdSlot, VReg);
1840 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1842 // Re-matting an instruction with virtual register use. Add the
1843 // register as an implicit use on the use MI and mark the register
1844 // interval as unspillable.
1845 LiveInterval &ImpLi = getInterval(ImpUse);
1846 ImpLi.markNotSpillable();
1847 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1852 // If folding is not possible / failed, then tell the spiller to issue a
1853 // load / rematerialization for us.
1855 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1857 vrm.addRestorePoint(VReg, MI);
1859 Id = RestoreMBBs.find_next(Id);
1862 // Finalize intervals: add kills, finalize spill weights, and filter out
1864 std::vector<LiveInterval*> RetNewLIs;
1865 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1866 LiveInterval *LI = NewLIs[i];
// Normalize the accumulated weight by the interval's size.
1868 LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
1869 if (!AddedKill.count(LI)) {
1870 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1871 SlotIndex LastUseIdx = LR->end.getBaseIndex();
1872 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1873 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1874 assert(UseIdx != -1);
1875 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
1876 LastUse->getOperand(UseIdx).setIsKill();
1877 vrm.addKillPoint(LI->reg, LastUseIdx);
1880 RetNewLIs.push_back(LI);
1884 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1885 normalizeSpillWeights(RetNewLIs);
1889 /// hasAllocatableSuperReg - Return true if the specified physical register has
1890 /// any super register that's allocatable.
1891 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// Only super-registers that are both allocatable and tracked with a live
// interval count.
1892 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1893 if (allocatableRegs_[*AS] && hasInterval(*AS))
1898 /// getRepresentativeReg - Find the largest super register of the specified
1899 /// physical register.
1900 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1901 // Find the largest super-register that is allocatable.
1902 unsigned BestReg = Reg;
1903 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1904 unsigned SuperReg = *AS;
// Pick a super-register that has an interval but no allocatable
// super-register of its own, i.e. the top of the register tree that we
// track. NOTE(review): confirm the loop terminates after the first such
// candidate (break elided from this view).
1905 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1913 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1914 /// specified interval that conflicts with the specified physical register.
1915 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1916 unsigned PhysReg) const {
1917 unsigned NumConflicts = 0;
// Conflicts are measured against the representative (largest) physreg so
// aliasing sub-registers are accounted for.
1918 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1919 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1920 E = mri_->reg_end(); I != E; ++I) {
1921 MachineOperand &O = I.getOperand();
1922 MachineInstr *MI = O.getParent();
// DBG_VALUEs are not real uses; they must not affect allocation decisions.
1923 if (MI->isDebugValue())
1925 SlotIndex Index = getInstructionIndex(MI);
1926 if (pli.liveAt(Index))
1929 return NumConflicts;
1932 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1933 /// around all defs and uses of the specified interval. Return true if it
1934 /// was able to cut its interval.
1935 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1936 unsigned PhysReg, VirtRegMap &vrm) {
1937 unsigned SpillReg = getRepresentativeReg(PhysReg);
1939 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1940 // If there are registers which alias PhysReg, but which are not a
1941 // sub-register of the chosen representative super register. Assert
1942 // since we can't handle it yet.
1943 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
1944 tri_->isSuperRegister(*AS, SpillReg));
// Collect SpillReg and all of its sub-registers that have intervals; each
// of them must be cut around the conflicting instructions.
1947 SmallVector<unsigned, 4> PRegs;
1948 if (hasInterval(SpillReg))
1949 PRegs.push_back(SpillReg);
1951 SmallSet<unsigned, 4> Added;
1952 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
1953 if (Added.insert(*AS) && hasInterval(*AS)) {
1954 PRegs.push_back(*AS);
1955 for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
// SeenMIs guards against processing an instruction once per operand.
1960 SmallPtrSet<MachineInstr*, 8> SeenMIs;
1961 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1962 E = mri_->reg_end(); I != E; ++I) {
1963 MachineOperand &O = I.getOperand();
1964 MachineInstr *MI = O.getParent();
1965 if (MI->isDebugValue() || SeenMIs.count(MI))
1968 SlotIndex Index = getInstructionIndex(MI);
1969 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
1970 unsigned PReg = PRegs[i];
1971 LiveInterval &pli = getInterval(PReg);
1972 if (!pli.liveAt(Index))
// The physreg is live across this def/use of li.reg: request an
// emergency spill and carve the instruction's slot range out of the
// physreg interval.
1974 vrm.addEmergencySpill(PReg, MI);
1975 SlotIndex StartIdx = Index.getLoadIndex();
1976 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
1977 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
1978 pli.removeRange(StartIdx, EndIdx);
// The range cannot be cut (it straddles multiple live ranges): there
// is genuinely no register left; report a fatal allocation failure.
1982 raw_string_ostream Msg(msg);
1983 Msg << "Ran out of registers during register allocation!";
1984 if (MI->isInlineAsm()) {
1985 Msg << "\nPlease check your inline asm statement for invalid "
1986 << "constraints:\n";
1987 MI->print(Msg, tm_);
1989 report_fatal_error(Msg.str());
// Also trim any live sub-register intervals around this instruction.
1991 for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
1992 if (!hasInterval(*AS))
1994 LiveInterval &spli = getInterval(*AS);
1995 if (spli.liveAt(Index))
1996 spli.removeRange(Index.getLoadIndex(),
1997 Index.getNextIndex().getBaseIndex());
2004 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2005 MachineInstr* startInst) {
2006 LiveInterval& Interval = getOrCreateInterval(reg);
2007 VNInfo* VN = Interval.getNextValue(
2008 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2009 startInst, true, getVNInfoAllocator());
2010 VN->setHasPHIKill(true);
2012 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2013 getMBBEndIdx(startInst->getParent()), VN);
2014 Interval.addRange(LR);