//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
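//
// For illustration, using the [i, j) notation defined in computeIntervals()
// below: a virtual register defined at instruction index 4 and killed at
// index 20 gets a live interval such as [4, 20) -- live at every index from
// the def up to, but not including, the kill.
//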
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.DestroyAll();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
/// runOnMachineFunction - Register allocate the whole function
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      if (mii->isDebugValue())
        OS << "    \t" << *mii;
      else
        OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries.
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
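  // Note: a SlotIndex addresses one of several slots (load, use, def, store)
  // within a single instruction number, and getBaseIndex() strips the slot
  // off. The expression above takes the last slot actually covered by the
  // range, rounds it down to its instruction, and steps one instruction
  // forward, so 'end' is an exclusive bound aligned to instruction
  // boundaries, matching 'idx'.
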
  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}
/// conflictsWithSubPhysRegRef - Similar to conflictsWithPhysRegRef except
/// it checks for sub-register references and it can check uses as well.
bool LiveIntervals::conflictsWithSubPhysRegRef(LiveInterval &li,
                                  unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  else
    dbgs() << "%reg" << reg;
}
/// MultipleDefsByMI - Return true if the specified instruction has more than
/// one def of the same virtual register (with distinct sub-register indices).
static bool MultipleDefsByMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    if (MO.getReg() == Reg && MO.isDef()) {
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
             MO.getSubReg());
      return true;
    }
  }
  return false;
}
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
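      // (After PHI elimination each predecessor block ends with a copy into
      // the phi-join register, so instead of flowing through, the value is
      // re-defined at the start of each killing block -- see the vi.Kills
      // loop below.)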
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [getMBBStartIdx(block), getMBBEndIdx(block)) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create an interval with a NEW value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
                                      VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsByMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction,
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions.
      // Return here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
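      //
      // For example (illustrative vreg numbers), given the two-address
      // instruction
      //   %reg1025<def> = ADD %reg1025<kill>, %reg1026
      // the single original value is split so that value #1 covers
      // [DefIndex, RedefIndex) and value #0 is re-defined by the ADD itself.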
      assert(interval.containsOneValue());
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else {
      assert(lv_->isPHIJoin(interval.reg) && "Multiply defined register");
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    }
  }

  DEBUG(dbgs() << '\n');
}
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
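  // E.g. for an earlyclobber dead def at instruction i, 'start' is i's use
  // slot and 'end' becomes i's store slot, so the interval still covers the
  // def slot that lies between them.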
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing
  // instruction where we know it's dead is if it is live-in to the function
  // and never used. Another possible case is that the implicit use of the
  // physical register has been deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
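    // For example, on x86 a def of %EAX also defines %AX, %AH, and %AL.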
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills; if the register reaches a def before it's killed, then
  // it shouldn't be considered a live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = getMBBEndIdx(MBB);
    }
  }

  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}
/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j), where 1 <= i <= j < N, during
/// which a variable is live.
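///
/// For example (illustrative print() output): a vreg defined at index 18
/// and killed at index 22 shows up as
///   %reg1024 = [18,22:0)  0@18
/// i.e. one range with a single value number (#0) defined at index 18.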
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def's that define values which are live-out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  // Physical registers get infinite weight so they are never spilled.
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->isExtractSubreg()) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
      unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
      if (SrcSubReg == DstSubReg)
        // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
        // reg1034 can still be coalesced to EDX.
        return Reg;
      assert(DstSubReg == 0);
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    }
    return Reg;
  } else if (VNI->getCopy()->isInsertSubreg() ||
             VNI->getCopy()->isSubregToReg())
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
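  //
  // For example (illustrative x86 PIC code): a constant-pool load such as
  //   %reg1030<def> = MOV32rm %reg1029, 1, %reg0, <cp#0>, %reg0
  // may be rematerializable even though it uses the PIC base vreg %reg1029;
  // the checks below verify that %reg1029's value is still available at
  // every point where the load would be re-issued.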
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}
/// isReMaterializable - Returns true if the definition MI of every val# of
/// the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
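    //
    // For instance (illustrative), an instruction such as
    //   %reg1026<def> = ADD %reg1024, %reg1024
    // reads the spilled vreg twice; both reads must be rewritten to the
    // same NewVReg so a single reload can feed them.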

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  SlotIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHI())
      continue;

    SlotIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx <= End)
      return true;
  }
  return false;
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                           Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
          continue;
        }
      }

      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      continue;
    }
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}
void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = SlotIndex();
}
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    if (O.isDef()) {
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}
float
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
    loopDepth = 200;

  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
  // headroom before overflow.
  float lc = powf(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
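  // Illustrative values of the formula above (approximate):
  //   loopDepth = 1   -> lc = (1 + 100/11)^1    ~= 10.1
  //   loopDepth = 3   -> lc = (1 + 100/13)^3    ~= 6.6e2
  //   loopDepth = 200 -> lc = (1 + 100/210)^200 ~= 6.7e33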

  return (isDef + isUse) * lc;
}
void
LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
    normalizeSpillWeight(*NewLIs[i]);
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.dump();
      dbgs() << '\n';
    });

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // Create a new register for this spill.
      LiveInterval &nI = getOrCreateInterval(NewVReg);
      nI.markNotSpillable();

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
             E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      SlotIndex index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DEBUG({
          dbgs() << "\t\t\t\tadded new interval: ";
          nI.dump();
          dbgs() << '\n';
        });
    }

    RI = mri_->reg_begin(li.reg);
  }

  return added;
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {

  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. It's
  // also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }
  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // The value is killed by a phi node, so not all of its uses can be
        // rematerialized; the def must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }
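  // Example of the PHI-kill case: a remat'able def such as
  //   %reg1024 = MOV32ri 7
  // whose value reaches a phi join cannot have the uses beyond the join
  // rewritten to the remat'ed constant, so the value must also be available
  // in the stack slot (illustrative instruction; this sketches the reasoning
  // and is not target-specific).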
  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);
    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }
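  // Design note: every new interval split from li reuses this one slot; since
  // all the pieces name the same original value, sharing a slot keeps them
  // coherent and avoids stack-to-stack copies.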
  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                                 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                 CanDelete, vrm, rc, ReMatIds, loopInfo,
                                 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                 MBBVRegsMap, NewLIs);
  }
  // If we are not splitting, we are done; spills / restores are inserted only
  // when splitting across basic blocks.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }
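  // When splitting, rewriteInstructionsForSpills recorded candidate spill and
  // restore points per basic block in SpillIdxes / RestoreIdxes. Below we try
  // to fold each into a nearby instruction as a memory operand; only when
  // folding fails does the spiller get asked to emit an explicit store, load,
  // or rematerialization.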
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
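  // Restores are handled analogously: visit each block that needs one and
  // prefer folding the reload (or a remat'ed load) into the using instruction
  // over emitting a separate load.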
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and mark the register
            // interval as unspillable.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.markNotSpillable();
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }
  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}
/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}
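// For example, on X86 a query for AX would walk its super-registers EAX and
// RAX and answer true as soon as one of them is allocatable and has a live
// interval (illustrative registers; the walk itself is target-independent).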
/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
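// E.g. the representative of AL on X86-64 would typically be RAX: the widest
// super-register that has no allocatable super-register of its own and still
// has a live interval (illustrative; the exact answer depends on which
// intervals exist).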
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue())
      continue;
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}
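// The returned count is a cheap interference metric: a caller can compare
// candidate physical registers and gauge whether spilling one around li (see
// spillPhysRegAroundRegDefsUses below) is likely to pay off.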
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there is a register which aliases PhysReg but is not a sub-register
    // of the chosen representative super register, assert, since we can't
    // handle that case yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }

  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue() || SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        report_fatal_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}
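// Note that the removeRange calls above only carve the affected slots out of
// the physreg intervals; the actual store/reload code is emitted later by the
// spiller from the emergency spill points recorded in the VirtRegMap.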
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
  LiveRange LR(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}
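// addLiveRangeToEndOfBlock is meant for clients that insert a new def late,
// for instance PHI elimination inserting copies, and need the interval
// extended from that def to the end of its block; the new val# is marked with
// a PHI kill so downstream code treats it conservatively.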