1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

// Pass-wide statistics, reported with -stats.
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

// Register this analysis so -liveintervals works on the command line.
char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
/// getAnalysisUsage - Declare the analyses this pass requires and preserves.
/// PHI elimination and two-address lowering are required, so by the time this
/// pass runs the function is out of SSA form.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  // Transitive: passes that use LiveIntervals also get SlotIndexes.
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
  // NOTE(review): closing brace not visible in this copy -- lines appear to
  // have been elided; verify against upstream LLVM.
/// releaseMemory - Free all per-function state owned by this analysis.
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    // NOTE(review): the loop body (presumably deleting I->second) and the
    // r2iMap_ clear are not visible in this copy -- verify against upstream.

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.DestroyAll();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    // NOTE(review): a CloneMIs.pop_back() is not visible here; as shown this
    // loop would never terminate -- source appears truncated.
    mf_->DeleteMachineInstr(MI);
/// runOnMachineFunction - Register allocate the whole function
///
/// Caches pointers to the analyses this pass depends on, then computes the
/// live intervals for the function.
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // NOTE(review): the 'mf_ = &fn;' initialization is not visible in this copy
  // even though mf_ is dereferenced on the next line -- source appears
  // truncated; verify against upstream.
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);
  // Record how many intervals were built for -stats.
  numIntervals += getNumIntervals();
  // NOTE(review): the computeIntervals() call and the 'return true;' are not
  // visible here -- source appears truncated.
/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    // NOTE(review): per-interval trailing newline and closing braces are not
    // visible in this copy -- source appears truncated.
/// printInstrs - Print every machine instruction with its slot index, one
/// basic block at a time.
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      if (mii->isDebugValue())
        // NOTE(review): the DBG_VALUE branch body and an 'else' are not
        // visible in this copy -- DBG_VALUEs have no slot index, so upstream
        // prints them differently; verify against upstream.
        OS << getInstructionIndex(mii) << '\t' << *mii;
151 void LiveIntervals::dumpInstrs() const {
/// conflictsWithPhysReg - Return true if assigning physical register 'reg'
/// would conflict with interval 'li' (which must span a single range inside
/// one basic block). Copies to/from li.reg itself are permitted.
// NOTE(review): several control-flow statements (returns, continues, closing
// braces) are not visible in this copy -- the code below is transcribed
// as-is; verify against upstream LLVM before relying on it.
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())

  MachineBasicBlock::const_iterator E = lastMI;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        // Map a virtual operand to its assigned physreg before overlap check.
        if (!vrm.hasPhys(PhysReg))
        PhysReg = vrm.getPhys(PhysReg);
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))

  // No conflicts found.
/// conflictsWithAliasRef - Return true if any instruction inside 'li' (other
/// than already-joined copies) has an explicit physreg operand that overlaps
/// 'Reg'.
// NOTE(review): several control-flow statements (returns, continues, braces)
// are not visible in this copy -- transcribed as-is; verify against upstream.
bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    // Walk every slot index covered by this range.
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
        continue;               // skip deleted instructions
      if (JoinedCopies.count(MI))
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        unsigned PhysReg = MO.getReg();
        // Only explicit physical-register operands can conflict here.
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
        if (tri_->regsOverlap(Reg, PhysReg))
/// printRegName - Print a physical register by name, or a virtual register
/// as %regNNN, to the debug stream.
// NOTE(review): the 'else' between the two prints is not visible in this
// copy -- source appears truncated; verify against upstream.
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
    dbgs() << "%reg" << reg;
/// MultipleDefsBySameMI - Return true if the operand at MOIdx is one of
/// several defs of the same virtual register by this single instruction
/// (distinct sub-register defs, e.g. from REG_SEQUENCE lowering).
// NOTE(review): operand-kind guards, the returns, and closing braces are not
// visible in this copy -- transcribed as-is; verify against upstream.
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (MO.getReg() == Reg && MO.isDef()) {
      // Two defs of the same reg by one MI are only legal for distinct
      // sub-registers.
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
/// isPartialRedef - Return true if the specified def at the specific index is
/// partially re-defining the specified live interval. A common case of this is
/// a definition of the sub-register.
// NOTE(review): early 'return false;' statements are not visible in this
// copy -- transcribed as-is; verify against upstream.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  // Only sub-register defs can be partial redefs; earlyclobbers are excluded.
  if (!MO.getSubReg() || MO.isEarlyClobber())

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  if (OldLR->valno->isDefAccurate()) {
    MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
    // Partial redef iff the previous def also wrote (part of) this register.
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
/// handleVirtualRegisterDef - Update the live interval for the virtual
/// register defined by instruction 'mi'. Handles first definitions,
/// two-address/partial redefinitions, and PHI-join values.
// NOTE(review): this copy is missing many lines (parts of the parameter list
// -- MIIdx, MO, MOIdx are referenced below but not declared here -- plus
// numerous returns, braces, and DEBUG wrappers). Transcribed as-is; verify
// against upstream LLVM before editing.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             LiveInterval &interval) {
    dbgs() << "\t\tregister: ";
    printRegName(interval.reg, tri_);

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
      mi->addRegisterDefined(interval.reg);

    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg)) {
      // Some of the REG_SEQUENCE lowering in TwoAddressInstrPass creates
      // implicit defs without really knowing. It shows up as INSERT_SUBREG
      // using an undefined register.
      if (mi->isInsertSubreg())
        mi->getOperand(1).setIsUndef();

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);
      // A phi join register is killed at the end of the MBB and revived as a new
      // valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
           E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create interval with one of a NEW value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
        ValNo->setIsPHIDef(true);
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(dbgs() << " +" << LR);

    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.
    //
    // It may also be partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),

          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))

      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
      llvm_unreachable("Multiply defined register");

  DEBUG(dbgs() << '\n');
/// handlePhysicalRegisterDef - Update the interval for the physical register
/// defined by 'mi'. A physreg's lifetime is confined to its defining basic
/// block, so this scans forward within MBB for the kill or redefinition.
// NOTE(review): this copy is missing lines (MIIdx/MO parameters are used but
// not declared here; various braces, 'return's and DEBUG wrappers are
// absent). Transcribed as-is; verify against upstream LLVM.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
    dbgs() << "\t\tregister: ";
    printRegName(interval.reg, tri_);

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();

    baseIndex = baseIndex.getNextIndex();

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used. Another possible case is the implicit use of the
  // physical register has been deleted by two-address pass.
  end = start.getStoreIndex();

  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
/// handleRegisterDef - Dispatch a register definition to the virtual- or
/// physical-register handler; physreg defs also define their sub-registers.
// NOTE(review): the MIIdx/MO/MOIdx parameters are used below but their
// declarations are not visible in this copy -- source appears truncated;
// verify against upstream LLVM.
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
/// handleLiveInRegister - Build the live range for a register that is live-in
/// to MBB: from the block start to its first kill/redef, or through the whole
/// block if never touched.
// NOTE(review): the MIIdx parameter and several statements (loop headers,
// returns, braces, the 'vni' declaration) are not visible in this copy --
// transcribed as-is; verify against upstream LLVM.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         LiveInterval &interval, bool isAlias) {
    dbgs() << "\t\tlivein register: ";
    printRegName(interval.reg, tri_);

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      // MBB is empty except for DBG_VALUE's.

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  // Live-in register might not be used at all.
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
      DEBUG(dbgs() << " live through");

    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
// NOTE(review): several loop conditions/bodies and braces are not visible in
// this copy (e.g. the MBB loop condition, the per-instruction loop bounds) --
// transcribed as-is; verify against upstream LLVM.
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())

      // Operands are scanned in reverse so def handling sees later operands
      // first.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())

        // handle register defs - build intervals
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
/// createInterval - Allocate a new LiveInterval for 'reg'. Physical registers
/// get an effectively-infinite spill weight so they are never chosen for
/// spilling.
// NOTE(review): closing brace not visible in this copy -- source truncated.
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
// NOTE(review): the 'return NewLI;' and closing brace are not visible in this
// copy -- source appears truncated; verify against upstream.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
// NOTE(review): several guard/return statements and braces are not visible in
// this copy (including the early exit when there is no copy) -- transcribed
// as-is; verify against upstream LLVM.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {

  if (VNI->getCopy()->isExtractSubreg()) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
      unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
      if (SrcSubReg == DstSubReg)
        // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
        // reg1034 can still be coalesced to EDX.
      assert(DstSubReg == 0);
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
  } else if (VNI->getCopy()->isInsertSubreg() ||
             VNI->getCopy()->isSubregToReg())
    // For INSERT_SUBREG / SUBREG_TO_REG, the inserted value is operand 2.
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
  llvm_unreachable("Unrecognized copy instruction!");
820 //===----------------------------------------------------------------------===//
821 // Register allocator hooks.
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
// NOTE(review): several 'continue'/'return' statements, the assert condition
// start, and closing braces are not visible in this copy -- transcribed
// as-is; verify against upstream LLVM.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
    // Unallocatable physregs (e.g. reserved regs) don't count as real uses.
    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
    // FIXME: For now, only remat MI with at most one register operand.
           "Can't rematerialize instruction with multiple register operand!");
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
// NOTE(review): closing brace not visible in this copy -- source truncated.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
// NOTE(review): the final parameter(s) of the signature (isLoad flag), loop
// conditions, returns and braces are not visible in this copy -- transcribed
// as-is; verify against upstream LLVM.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
  if (!tii_->isTriviallyReMaterializable(MI, aa_))

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
    const LiveInterval &ImpLi = getInterval(ImpUse);
    // Every use of the value being remat'ed must also see the implicit use.
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
// NOTE(review): the declaration of 'Dummy2' (the isLoad out-param) is not
// visible in this copy -- source appears truncated; verify against upstream.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  // Forward to the full overload with throwaway out-parameters.
  SmallVector<LiveInterval*, 4> Dummy1;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
// NOTE(review): the trailing signature parameter(s), loop increment/guards,
// returns and braces are not visible in this copy -- transcribed as-is;
// verify against upstream LLVM.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
    const VNInfo *VNI = *i;
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
// NOTE(review): the 'unsigned &MRInfo' parameter (assigned below) and several
// guards/returns/braces are not visible in this copy -- transcribed as-is;
// verify against upstream LLVM.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            SmallVector<unsigned, 2> &FoldOps) {
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
      MRInfo |= (unsigned)VirtRegMap::isMod;
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
      MRInfo |= (unsigned)VirtRegMap::isRef;
    FoldOps.push_back(OpIdx);
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successul, MI is updated with the newly created MI and
// NOTE(review): parts of the signature, the 'MRInfo' declaration, and several
// returns/braces are not visible in this copy -- transcribed as-is; verify
// against upstream LLVM.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    // Transfer all VirtRegMap bookkeeping from the old MI to the folded one.
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
1015 /// canFoldMemoryOperand - Returns true if the specified load / store
1016 /// folding is possible.
1017 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1018 SmallVector<unsigned, 2> &Ops,
1020 // Filter the list of operand indexes that are to be folded. Abort if
1021 // any operand will prevent folding.
1022 unsigned MRInfo = 0;
1023 SmallVector<unsigned, 2> FoldOps;
1024 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1027 // It's only legal to remat for a use, not a def.
1028 if (ReMat && (MRInfo & VirtRegMap::isMod))
// Defer the mechanical legality check to the target hook.
1031 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return true if every live range of li maps to the
// same covering basic block as the first range.
1034 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1035 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
1037 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
1042 for (++itr; itr != li.ranges.end(); ++itr) {
// Each subsequent range must be covered by the same block as the first.
1043 MachineBasicBlock *mbb2 =
1044 indexes_->getMBBCoveringRange(itr->start, itr->end);
1053 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1054 /// interval on to-be re-materialized operands of MI) with new register.
1055 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1056 MachineInstr *MI, unsigned NewVReg,
1058 // There is an implicit use. That means one of the other operand is
1059 // being remat'ed and the remat'ed instruction has li.reg as an
1060 // use operand. Make sure we rewrite that as well.
1061 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1062 MachineOperand &MO = MI->getOperand(i);
1065 unsigned Reg = MO.getReg();
// Only virtual registers can have been rematerialized; skip the rest.
1066 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1068 if (!vrm.isReMaterialized(Reg))
// Redirect the remat'ed instruction's use of li.reg to the new vreg.
1070 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1071 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1073 UseMO->setReg(NewVReg);
1077 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1078 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
/// Returns whether all of the rewritten defs / uses could be folded
/// (CanFold); HasDef / HasUse report what kinds of operands were rewritten.
1079 bool LiveIntervals::
1080 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1081 bool TrySplit, SlotIndex index, SlotIndex end,
1083 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1084 unsigned Slot, int LdSlot,
1085 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1087 const TargetRegisterClass* rc,
1088 SmallVector<int, 4> &ReMatIds,
1089 const MachineLoopInfo *loopInfo,
1090 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1091 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1092 std::vector<LiveInterval*> &NewLIs) {
1093 bool CanFold = false;
1095 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1096 MachineOperand& mop = MI->getOperand(i);
1099 unsigned Reg = mop.getReg();
// Only virtual register operands are candidates for rewriting.
1100 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1105 bool TryFold = !DefIsReMat;
1106 bool FoldSS = true; // Default behavior unless it's a remat.
1107 int FoldSlot = Slot;
1109 // If this is the rematerializable definition MI itself and
1110 // all of its uses are rematerialized, simply delete it.
1111 if (MI == ReMatOrigDefMI && CanDelete) {
1112 DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
1114 RemoveMachineInstrFromMaps(MI);
1115 vrm.RemoveMachineInstrFromMaps(MI);
1116 MI->eraseFromParent();
1120 // If def for this use can't be rematerialized, then try folding.
1121 // If def is rematerializable and it's a load, also try folding.
1122 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1124 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1130 // Scan all of the operands of this instruction rewriting operands
1131 // to use NewVReg instead of li.reg as appropriate. We do this for
1134 // 1. If the instr reads the same spilled vreg multiple times, we
1135 // want to reuse the NewVReg.
1136 // 2. If the instr is a two-addr instruction, we are required to
1137 // keep the src/dst regs pinned.
1139 // Keep track of whether we replace a use and/or def so that we can
1140 // create the spill interval with the appropriate range.
1141 SmallVector<unsigned, 2> Ops;
1142 tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);
1144 // Create a new virtual register for the spill interval.
1145 // Create the new register now so we can map the fold instruction
1146 // to the new register so when it is unfolded we get the correct
1148 bool CreatedNewVReg = false;
1150 NewVReg = mri_->createVirtualRegister(rc);
1152 CreatedNewVReg = true;
1154 // The new virtual register should get the same allocation hints as the
1156 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1157 if (Hint.first || Hint.second)
1158 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1164 // Do not fold load / store here if we are splitting. We'll find an
1165 // optimal point to insert a load / store later.
1167 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1168 Ops, FoldSS, FoldSlot, NewVReg)) {
1169 // Folding the load/store can completely change the instruction in
1170 // unpredictable ways, rescan it from the beginning.
1173 // We need to give the new vreg the same stack slot as the
1174 // spilled interval.
1175 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1181 if (isNotInMIMap(MI))
1183 goto RestartInstruction;
1186 // We'll try to fold it later if it's profitable.
1187 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1191 mop.setReg(NewVReg);
1192 if (mop.isImplicit())
1193 rewriteImplicitOps(li, MI, NewVReg, vrm);
1195 // Reuse NewVReg for other reads.
1196 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1197 MachineOperand &mopj = MI->getOperand(Ops[j]);
1198 mopj.setReg(NewVReg);
1199 if (mopj.isImplicit())
1200 rewriteImplicitOps(li, MI, NewVReg, vrm);
1203 if (CreatedNewVReg) {
// Record the remat info for the fresh vreg; remat ids are per-val#.
1205 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1206 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1207 // Each valnum may have its own remat id.
1208 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1210 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1212 if (!CanDelete || (HasUse && HasDef)) {
1213 // If this is a two-addr instruction then its use operands are
1214 // rematerializable but its def is not. It should be assigned a
1216 vrm.assignVirt2StackSlot(NewVReg, Slot);
1219 vrm.assignVirt2StackSlot(NewVReg, Slot);
1221 } else if (HasUse && HasDef &&
1222 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1223 // If this interval hasn't been assigned a stack slot (because earlier
1224 // def is a deleted remat def), do it now.
1225 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1226 vrm.assignVirt2StackSlot(NewVReg, Slot);
1229 // Re-matting an instruction with virtual register use. Add the
1230 // register as an implicit use on the use MI.
1231 if (DefIsReMat && ImpUse)
1232 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1234 // Create a new register interval for this spill / remat.
1235 LiveInterval &nI = getOrCreateInterval(NewVReg);
1236 if (CreatedNewVReg) {
1237 NewLIs.push_back(&nI);
1238 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1240 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Build the live ranges of the new interval around this def / use point.
1244 if (CreatedNewVReg) {
1245 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1246 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1247 DEBUG(dbgs() << " +" << LR);
1250 // Extend the split live interval to this def / use.
1251 SlotIndex End = index.getDefIndex();
1252 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1253 nI.getValNumInfo(nI.getNumValNums()-1));
1254 DEBUG(dbgs() << " +" << LR);
1259 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1260 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1261 DEBUG(dbgs() << " +" << LR);
1266 dbgs() << "\t\t\t\tAdded new interval: ";
1267 nI.print(dbgs(), tri_);
// anyKillInMBBAfterIdx - Return true if the given val# has a non-PHI kill
// inside MBB at a slot index strictly after Idx.
1273 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1275 MachineBasicBlock *MBB,
1276 SlotIndex Idx) const {
1277 SlotIndex End = getMBBEndIdx(MBB);
1278 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
// PHI kills do not correspond to a real instruction; skip them.
1279 if (VNI->kills[j].isPHI())
1282 SlotIndex KillIdx = VNI->kills[j];
1283 assert(getInstructionFromIndex(KillIdx) && "Dangling kill");
// A kill strictly after Idx but not past the end of this block counts.
1284 if (KillIdx > Idx && KillIdx <= End)
1290 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1291 /// during spilling.
1293 struct RewriteInfo {
// Pairs an instruction with its slot index so rewrites can be processed
// in instruction order.
1296 RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
// RewriteInfoCompare - Strict weak ordering of RewriteInfo entries by
// ascending slot index, for use with std::sort.
1299 struct RewriteInfoCompare {
1300 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1301 return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Rewrite all defs / uses of li inside the
// live range I, recording per-MBB spill points (SpillMBBs / SpillIdxes) and
// restore points (RestoreMBBs / RestoreIdxes) for the spiller to honor.
1306 void LiveIntervals::
1307 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1308 LiveInterval::Ranges::const_iterator &I,
1309 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1310 unsigned Slot, int LdSlot,
1311 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1313 const TargetRegisterClass* rc,
1314 SmallVector<int, 4> &ReMatIds,
1315 const MachineLoopInfo *loopInfo,
1316 BitVector &SpillMBBs,
1317 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1318 BitVector &RestoreMBBs,
1319 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1320 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1321 std::vector<LiveInterval*> &NewLIs) {
1322 bool AllCanFold = true;
1323 unsigned NewVReg = 0;
1324 SlotIndex start = I->start.getBaseIndex();
1325 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1327 // First collect all the def / use in this live range that will be rewritten.
1328 // Make sure they are sorted according to instruction index.
1329 std::vector<RewriteInfo> RewriteMIs;
1330 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1331 re = mri_->reg_end(); ri != re; ) {
1332 MachineInstr *MI = &*ri;
1333 MachineOperand &O = ri.getOperand();
1335 if (MI->isDebugValue()) {
1336 // Modify DBG_VALUE now that the value is in a spill slot.
1337 if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
1338 uint64_t Offset = MI->getOperand(1).getImm();
1339 const MDNode *MDPtr = MI->getOperand(2).getMetadata();
1340 DebugLoc DL = MI->getDebugLoc();
1341 int FI = isLoadSS ? LdSlot : (int)Slot;
1342 if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
1343 Offset, MDPtr, DL)) {
1344 DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
1345 ReplaceMachineInstrInMaps(MI, NewDV);
1346 MachineBasicBlock *MBB = MI->getParent();
1347 MBB->insert(MBB->erase(MI), NewDV);
// If the target cannot express the new location, drop the debug info
// instead of leaving it stale.
1352 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1353 RemoveMachineInstrFromMaps(MI);
1354 vrm.RemoveMachineInstrFromMaps(MI);
1355 MI->eraseFromParent();
1358 assert(!(O.isImplicit() && O.isUse()) &&
1359 "Spilling register that's used as implicit use?");
1360 SlotIndex index = getInstructionIndex(MI);
// Ignore instructions outside [start, end) — they belong to other ranges.
1361 if (index < start || index >= end)
1365 // Must be defined by an implicit def. It should not be spilled. Note,
1366 // this is for correctness reason. e.g.
1367 // 8 %reg1024<def> = IMPLICIT_DEF
1368 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1369 // The live range [12, 14) are not part of the r1024 live interval since
1370 // it's defined by an implicit def. It will not conflicts with live
1371 // interval of r1025. Now suppose both registers are spilled, you can
1372 // easily see a situation where both registers are reloaded before
1373 // the INSERT_SUBREG and both target registers that would overlap.
1375 RewriteMIs.push_back(RewriteInfo(index, MI));
1377 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1379 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1380 // Now rewrite the defs and uses.
1381 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1382 RewriteInfo &rwi = RewriteMIs[i];
1384 SlotIndex index = rwi.Index;
1385 MachineInstr *MI = rwi.MI;
1386 // If MI def and/or use the same register multiple times, then there
1387 // are multiple entries.
1388 while (i != e && RewriteMIs[i].MI == MI) {
1389 assert(RewriteMIs[i].Index == index);
1392 MachineBasicBlock *MBB = MI->getParent();
1394 if (ImpUse && MI != ReMatDefMI) {
1395 // Re-matting an instruction with virtual register use. Prevent interval
1396 // from being spilled.
1397 getInterval(ImpUse).markNotSpillable();
// Decide whether to reuse the vreg already created for this MBB.
1400 unsigned MBBId = MBB->getNumber();
1401 unsigned ThisVReg = 0;
1403 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1404 if (NVI != MBBVRegsMap.end()) {
1405 ThisVReg = NVI->second;
1412 // It's better to start a new interval to avoid artifically
1413 // extend the new interval.
1414 if (MI->readsWritesVirtualRegister(li.reg) ==
1415 std::make_pair(false,true)) {
1416 MBBVRegsMap.erase(MBB->getNumber());
1422 bool IsNew = ThisVReg == 0;
1424 // This ends the previous live interval. If all of its def / use
1425 // can be folded, give it a low spill weight.
1426 if (NewVReg && TrySplit && AllCanFold) {
1427 LiveInterval &nI = getOrCreateInterval(NewVReg);
1434 bool HasDef = false;
1435 bool HasUse = false;
1436 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1437 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1438 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1439 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1440 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
// Nothing was rewritten for this instruction; move on.
1441 if (!HasDef && !HasUse)
1444 AllCanFold &= CanFold;
1446 // Update weight of spill interval.
1447 LiveInterval &nI = getOrCreateInterval(NewVReg);
1449 // The spill weight is now infinity as it cannot be spilled again.
1450 nI.markNotSpillable();
1454 // Keep track of the last def and first use in each MBB.
1456 if (MI != ReMatOrigDefMI || !CanDelete) {
1457 bool HasKill = false;
1459 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1461 // If this is a two-address code, then this index starts a new VNInfo.
1462 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1464 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1466 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1467 SpillIdxes.find(MBBId);
1469 if (SII == SpillIdxes.end()) {
1470 std::vector<SRInfo> S;
1471 S.push_back(SRInfo(index, NewVReg, true));
1472 SpillIdxes.insert(std::make_pair(MBBId, S));
1473 } else if (SII->second.back().vreg != NewVReg) {
1474 SII->second.push_back(SRInfo(index, NewVReg, true));
1475 } else if (index > SII->second.back().index) {
1476 // If there is an earlier def and this is a two-address
1477 // instruction, then it's not possible to fold the store (which
1478 // would also fold the load).
1479 SRInfo &Info = SII->second.back();
1481 Info.canFold = !HasUse;
1483 SpillMBBs.set(MBBId);
1484 } else if (SII != SpillIdxes.end() &&
1485 SII->second.back().vreg == NewVReg &&
1486 index > SII->second.back().index) {
1487 // There is an earlier def that's not killed (must be two-address).
1488 // The spill is no longer needed.
1489 SII->second.pop_back();
1490 if (SII->second.empty()) {
1491 SpillIdxes.erase(MBBId);
1492 SpillMBBs.reset(MBBId);
// Record restore information for uses.
1499 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1500 SpillIdxes.find(MBBId);
1501 if (SII != SpillIdxes.end() &&
1502 SII->second.back().vreg == NewVReg &&
1503 index > SII->second.back().index)
1504 // Use(s) following the last def, it's not safe to fold the spill.
1505 SII->second.back().canFold = false;
1506 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1507 RestoreIdxes.find(MBBId);
1508 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1509 // If we are splitting live intervals, only fold if it's the first
1510 // use and there isn't another use later in the MBB.
1511 RII->second.back().canFold = false;
1513 // Only need a reload if there isn't an earlier def / use.
1514 if (RII == RestoreIdxes.end()) {
1515 std::vector<SRInfo> Infos;
1516 Infos.push_back(SRInfo(index, NewVReg, true));
1517 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1519 RII->second.push_back(SRInfo(index, NewVReg, true));
1521 RestoreMBBs.set(MBBId);
1525 // Update spill weight.
1526 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1527 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1530 if (NewVReg && TrySplit && AllCanFold) {
1531 // If all of its def / use can be folded, give it a low spill weight.
1532 LiveInterval &nI = getOrCreateInterval(NewVReg);
// alsoFoldARestore - Return true if the restore recorded for virtual
// register vr at slot index `index` in block Id is marked foldable.
1537 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1538 unsigned vr, BitVector &RestoreMBBs,
1539 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores recorded for this block at all.
1540 if (!RestoreMBBs[Id])
1542 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1543 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1544 if (Restores[i].index == index &&
1545 Restores[i].vreg == vr &&
1546 Restores[i].canFold)
1551 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1552 unsigned vr, BitVector &RestoreMBBs,
1553 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1554 if (!RestoreMBBs[Id])
1556 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1557 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1558 if (Restores[i].index == index && Restores[i].vreg)
1559 Restores[i].index = SlotIndex();
1562 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1563 /// spilled and create empty intervals for their uses.
1565 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1566 const TargetRegisterClass* rc,
1567 std::vector<LiveInterval*> &NewLIs) {
// Walk every remaining def / use of li.reg; by this point any normal
// operand should already have been rewritten to a new vreg.
1568 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1569 re = mri_->reg_end(); ri != re; ) {
1570 MachineOperand &O = ri.getOperand();
1571 MachineInstr *MI = &*ri;
1573 if (MI->isDebugValue()) {
1574 // Remove debug info for now.
1576 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1580 assert(MI->isImplicitDef() &&
1581 "Register def was not rewritten?");
1582 RemoveMachineInstrFromMaps(MI);
1583 vrm.RemoveMachineInstrFromMaps(MI);
1584 MI->eraseFromParent();
1586 // This must be an use of an implicit_def so it's not part of the live
1587 // interval. Create a new empty live interval for it.
1588 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1589 unsigned NewVReg = mri_->createVirtualRegister(rc);
1591 vrm.setIsImplicitlyDefined(NewVReg);
1592 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Rewrite every operand of this instruction that still names li.reg.
1593 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1594 MachineOperand &MO = MI->getOperand(i);
1595 if (MO.isReg() && MO.getReg() == li.reg) {
// getSpillWeight - Compute the spill-weight contribution of a single
// def and/or use occurring at the given loop depth.
1605 LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1606 // Limit the loop depth ridiculousness.
1607 if (loopDepth > 200)
1610 // The loop depth is used to roughly estimate the number of times the
1611 // instruction is executed. Something like 10^d is simple, but will quickly
1612 // overflow a float. This expression behaves like 10^d for small d, but is
1613 // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1614 // headroom before overflow.
1615 float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
// isDef/isUse are bools, so this is 0, 1, or 2 times the loop factor.
1617 return (isDef + isUse) * lc;
// normalizeSpillWeights - Apply normalizeSpillWeight to each newly created
// spill interval.
1621 LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1622 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1623 normalizeSpillWeight(*NewLIs[i]);
// addIntervalsForSpillsFast - Simpler spilling path (enabled by the
// -fast-spill option): assign one stack slot and rewrite each def / use of
// li.reg with a fresh unspillable vreg, unless the access can be folded
// directly against the slot.
1626 std::vector<LiveInterval*> LiveIntervals::
1627 addIntervalsForSpillsFast(const LiveInterval &li,
1628 const MachineLoopInfo *loopInfo,
1630 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1632 std::vector<LiveInterval*> added;
1634 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1637 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1642 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1644 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1645 while (RI != mri_->reg_end()) {
1646 MachineInstr* MI = &*RI;
1648 SmallVector<unsigned, 2> Indices;
1649 bool HasUse, HasDef;
1650 tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(li.reg, &Indices);
// Folding against the slot failed; issue explicit reload / spill code
// through a brand-new virtual register instead.
1652 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1653 Indices, true, slot, li.reg)) {
1654 unsigned NewVReg = mri_->createVirtualRegister(rc);
1656 vrm.assignVirt2StackSlot(NewVReg, slot);
1658 // create a new register for this spill
1659 LiveInterval &nI = getOrCreateInterval(NewVReg);
1660 nI.markNotSpillable();
1662 // Rewrite register operands to use the new vreg.
1663 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1664 E = Indices.end(); I != E; ++I) {
1665 MI->getOperand(*I).setReg(NewVReg);
// The new vreg only lives across this instruction, so a use is a kill.
1667 if (MI->getOperand(*I).isUse())
1668 MI->getOperand(*I).setIsKill(true);
1671 // Fill in the new live interval.
1672 SlotIndex index = getInstructionIndex(MI);
1674 LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1675 nI.getNextValue(SlotIndex(), 0, false,
1676 getVNInfoAllocator()));
1677 DEBUG(dbgs() << " +" << LR);
1679 vrm.addRestorePoint(NewVReg, MI);
1682 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1683 nI.getNextValue(SlotIndex(), 0, false,
1684 getVNInfoAllocator()));
1685 DEBUG(dbgs() << " +" << LR);
1687 vrm.addSpillPoint(NewVReg, true, MI);
1690 added.push_back(&nI);
1693 dbgs() << "\t\t\t\tadded new interval: ";
// Folding / rewriting may invalidate the iterator; restart the scan.
1700 RI = mri_->reg_begin(li.reg);
// addIntervalsForSpills - Main spilling entry point: rewrite all defs / uses
// of li, assign stack slots / remat ids, insert spill and restore points via
// the VirtRegMap, and return the list of new intervals created.
1706 std::vector<LiveInterval*> LiveIntervals::
1707 addIntervalsForSpills(const LiveInterval &li,
1708 SmallVectorImpl<LiveInterval*> &SpillIs,
1709 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1711 if (EnableFastSpilling)
1712 return addIntervalsForSpillsFast(li, loopInfo, vrm);
1714 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1717 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1718 li.print(dbgs(), tri_);
1722 // Each bit specify whether a spill is required in the MBB.
1723 BitVector SpillMBBs(mf_->getNumBlockIDs());
1724 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1725 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1726 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1727 DenseMap<unsigned,unsigned> MBBVRegsMap;
1728 std::vector<LiveInterval*> NewLIs;
1729 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-val# rematerialization bookkeeping.
1731 unsigned NumValNums = li.getNumValNums();
1732 SmallVector<MachineInstr*, 4> ReMatDefs;
1733 ReMatDefs.resize(NumValNums, NULL);
1734 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1735 ReMatOrigDefs.resize(NumValNums, NULL);
1736 SmallVector<int, 4> ReMatIds;
1737 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1738 BitVector ReMatDelete(NumValNums);
1739 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1741 // Spilling a split live interval. It cannot be split any further. Also,
1742 // it's also guaranteed to be a single val# / range interval.
1743 if (vrm.getPreSplitReg(li.reg)) {
1744 vrm.setIsSplitFromReg(li.reg, 0);
1745 // Unset the split kill marker on the last use.
1746 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1747 if (KillIdx != SlotIndex()) {
1748 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1749 assert(KillMI && "Last use disappeared?");
1750 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1751 assert(KillOp != -1 && "Last use disappeared?");
1752 KillMI->getOperand(KillOp).setIsKill(false);
1754 vrm.removeKillPoint(li.reg);
1755 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1756 Slot = vrm.getStackSlot(li.reg);
1757 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1758 MachineInstr *ReMatDefMI = DefIsReMat ?
1759 vrm.getReMaterializedMI(li.reg) : NULL;
1761 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1762 bool isLoad = isLoadSS ||
1763 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1764 bool IsFirstRange = true;
1765 for (LiveInterval::Ranges::const_iterator
1766 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1767 // If this is a split live interval with multiple ranges, it means there
1768 // are two-address instructions that re-defined the value. Only the
1769 // first def can be rematerialized!
1771 // Note ReMatOrigDefMI has already been deleted.
1772 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1773 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1774 false, vrm, rc, ReMatIds, loopInfo,
1775 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1776 MBBVRegsMap, NewLIs);
1778 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1779 Slot, 0, false, false, false,
1780 false, vrm, rc, ReMatIds, loopInfo,
1781 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1782 MBBVRegsMap, NewLIs);
1784 IsFirstRange = false;
1787 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1788 normalizeSpillWeights(NewLIs);
// Non-pre-split path: splitting is only attempted when the interval spans
// more than one basic block.
1792 bool TrySplit = !intervalIsInOneMBB(li);
1795 bool NeedStackSlot = false;
1796 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1798 const VNInfo *VNI = *i;
1799 unsigned VN = VNI->id;
1800 if (VNI->isUnused())
1801 continue; // Dead val#.
1802 // Is the def for the val# rematerializable?
1803 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1804 ? getInstructionFromIndex(VNI->def) : 0;
1806 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1807 // Remember how to remat the def of this val#.
1808 ReMatOrigDefs[VN] = ReMatDefMI;
1809 // Original def may be modified so we have to make a copy here.
1810 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1811 CloneMIs.push_back(Clone);
1812 ReMatDefs[VN] = Clone;
1814 bool CanDelete = true;
1815 if (VNI->hasPHIKill()) {
1816 // A kill is a phi node, not all of its uses can be rematerialized.
1817 // It must not be deleted.
1819 // Need a stack slot if there is any live range where uses cannot be
1821 NeedStackSlot = true;
1824 ReMatDelete.set(VN);
1826 // Need a stack slot if there is any live range where uses cannot be
1828 NeedStackSlot = true;
1832 // One stack slot per live interval.
1833 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1834 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1835 Slot = vrm.assignVirt2StackSlot(li.reg);
1837 // This case only occurs when the prealloc splitter has already assigned
1838 // a stack slot to this vreg.
1840 Slot = vrm.getStackSlot(li.reg);
1843 // Create new intervals and rewrite defs and uses.
1844 for (LiveInterval::Ranges::const_iterator
1845 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1846 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1847 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1848 bool DefIsReMat = ReMatDefMI != NULL;
1849 bool CanDelete = ReMatDelete[I->valno->id];
1851 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1852 bool isLoad = isLoadSS ||
1853 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1854 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1855 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1856 CanDelete, vrm, rc, ReMatIds, loopInfo,
1857 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1858 MBBVRegsMap, NewLIs);
1861 // Insert spills / restores if we are splitting.
1863 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1864 normalizeSpillWeights(NewLIs);
1868 SmallPtrSet<LiveInterval*, 4> AddedKill;
1869 SmallVector<unsigned, 2> Ops;
1870 if (NeedStackSlot) {
// Process the recorded spill points block by block.
1871 int Id = SpillMBBs.find_first();
1873 std::vector<SRInfo> &spills = SpillIdxes[Id];
1874 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1875 SlotIndex index = spills[i].index;
1876 unsigned VReg = spills[i].vreg;
1877 LiveInterval &nI = getOrCreateInterval(VReg);
1878 bool isReMat = vrm.isReMaterialized(VReg);
1879 MachineInstr *MI = getInstructionFromIndex(index);
1880 bool CanFold = false;
1881 bool FoundUse = false;
1883 if (spills[i].canFold) {
1885 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1886 MachineOperand &MO = MI->getOperand(j);
1887 if (!MO.isReg() || MO.getReg() != VReg)
1894 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1895 RestoreMBBs, RestoreIdxes))) {
1896 // MI has two-address uses of the same register. If the use
1897 // isn't the first and only use in the BB, then we can't fold
1898 // it. FIXME: Move this to rewriteInstructionsForSpills.
1905 // Fold the store into the def if possible.
1906 bool Folded = false;
1907 if (CanFold && !Ops.empty()) {
1908 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1911 // Also folded uses, do not issue a load.
1912 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1913 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1915 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1919 // Otherwise tell the spiller to issue a spill.
1921 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1922 bool isKill = LR->end == index.getStoreIndex();
1923 if (!MI->registerDefIsDead(nI.reg))
1924 // No need to spill a dead def.
1925 vrm.addSpillPoint(VReg, isKill, MI);
1927 AddedKill.insert(&nI);
1930 Id = SpillMBBs.find_next(Id);
// Process the recorded restore points block by block.
1934 int Id = RestoreMBBs.find_first();
1936 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1937 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1938 SlotIndex index = restores[i].index;
// A cleared index means the restore was folded away (eraseRestoreInfo).
1939 if (index == SlotIndex())
1941 unsigned VReg = restores[i].vreg;
1942 LiveInterval &nI = getOrCreateInterval(VReg);
1943 bool isReMat = vrm.isReMaterialized(VReg);
1944 MachineInstr *MI = getInstructionFromIndex(index);
1945 bool CanFold = false;
1947 if (restores[i].canFold) {
1949 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1950 MachineOperand &MO = MI->getOperand(j);
1951 if (!MO.isReg() || MO.getReg() != VReg)
1955 // If this restore were to be folded, it would have been folded
1964 // Fold the load into the use if possible.
1965 bool Folded = false;
1966 if (CanFold && !Ops.empty()) {
1968 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1970 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1972 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1973 // If the rematerializable def is a load, also try to fold it.
1974 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1975 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1976 Ops, isLoadSS, LdSlot, VReg);
1978 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1980 // Re-matting an instruction with virtual register use. Add the
1981 // register as an implicit use on the use MI and mark the register
1982 // interval as unspillable.
1983 LiveInterval &ImpLi = getInterval(ImpUse);
1984 ImpLi.markNotSpillable();
1985 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1990 // If folding is not possible / failed, then tell the spiller to issue a
1991 // load / rematerialization for us.
1993 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1995 vrm.addRestorePoint(VReg, MI);
1997 Id = RestoreMBBs.find_next(Id);
2000 // Finalize intervals: add kills, finalize spill weights, and filter out
2002 std::vector<LiveInterval*> RetNewLIs;
2003 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2004 LiveInterval *LI = NewLIs[i];
2006 LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
2007 if (!AddedKill.count(LI)) {
// Mark the last use of this interval as a kill and record it.
2008 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2009 SlotIndex LastUseIdx = LR->end.getBaseIndex();
2010 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2011 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2012 assert(UseIdx != -1);
2013 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2014 LastUse->getOperand(UseIdx).setIsKill();
2015 vrm.addKillPoint(LI->reg, LastUseIdx);
2018 RetNewLIs.push_back(LI);
2022 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2023 normalizeSpillWeights(RetNewLIs);
2027 /// hasAllocatableSuperReg - Return true if the specified physical register has
2028 /// any super register that's allocatable.
2029 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// A super-register qualifies only if it is both allocatable and tracked
// with a live interval.
2030 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2031 if (allocatableRegs_[*AS] && hasInterval(*AS))
2036 /// getRepresentativeReg - Find the largest super register of the specified
2037 /// physical register.
2038 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2039 // Find the largest super-register that is allocatable.
2040 unsigned BestReg = Reg;
2041 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2042 unsigned SuperReg = *AS;
// Pick a super-register that has an interval but no allocatable
// super-register of its own, i.e. the top of the chain.
2043 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2051 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2052 /// specified interval that conflicts with the specified physical register.
2053 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2054 unsigned PhysReg) const {
2055 unsigned NumConflicts = 0;
2056 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2057 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2058 E = mri_->reg_end(); I != E; ++I) {
2059 MachineOperand &O = I.getOperand();
2060 MachineInstr *MI = O.getParent();
2061 if (MI->isDebugValue())
2063 SlotIndex Index = getInstructionIndex(MI);
2064 if (pli.liveAt(Index))
2067 return NumConflicts;
2070 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2071 /// around all defs and uses of the specified interval. Return true if it
2072 /// was able to cut its interval.
2073 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2074 unsigned PhysReg, VirtRegMap &vrm) {
2075 unsigned SpillReg = getRepresentativeReg(PhysReg);
2077 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2078 // If there are registers which alias PhysReg, but which are not a
2079 // sub-register of the chosen representative super register. Assert
2080 // since we can't handle it yet.
2081 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2082 tri_->isSuperRegister(*AS, SpillReg));
2085 SmallVector<unsigned, 4> PRegs;
2086 if (hasInterval(SpillReg))
2087 PRegs.push_back(SpillReg);
2089 SmallSet<unsigned, 4> Added;
2090 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
2091 if (Added.insert(*AS) && hasInterval(*AS)) {
2092 PRegs.push_back(*AS);
2093 for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
2098 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2099 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2100 E = mri_->reg_end(); I != E; ++I) {
2101 MachineOperand &O = I.getOperand();
2102 MachineInstr *MI = O.getParent();
2103 if (MI->isDebugValue() || SeenMIs.count(MI))
2106 SlotIndex Index = getInstructionIndex(MI);
2107 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2108 unsigned PReg = PRegs[i];
2109 LiveInterval &pli = getInterval(PReg);
2110 if (!pli.liveAt(Index))
2112 vrm.addEmergencySpill(PReg, MI);
2113 SlotIndex StartIdx = Index.getLoadIndex();
2114 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2115 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2116 pli.removeRange(StartIdx, EndIdx);
2120 raw_string_ostream Msg(msg);
2121 Msg << "Ran out of registers during register allocation!";
2122 if (MI->isInlineAsm()) {
2123 Msg << "\nPlease check your inline asm statement for invalid "
2124 << "constraints:\n";
2125 MI->print(Msg, tm_);
2127 report_fatal_error(Msg.str());
2129 for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
2130 if (!hasInterval(*AS))
2132 LiveInterval &spli = getInterval(*AS);
2133 if (spli.liveAt(Index))
2134 spli.removeRange(Index.getLoadIndex(),
2135 Index.getNextIndex().getBaseIndex());
2142 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2143 MachineInstr* startInst) {
2144 LiveInterval& Interval = getOrCreateInterval(reg);
2145 VNInfo* VN = Interval.getNextValue(
2146 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2147 startInst, true, getVNInfoAllocator());
2148 VN->setHasPHIKill(true);
2149 VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
2151 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2152 getMBBEndIdx(startInst->getParent()), VN);
2153 Interval.addRange(LR);