1 //===-- llvm/CodeGen/Spiller.cpp - Spiller -------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #define DEBUG_TYPE "spiller"
13 #include "VirtRegMap.h"
14 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
15 #include "llvm/CodeGen/MachineFrameInfo.h"
16 #include "llvm/CodeGen/MachineFunction.h"
17 #include "llvm/CodeGen/MachineRegisterInfo.h"
18 #include "llvm/Target/TargetMachine.h"
19 #include "llvm/Target/TargetInstrInfo.h"
20 #include "llvm/Support/CommandLine.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Support/ErrorHandling.h"
23 #include "llvm/Support/raw_ostream.h"
// Names of the selectable spiller implementations. 'inline_' carries a
// trailing underscore because 'inline' is a C++ keyword; the command line
// string for it is still "inline" (see clEnumValN below).
enum SpillerName { trivial, standard, splitting, inline_ };
// Command line option used to pick a spiller; consumed by
// llvm::createSpiller() at the bottom of this file.
static cl::opt<SpillerName>
cl::desc("Spiller to use: (default: standard)"),
cl::values(clEnumVal(trivial, "trivial spiller"),
           clEnumVal(standard, "default spiller"),
           clEnumVal(splitting, "splitting spiller"),
           clEnumValN(inline_, "inline", "inline spiller"),
// Spiller virtual destructor implementation. Deliberately defined
// out-of-line in this .cpp rather than in the header.
Spiller::~Spiller() {}
/// Utility class for spillers. Caches the per-function analyses and target
/// hooks the concrete spillers need, and provides the shared
/// trivialSpillEverywhere() primitive.
class SpillerBase : public Spiller {
  MachineFrameInfo *mfi;          // Frame info for the function being spilled.
  MachineRegisterInfo *mri;       // Virtual register / operand information.
  const TargetInstrInfo *tii;     // Target hooks for spill/reload insertion.
  const TargetRegisterInfo *tri;  // Target register info (kill flag updates).
  /// Construct a spiller base.
  SpillerBase(MachineFunction *mf, LiveIntervals *lis, VirtRegMap *vrm)
    : mf(mf), lis(lis), vrm(vrm)
    // Cache the commonly used objects up front so the spill loop below
    // doesn't have to re-fetch them per instruction.
    mfi = mf->getFrameInfo();
    mri = &mf->getRegInfo();
    tii = mf->getTarget().getInstrInfo();
    tri = mf->getTarget().getRegisterInfo();
  /// Add spill ranges for every use/def of the live interval, inserting loads
  /// immediately before each use, and stores after each def. No folding or
  /// remat is attempted. The new single-instruction intervals are appended
  /// to newIntervals.
  void trivialSpillEverywhere(LiveInterval *li,
                              std::vector<LiveInterval*> &newIntervals) {
    DEBUG(dbgs() << "Spilling everywhere " << *li << "\n");
    // A weight of HUGE_VALF marks an interval that was itself produced by a
    // spill (see below); spilling one of those again would never terminate.
    assert(li->weight != HUGE_VALF &&
           "Attempting to spill already spilled value.");
    assert(!li->isStackSlot() &&
           "Trying to spill a stack slot.");
    DEBUG(dbgs() << "Trivial spill everywhere of reg" << li->reg << "\n");
    const TargetRegisterClass *trc = mri->getRegClass(li->reg);
    // One stack slot is shared by all of the new short intervals.
    unsigned ss = vrm->assignVirt2StackSlot(li->reg);
    // Iterate over reg uses/defs.
    for (MachineRegisterInfo::reg_iterator
         regItr = mri->reg_begin(li->reg); regItr != mri->reg_end();) {
      // Grab the use/def instr.
      MachineInstr *mi = &*regItr;
      DEBUG(dbgs() << " Processing " << *mi);
      // Step regItr to the next use/def instr. The loop condition skips any
      // further operands of 'mi' itself so that rewriting mi's operands
      // below cannot invalidate regItr.
      } while (regItr != mri->reg_end() && (&*regItr == mi));
      // Collect uses & defs for this instr.
      SmallVector<unsigned, 2> indices;
      for (unsigned i = 0; i != mi->getNumOperands(); ++i) {
        MachineOperand &op = mi->getOperand(i);
        // Only operands referencing the register being spilled matter here.
        if (!op.isReg() || op.getReg() != li->reg)
        hasUse |= mi->getOperand(i).isUse();
        hasDef |= mi->getOperand(i).isDef();
        indices.push_back(i);
      // Create a new vreg & interval for this instr.
      unsigned newVReg = mri->createVirtualRegister(trc);
      vrm->assignVirt2StackSlot(newVReg, ss);
      LiveInterval *newLI = &lis->getOrCreateInterval(newVReg);
      // Mark the new interval as spill-produced (checked by the assert at
      // the top of this function).
      newLI->weight = HUGE_VALF;
      // Update the reg operands & kill flags.
      for (unsigned i = 0; i < indices.size(); ++i) {
        unsigned mopIdx = indices[i];
        MachineOperand &mop = mi->getOperand(mopIdx);
        // Uses tied to a def (two-address operands) are handled differently;
        // NOTE(review): the branch body is not visible here - presumably it
        // rewrites the operand to newVReg and sets the kill flag; confirm.
        if (mop.isUse() && !mi->isRegTiedToDefOperand(mopIdx)) {
      assert(hasUse || hasDef);
      // Insert reload if necessary.
      MachineBasicBlock::iterator miItr(mi);
      tii->loadRegFromStackSlot(*mi->getParent(), miItr, newVReg, ss, trc,
      MachineInstr *loadInstr(prior(miItr));
      SlotIndex loadIndex =
        lis->InsertMachineInstrInMaps(loadInstr).getDefIndex();
      vrm->addSpillSlotUse(ss, loadInstr);
      // The reload's live range only needs to cover the reload itself and
      // the instruction that consumes it.
      SlotIndex endIndex = loadIndex.getNextIndex();
      newLI->getNextValue(loadIndex, 0, true, lis->getVNInfoAllocator());
      newLI->addRange(LiveRange(loadIndex, endIndex, loadVNI));
      // Insert store if necessary.
      tii->storeRegToStackSlot(*mi->getParent(), llvm::next(miItr), newVReg,
      MachineInstr *storeInstr(llvm::next(miItr));
      SlotIndex storeIndex =
        lis->InsertMachineInstrInMaps(storeInstr).getDefIndex();
      vrm->addSpillSlotUse(ss, storeInstr);
      // Likewise, the stored value only lives from the def to the store.
      SlotIndex beginIndex = storeIndex.getPrevIndex();
      newLI->getNextValue(beginIndex, 0, true, lis->getVNInfoAllocator());
      newLI->addRange(LiveRange(beginIndex, storeIndex, storeVNI));
      // Hand the freshly created single-instruction interval back to the
      // caller for allocation.
      newIntervals.push_back(newLI);
166 } // end anonymous namespace
/// Spills any live range using the spill-everywhere method with no attempt at
/// folding or remat: every use gets a reload and every def gets a store
/// (see SpillerBase::trivialSpillEverywhere).
class TrivialSpiller : public SpillerBase {
  TrivialSpiller(MachineFunction *mf, LiveIntervals *lis, VirtRegMap *vrm)
    : SpillerBase(mf, lis, vrm) {}
  /// Spill li, appending the replacement intervals to newIntervals.
  void spill(LiveInterval *li,
             std::vector<LiveInterval*> &newIntervals,
             SmallVectorImpl<LiveInterval*> &,
    // Ignore spillIs - we don't use it.
    trivialSpillEverywhere(li, newIntervals);
187 } // end anonymous namespace
/// Falls back on LiveIntervals::addIntervalsForSpills.
class StandardSpiller : public Spiller {
  const MachineLoopInfo *loopInfo;  // Passed through to addIntervalsForSpills.
  StandardSpiller(LiveIntervals *lis, const MachineLoopInfo *loopInfo,
    : lis(lis), loopInfo(loopInfo), vrm(vrm) {}
  /// Falls back on LiveIntervals::addIntervalsForSpills.
  void spill(LiveInterval *li,
             std::vector<LiveInterval*> &newIntervals,
             SmallVectorImpl<LiveInterval*> &spillIs,
    // Delegate all of the actual spilling work to the LiveIntervals
    // analysis, then append whatever new intervals it produced.
    std::vector<LiveInterval*> added =
      lis->addIntervalsForSpills(*li, spillIs, loopInfo, *vrm);
    newIntervals.insert(newIntervals.end(), added.begin(), added.end());
213 } // end anonymous namespace
/// When a call to spill is placed this spiller will first try to break the
/// interval up into its component values (one new interval per value).
/// If this fails, or if a call is placed to spill a previously split interval
/// then the spiller falls back on the standard spilling mechanism.
class SplittingSpiller : public StandardSpiller {
  SplittingSpiller(MachineFunction *mf, LiveIntervals *lis,
                   const MachineLoopInfo *loopInfo, VirtRegMap *vrm)
    : StandardSpiller(lis, loopInfo, vrm) {
    // Cache the extra objects this subclass needs beyond StandardSpiller.
    mri = &mf->getRegInfo();
    tii = mf->getTarget().getInstrInfo();
    tri = mf->getTarget().getRegisterInfo();
  /// Try a per-value split first; fall back on standard spilling.
  void spill(LiveInterval *li,
             std::vector<LiveInterval*> &newIntervals,
             SmallVectorImpl<LiveInterval*> &spillIs,
             SlotIndex *earliestStart) {
    if (worthTryingToSplit(li))
      tryVNISplit(li, earliestStart);
    StandardSpiller::spill(li, newIntervals, spillIs, earliestStart);
  MachineRegisterInfo *mri;
  const TargetInstrInfo *tii;
  const TargetRegisterInfo *tri;
  // Intervals produced by a split (and the leftovers of a split). These are
  // never re-split; they go straight to the standard spiller.
  DenseSet<LiveInterval*> alreadySplit;
  /// Splitting only makes sense for an interval we haven't split before
  /// that has more than one value number to separate.
  bool worthTryingToSplit(LiveInterval *li) const {
    return (!alreadySplit.count(li) && li->getNumValNums() > 1);
  /// Try to break a LiveInterval into its component values.
  std::vector<LiveInterval*> tryVNISplit(LiveInterval *li,
                                         SlotIndex *earliestStart) {
    DEBUG(dbgs() << "Trying VNI split of %reg" << *li << "\n");
    std::vector<LiveInterval*> added;
    // Snapshot the value numbers first: extractVNI mutates li's VNI list,
    // so we must not iterate over li->vni_begin()/vni_end() directly.
    SmallVector<VNInfo*, 4> vnis;
    std::copy(li->vni_begin(), li->vni_end(), std::back_inserter(vnis));
    for (SmallVectorImpl<VNInfo*>::iterator vniItr = vnis.begin(),
         vniEnd = vnis.end(); vniItr != vniEnd; ++vniItr) {
      VNInfo *vni = *vniItr;
      DEBUG(dbgs() << " Extracted Val #" << vni->id << " as ");
      LiveInterval *splitInterval = extractVNI(li, vni);
      if (splitInterval != 0) {
        DEBUG(dbgs() << *splitInterval << "\n");
        added.push_back(splitInterval);
        alreadySplit.insert(splitInterval);
        // Keep the caller's earliest-start watermark up to date.
        if (earliestStart != 0) {
          if (splitInterval->beginIndex() < *earliestStart)
            *earliestStart = splitInterval->beginIndex();
      // extractVNI declined (returned null) for this value number.
      DEBUG(dbgs() << "0\n");
    DEBUG(dbgs() << "Original LI: " << *li << "\n");
    // If the original interval still contains some live ranges
    // add it to added and alreadySplit.
    alreadySplit.insert(li);
    if (earliestStart != 0) {
      if (li->beginIndex() < *earliestStart)
        *earliestStart = li->beginIndex();
  /// Extract the given value number from the interval: move its ranges onto
  /// a fresh vreg/interval and stitch copies in at the def and at PHI kills
  /// so both old and new registers hold the right value.
  LiveInterval* extractVNI(LiveInterval *li, VNInfo *vni) const {
    assert(vni->isDefAccurate() || vni->isPHIDef());
    // Create a new vreg and live interval, copy VNI ranges over.
    const TargetRegisterClass *trc = mri->getRegClass(li->reg);
    unsigned newVReg = mri->createVirtualRegister(trc);
    LiveInterval *newLI = &lis->getOrCreateInterval(newVReg);
    VNInfo *newVNI = newLI->createValueCopy(vni, lis->getVNInfoAllocator());
    // Start by copying all live ranges in the VN to the new interval.
    for (LiveInterval::iterator rItr = li->begin(), rEnd = li->end();
         rItr != rEnd; ++rItr) {
      if (rItr->valno == vni) {
        newLI->addRange(LiveRange(rItr->start, rItr->end, newVNI));
    // Erase the old VNI & ranges.
    li->removeValNo(vni);
    // Collect all current uses of the register belonging to the given VNI.
    // We'll use this to rename the register after we've dealt with the def.
    std::set<MachineInstr*> uses;
    for (MachineRegisterInfo::use_iterator
         useItr = mri->use_begin(li->reg), useEnd = mri->use_end();
         useItr != useEnd; ++useItr) {
      uses.insert(&*useItr);
    // Process the def instruction for this VNI.
    if (newVNI->isPHIDef()) {
      // Insert a copy at the start of the MBB. The range preceding the
      // copy will be attached to the original LiveInterval.
      MachineBasicBlock *defMBB = lis->getMBBFromIndex(newVNI->def);
      tii->copyRegToReg(*defMBB, defMBB->begin(), newVReg, li->reg, trc, trc,
      MachineInstr *copyMI = defMBB->begin();
      copyMI->addRegisterKilled(li->reg, tri);
      SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
      // The original interval keeps a (new) PHI-def value covering the span
      // from the block start up to the copy we just inserted.
      VNInfo *phiDefVNI = li->getNextValue(lis->getMBBStartIdx(defMBB),
                                           0, false, lis->getVNInfoAllocator());
      phiDefVNI->setIsPHIDef(true);
      li->addRange(LiveRange(phiDefVNI->def, copyIdx.getDefIndex(), phiDefVNI));
      LiveRange *oldPHIDefRange =
        newLI->getLiveRangeContaining(lis->getMBBStartIdx(defMBB));
      // If the old phi def starts in the middle of the range chop it up.
      if (oldPHIDefRange->start < lis->getMBBStartIdx(defMBB)) {
        LiveRange oldPHIDefRange2(copyIdx.getDefIndex(), oldPHIDefRange->end,
                                  oldPHIDefRange->valno);
        oldPHIDefRange->end = lis->getMBBStartIdx(defMBB);
        newLI->addRange(oldPHIDefRange2);
      } else if (oldPHIDefRange->start == lis->getMBBStartIdx(defMBB)) {
        // Otherwise if it's at the start of the range just trim it.
        oldPHIDefRange->start = copyIdx.getDefIndex();
        assert(false && "PHI def range doesn't cover PHI def?");
      // The new interval's value is now defined by the copy, not by a PHI.
      newVNI->def = copyIdx.getDefIndex();
      newVNI->setCopy(copyMI);
      newVNI->setIsPHIDef(false); // not a PHI def anymore.
      newVNI->setIsDefAccurate(true);
      // non-PHI def. Rename the def. If it's two-addr that means renaming the use
      // and inserting a new copy too.
      MachineInstr *defInst = lis->getInstructionFromIndex(newVNI->def);
      // We'll rename this now, so we can remove it from uses.
      unsigned defOpIdx = defInst->findRegisterDefOperandIdx(li->reg);
      bool isTwoAddr = defInst->isRegTiedToUseOperand(defOpIdx),
        twoAddrUseIsUndef = false;
      // Rename the def (and, for two-address instrs, the tied use) to the
      // new vreg, remembering whether the tied use was <undef>.
      for (unsigned i = 0; i < defInst->getNumOperands(); ++i) {
        MachineOperand &mo = defInst->getOperand(i);
        if (mo.isReg() && (mo.isDef() || isTwoAddr) && (mo.getReg()==li->reg)) {
          if (isTwoAddr && mo.isUse() && mo.isUndef())
            twoAddrUseIsUndef = true;
      SlotIndex defIdx = lis->getInstructionIndex(defInst);
      newVNI->def = defIdx.getDefIndex();
      if (isTwoAddr && !twoAddrUseIsUndef) {
        // The tied use read a live value of the old register, so a copy
        // old-reg -> new-reg must be inserted just before the def.
        MachineBasicBlock *defMBB = defInst->getParent();
        tii->copyRegToReg(*defMBB, defInst, newVReg, li->reg, trc, trc,
        MachineInstr *copyMI = prior(MachineBasicBlock::iterator(defInst));
        SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
        copyMI->addRegisterKilled(li->reg, tri);
        // The old register's range now ends at the copy.
        LiveRange *origUseRange =
          li->getLiveRangeContaining(newVNI->def.getUseIndex());
        origUseRange->end = copyIdx.getDefIndex();
        VNInfo *copyVNI = newLI->getNextValue(copyIdx.getDefIndex(), copyMI,
                                              true, lis->getVNInfoAllocator());
        LiveRange copyRange(copyIdx.getDefIndex(),defIdx.getDefIndex(),copyVNI);
        newLI->addRange(copyRange);
    // Rename every collected use that belongs to the extracted value.
    for (std::set<MachineInstr*>::iterator
         usesItr = uses.begin(), usesEnd = uses.end();
         usesItr != usesEnd; ++usesItr) {
      MachineInstr *useInst = *usesItr;
      SlotIndex useIdx = lis->getInstructionIndex(useInst);
      LiveRange *useRange =
        newLI->getLiveRangeContaining(useIdx.getUseIndex());
      // If this use doesn't belong to the new interval skip it.
      // This use doesn't belong to the VNI, skip it.
      if (useRange->valno != newVNI)
      // Check if this instr is two address.
      unsigned useOpIdx = useInst->findRegisterUseOperandIdx(li->reg);
      bool isTwoAddress = useInst->isRegTiedToDefOperand(useOpIdx);
      // Rename uses (and defs for two-address instrs).
      for (unsigned i = 0; i < useInst->getNumOperands(); ++i) {
        MachineOperand &mo = useInst->getOperand(i);
        if (mo.isReg() && (mo.isUse() || isTwoAddress) &&
            (mo.getReg() == li->reg)) {
      // If this is a two address instruction we've got some extra work to do.
      // We modified the def operand, so we need to copy back to the original
      // register after the instruction.
      MachineBasicBlock *useMBB = useInst->getParent();
      MachineBasicBlock::iterator useItr(useInst);
      tii->copyRegToReg(*useMBB, llvm::next(useItr), li->reg, newVReg, trc, trc,
      MachineInstr *copyMI = llvm::next(useItr);
      copyMI->addRegisterKilled(newVReg, tri);
      SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
      // Change the old two-address defined range & vni to start at
      // (and be defined by) the copy.
      LiveRange *origDefRange =
        li->getLiveRangeContaining(useIdx.getDefIndex());
      origDefRange->start = copyIdx.getDefIndex();
      origDefRange->valno->def = copyIdx.getDefIndex();
      origDefRange->valno->setCopy(copyMI);
      // Insert a new range & vni for the two-address-to-copy value. This
      // will be attached to the new live interval.
      newLI->getNextValue(useIdx.getDefIndex(), 0, true,
                          lis->getVNInfoAllocator());
      LiveRange copyRange(useIdx.getDefIndex(),copyIdx.getDefIndex(),copyVNI);
      newLI->addRange(copyRange);
    // Iterate over any PHI kills - we'll need to insert new copies for them.
    for (LiveInterval::iterator LRI = newLI->begin(), LRE = newLI->end();
      // Only ranges of the extracted value that end at a PHI boundary need
      // the copy-back treatment.
      if (LRI->valno != newVNI || LRI->end.isPHI())
      SlotIndex killIdx = LRI->end;
      MachineBasicBlock *killMBB = lis->getMBBFromIndex(killIdx);
      // Copy the value back into the original register before the block's
      // terminator so the successor PHI sees it in li->reg.
      tii->copyRegToReg(*killMBB, killMBB->getFirstTerminator(),
                        li->reg, newVReg, trc, trc,
      MachineInstr *copyMI = prior(killMBB->getFirstTerminator());
      copyMI->addRegisterKilled(newVReg, tri);
      SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
      // Save the current end. We may need it to add a new range if the
      // current range runs off the end of the MBB.
      SlotIndex newKillRangeEnd = LRI->end;
      LRI->end = copyIdx.getDefIndex();
      if (newKillRangeEnd != lis->getMBBEndIdx(killMBB)) {
        assert(newKillRangeEnd > lis->getMBBEndIdx(killMBB) &&
               "PHI kill range doesn't reach kill-block end. Not sane.");
        newLI->addRange(LiveRange(lis->getMBBEndIdx(killMBB),
                                  newKillRangeEnd, newVNI));
      // The original interval now owns the value from the copy to the end
      // of the kill block, and it is the one with the PHI kill.
      VNInfo *newKillVNI = li->getNextValue(copyIdx.getDefIndex(),
                                            lis->getVNInfoAllocator());
      newKillVNI->setHasPHIKill(true);
      li->addRange(LiveRange(copyIdx.getDefIndex(),
                             lis->getMBBEndIdx(killMBB),
      newVNI->setHasPHIKill(false);
509 } // end anonymous namespace
// Forward declaration of the inline spiller factory; the definition lives
// in another translation unit.
Spiller *createInlineSpiller(MachineFunction*,
                             const MachineLoopInfo*,
/// Create a spiller of the kind selected by the -spiller command line
/// option (see spillerOpt above); ownership passes to the caller.
llvm::Spiller* llvm::createSpiller(MachineFunction *mf, LiveIntervals *lis,
                                   const MachineLoopInfo *loopInfo,
  switch (spillerOpt) {
  default: assert(0 && "unknown spiller");
  case trivial: return new TrivialSpiller(mf, lis, vrm);
  case standard: return new StandardSpiller(lis, loopInfo, vrm);
  case splitting: return new SplittingSpiller(mf, lis, loopInfo, vrm);
  case inline_: return createInlineSpiller(mf, lis, loopInfo, vrm);