1 //===-- llvm/CodeGen/Spiller.cpp - Spiller -------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #define DEBUG_TYPE "spiller"
13 #include "VirtRegMap.h"
14 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
15 #include "llvm/CodeGen/MachineFrameInfo.h"
16 #include "llvm/CodeGen/MachineFunction.h"
17 #include "llvm/CodeGen/MachineRegisterInfo.h"
18 #include "llvm/Target/TargetMachine.h"
19 #include "llvm/Target/TargetInstrInfo.h"
20 #include "llvm/Support/CommandLine.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Support/ErrorHandling.h"
23 #include "llvm/Support/raw_ostream.h"
// Names of the available spiller implementations, selectable on the
// command line via -spiller=<name>.
29 enum SpillerName { trivial, standard, splitting };
// Hidden command-line option choosing the spiller implementation.
// NOTE(review): the option's identifier and cl::init(...) lines are not
// visible in this excerpt; the description text suggests the default is
// 'standard' — confirm against the full file.
32 static cl::opt<SpillerName>
34 cl::desc("Spiller to use: (default: standard)"),
36 cl::values(clEnumVal(trivial, "trivial spiller"),
37 clEnumVal(standard, "default spiller"),
38 clEnumVal(splitting, "splitting spiller"),
42 // Spiller virtual destructor implementation.
// Defined out-of-line in this file; the Spiller base class itself is
// declared elsewhere (Spiller.h, not visible in this excerpt).
43 Spiller::~Spiller() {}
47 /// Utility class for spillers.
// Shared base for the concrete spillers below.  Caches the per-function
// analyses and target hooks that spilling needs (frame info, register
// info, instruction/register target info).
// NOTE(review): interior lines are elided from this excerpt (access
// labels, some braces, and several statements are missing per the gaps
// in the embedded numbering) — consult the full file before editing.
48 class SpillerBase : public Spiller {
52 MachineFrameInfo *mfi;
53 MachineRegisterInfo *mri;
54 const TargetInstrInfo *tii;
55 const TargetRegisterInfo *tri;
58 /// Construct a spiller base.
// Caches frame/register info and target hooks off the MachineFunction.
59 SpillerBase(MachineFunction *mf, LiveIntervals *lis, VirtRegMap *vrm)
60 : mf(mf), lis(lis), vrm(vrm)
62 mfi = mf->getFrameInfo();
63 mri = &mf->getRegInfo();
64 tii = mf->getTarget().getInstrInfo();
65 tri = mf->getTarget().getRegisterInfo();
68 /// Add spill ranges for every use/def of the live interval, inserting loads
69 /// immediately before each use, and stores after each def. No folding or
70 /// remat is attempted.
// Each instruction touching li->reg gets its own fresh vreg (mapped to
// the same stack slot), so every use reloads and every def stores.  New
// tiny intervals are appended to newIntervals for the caller.
71 void trivialSpillEverywhere(LiveInterval *li,
72 std::vector<LiveInterval*> &newIntervals) {
73 DEBUG(dbgs() << "Spilling everywhere " << *li << "\n");
// A weight of HUGE_VALF marks an interval as already spilled (see the
// assignment to newLI->weight below) — spilling twice would loop.
75 assert(li->weight != HUGE_VALF &&
76 "Attempting to spill already spilled value.");
78 assert(!li->isStackSlot() &&
79 "Trying to spill a stack slot.");
81 DEBUG(dbgs() << "Trivial spill everywhere of reg" << li->reg << "\n");
83 const TargetRegisterClass *trc = mri->getRegClass(li->reg);
// One stack slot shared by all the replacement vregs created below.
84 unsigned ss = vrm->assignVirt2StackSlot(li->reg);
86 // Iterate over reg uses/defs.
87 for (MachineRegisterInfo::reg_iterator
88 regItr = mri->reg_begin(li->reg); regItr != mri->reg_end();) {
90 // Grab the use/def instr.
91 MachineInstr *mi = &*regItr;
93 DEBUG(dbgs() << " Processing " << *mi);
95 // Step regItr to the next use/def instr.
// Advance past every operand of the current instruction BEFORE we
// rewrite its operands — rewriting would invalidate/skip entries in
// the use/def chain we are walking.
// NOTE(review): the matching 'do {' opener is elided from this excerpt.
98 } while (regItr != mri->reg_end() && (&*regItr == mi));
100 // Collect uses & defs for this instr.
// indices records which operand slots of mi reference li->reg so we
// can rewrite exactly those below.
101 SmallVector<unsigned, 2> indices;
104 for (unsigned i = 0; i != mi->getNumOperands(); ++i) {
105 MachineOperand &op = mi->getOperand(i);
106 if (!op.isReg() || op.getReg() != li->reg)
108 hasUse |= mi->getOperand(i).isUse();
109 hasDef |= mi->getOperand(i).isDef();
110 indices.push_back(i);
113 // Create a new vreg & interval for this instr.
114 unsigned newVReg = mri->createVirtualRegister(trc);
// The new vreg shares the original's stack slot.
116 vrm->assignVirt2StackSlot(newVReg, ss);
117 LiveInterval *newLI = &lis->getOrCreateInterval(newVReg);
// Mark the replacement interval so it is never chosen for spilling
// again (checked by the assert at the top of this function).
118 newLI->weight = HUGE_VALF;
120 // Update the reg operands & kill flags.
121 for (unsigned i = 0; i < indices.size(); ++i) {
122 unsigned mopIdx = indices[i];
123 MachineOperand &mop = mi->getOperand(mopIdx);
// A plain use (not tied to a def) of the last replacement operand
// kills the new vreg.
125 if (mop.isUse() && !mi->isRegTiedToDefOperand(mopIdx)) {
129 assert(hasUse || hasDef);
131 // Insert reload if necessary.
132 MachineBasicBlock::iterator miItr(mi);
// Reload from the stack slot immediately before the use.
// NOTE(review): trailing call arguments are elided in this excerpt.
134 tii->loadRegFromStackSlot(*mi->getParent(), miItr, newVReg, ss, trc,
136 MachineInstr *loadInstr(prior(miItr));
137 SlotIndex loadIndex =
138 lis->InsertMachineInstrInMaps(loadInstr).getDefIndex();
// The reloaded value only needs to live from the load to this
// instruction — a one-index range.
139 SlotIndex endIndex = loadIndex.getNextIndex();
141 newLI->getNextValue(loadIndex, 0, true, lis->getVNInfoAllocator());
142 newLI->addRange(LiveRange(loadIndex, endIndex, loadVNI));
145 // Insert store if necessary.
// Store back to the stack slot immediately after the def.
147 tii->storeRegToStackSlot(*mi->getParent(), llvm::next(miItr), newVReg,
149 MachineInstr *storeInstr(llvm::next(miItr));
150 SlotIndex storeIndex =
151 lis->InsertMachineInstrInMaps(storeInstr).getDefIndex();
152 SlotIndex beginIndex = storeIndex.getPrevIndex();
154 newLI->getNextValue(beginIndex, 0, true, lis->getVNInfoAllocator());
155 newLI->addRange(LiveRange(beginIndex, storeIndex, storeVNI));
158 newIntervals.push_back(newLI);
163 } // end anonymous namespace
167 /// Spills any live range using the spill-everywhere method with no attempt at
// folding or rematerialization — it simply forwards to the base class's
// trivialSpillEverywhere.
// NOTE(review): access labels and some braces are elided in this excerpt.
169 class TrivialSpiller : public SpillerBase {
172 TrivialSpiller(MachineFunction *mf, LiveIntervals *lis, VirtRegMap *vrm)
173 : SpillerBase(mf, lis, vrm) {}
// Spiller interface: spill li, appending replacement intervals to
// newIntervals.  The spillIs / earliestStart parameters are unused here.
175 void spill(LiveInterval *li,
176 std::vector<LiveInterval*> &newIntervals,
177 SmallVectorImpl<LiveInterval*> &,
179 // Ignore spillIs - we don't use it.
180 trivialSpillEverywhere(li, newIntervals);
184 } // end anonymous namespace
188 /// Falls back on LiveIntervals::addIntervalsForSpills.
// Thin adapter: delegates all spilling work to the LiveIntervals
// analysis and merges the produced intervals into newIntervals.
// NOTE(review): member declarations (lis, vrm) and access labels are
// elided in this excerpt.
189 class StandardSpiller : public Spiller {
192 const MachineLoopInfo *loopInfo;
195 StandardSpiller(LiveIntervals *lis, const MachineLoopInfo *loopInfo,
197 : lis(lis), loopInfo(loopInfo), vrm(vrm) {}
199 /// Falls back on LiveIntervals::addIntervalsForSpills.
200 void spill(LiveInterval *li,
201 std::vector<LiveInterval*> &newIntervals,
202 SmallVectorImpl<LiveInterval*> &spillIs,
204 std::vector<LiveInterval*> added =
205 lis->addIntervalsForSpills(*li, spillIs, loopInfo, *vrm);
// Append rather than assign: the caller may already have collected
// intervals from earlier spills.
206 newIntervals.insert(newIntervals.end(), added.begin(), added.end());
210 } // end anonymous namespace
214 /// When a call to spill is placed this spiller will first try to break the
215 /// interval up into its component values (one new interval per value).
216 /// If this fails, or if a call is placed to spill a previously split interval
217 /// then the spiller falls back on the standard spilling mechanism.
// NOTE(review): many interior lines (braces, 'continue's, 'else's, call
// arguments, access labels) are elided from this excerpt — consult the
// full file before editing.  Statement order in extractVNI is
// load-bearing: indexes and live ranges are patched in a precise
// sequence.
218 class SplittingSpiller : public StandardSpiller {
220 SplittingSpiller(MachineFunction *mf, LiveIntervals *lis,
221 const MachineLoopInfo *loopInfo, VirtRegMap *vrm)
222 : StandardSpiller(lis, loopInfo, vrm) {
// Cache register info and target hooks needed by the splitting code.
224 mri = &mf->getRegInfo();
225 tii = mf->getTarget().getInstrInfo();
226 tri = mf->getTarget().getRegisterInfo();
// Try a value-number split first; fall back on the standard spiller
// (presumably only when splitting is not applied — the control flow
// between these two calls is partially elided here; confirm).
229 void spill(LiveInterval *li,
230 std::vector<LiveInterval*> &newIntervals,
231 SmallVectorImpl<LiveInterval*> &spillIs,
232 SlotIndex *earliestStart) {
233 if (worthTryingToSplit(li))
234 tryVNISplit(li, earliestStart);
236 StandardSpiller::spill(li, newIntervals, spillIs, earliestStart);
241 MachineRegisterInfo *mri;
242 const TargetInstrInfo *tii;
243 const TargetRegisterInfo *tri;
// Intervals produced by a previous split; never split these again
// (see worthTryingToSplit) to avoid endless re-splitting.
244 DenseSet<LiveInterval*> alreadySplit;
// A split is only worthwhile if the interval hasn't been split before
// and actually has more than one value number to separate.
246 bool worthTryingToSplit(LiveInterval *li) const {
247 return (!alreadySplit.count(li) && li->getNumValNums() > 1);
250 /// Try to break a LiveInterval into its component values.
// Copies the VNInfo list first because extractVNI mutates li while we
// iterate.  Successfully-extracted intervals are recorded in both
// 'added' and 'alreadySplit', and earliestStart (if provided) is
// lowered to the smallest begin index seen.
251 std::vector<LiveInterval*> tryVNISplit(LiveInterval *li,
252 SlotIndex *earliestStart) {
254 DEBUG(dbgs() << "Trying VNI split of %reg" << *li << "\n");
256 std::vector<LiveInterval*> added;
257 SmallVector<VNInfo*, 4> vnis;
// Snapshot the value numbers — extractVNI removes them from li.
259 std::copy(li->vni_begin(), li->vni_end(), std::back_inserter(vnis));
261 for (SmallVectorImpl<VNInfo*>::iterator vniItr = vnis.begin(),
262 vniEnd = vnis.end(); vniItr != vniEnd; ++vniItr) {
263 VNInfo *vni = *vniItr;
269 DEBUG(dbgs() << " Extracted Val #" << vni->id << " as ");
270 LiveInterval *splitInterval = extractVNI(li, vni);
272 if (splitInterval != 0) {
273 DEBUG(dbgs() << *splitInterval << "\n");
274 added.push_back(splitInterval);
275 alreadySplit.insert(splitInterval);
276 if (earliestStart != 0) {
277 if (splitInterval->beginIndex() < *earliestStart)
278 *earliestStart = splitInterval->beginIndex();
281 DEBUG(dbgs() << "0\n");
285 DEBUG(dbgs() << "Original LI: " << *li << "\n");
287 // If there original interval still contains some live ranges
288 // add it to added and alreadySplit.
291 alreadySplit.insert(li);
292 if (earliestStart != 0) {
293 if (li->beginIndex() < *earliestStart)
294 *earliestStart = li->beginIndex();
301 /// Extract the given value number from the interval.
// Moves all ranges belonging to vni into a brand new interval on a new
// vreg, then repairs the IR: renames the def and the uses, and inserts
// copies where the value crosses the old/new register boundary
// (PHI defs, two-address defs/uses, and PHI kills).
302 LiveInterval* extractVNI(LiveInterval *li, VNInfo *vni) const {
303 assert(vni->isDefAccurate() || vni->isPHIDef());
305 // Create a new vreg and live interval, copy VNI ranges over.
306 const TargetRegisterClass *trc = mri->getRegClass(li->reg);
307 unsigned newVReg = mri->createVirtualRegister(trc);
309 LiveInterval *newLI = &lis->getOrCreateInterval(newVReg);
310 VNInfo *newVNI = newLI->createValueCopy(vni, lis->getVNInfoAllocator());
312 // Start by copying all live ranges in the VN to the new interval.
313 for (LiveInterval::iterator rItr = li->begin(), rEnd = li->end();
314 rItr != rEnd; ++rItr) {
315 if (rItr->valno == vni) {
316 newLI->addRange(LiveRange(rItr->start, rItr->end, newVNI));
320 // Erase the old VNI & ranges.
321 li->removeValNo(vni);
323 // Collect all current uses of the register belonging to the given VNI.
324 // We'll use this to rename the register after we've dealt with the def.
// std::set gives deterministic iteration and deduplicates instructions
// that use the register in more than one operand.
325 std::set<MachineInstr*> uses;
326 for (MachineRegisterInfo::use_iterator
327 useItr = mri->use_begin(li->reg), useEnd = mri->use_end();
328 useItr != useEnd; ++useItr) {
329 uses.insert(&*useItr);
332 // Process the def instruction for this VNI.
333 if (newVNI->isPHIDef()) {
334 // Insert a copy at the start of the MBB. The range proceeding the
335 // copy will be attached to the original LiveInterval.
336 MachineBasicBlock *defMBB = lis->getMBBFromIndex(newVNI->def);
// NOTE(review): trailing copyRegToReg arguments are elided here.
337 tii->copyRegToReg(*defMBB, defMBB->begin(), newVReg, li->reg, trc, trc,
339 MachineInstr *copyMI = defMBB->begin();
340 copyMI->addRegisterKilled(li->reg, tri);
341 SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
// The original register keeps a short PHI-def range from block start
// up to the copy that moves the value into the new vreg.
342 VNInfo *phiDefVNI = li->getNextValue(lis->getMBBStartIdx(defMBB),
343 0, false, lis->getVNInfoAllocator());
344 phiDefVNI->setIsPHIDef(true);
345 li->addRange(LiveRange(phiDefVNI->def, copyIdx.getDefIndex(), phiDefVNI));
346 LiveRange *oldPHIDefRange =
347 newLI->getLiveRangeContaining(lis->getMBBStartIdx(defMBB));
349 // If the old phi def starts in the middle of the range chop it up.
350 if (oldPHIDefRange->start < lis->getMBBStartIdx(defMBB)) {
351 LiveRange oldPHIDefRange2(copyIdx.getDefIndex(), oldPHIDefRange->end,
352 oldPHIDefRange->valno);
353 oldPHIDefRange->end = lis->getMBBStartIdx(defMBB);
354 newLI->addRange(oldPHIDefRange2);
355 } else if (oldPHIDefRange->start == lis->getMBBStartIdx(defMBB)) {
356 // Otherwise if it's at the start of the range just trim it.
357 oldPHIDefRange->start = copyIdx.getDefIndex();
359 assert(false && "PHI def range doesn't cover PHI def?");
// The new interval's value is now defined by the inserted copy, so it
// is a real (accurate) def rather than a PHI def.
362 newVNI->def = copyIdx.getDefIndex();
363 newVNI->setCopy(copyMI);
364 newVNI->setIsPHIDef(false); // not a PHI def anymore.
365 newVNI->setIsDefAccurate(true);
367 // non-PHI def. Rename the def. If it's two-addr that means renaming the use
368 // and inserting a new copy too.
369 MachineInstr *defInst = lis->getInstructionFromIndex(newVNI->def);
370 // We'll rename this now, so we can remove it from uses.
372 unsigned defOpIdx = defInst->findRegisterDefOperandIdx(li->reg);
373 bool isTwoAddr = defInst->isRegTiedToUseOperand(defOpIdx),
374 twoAddrUseIsUndef = false;
// Rename the def operand(s) — and the tied use for two-address
// instructions (operand rewrite lines are elided in this excerpt).
376 for (unsigned i = 0; i < defInst->getNumOperands(); ++i) {
377 MachineOperand &mo = defInst->getOperand(i);
378 if (mo.isReg() && (mo.isDef() || isTwoAddr) && (mo.getReg()==li->reg)) {
380 if (isTwoAddr && mo.isUse() && mo.isUndef())
381 twoAddrUseIsUndef = true;
385 SlotIndex defIdx = lis->getInstructionIndex(defInst);
386 newVNI->def = defIdx.getDefIndex();
// A live (non-undef) tied use still needs the value in the OLD
// register just before the def: insert old->new copy before defInst.
388 if (isTwoAddr && !twoAddrUseIsUndef) {
389 MachineBasicBlock *defMBB = defInst->getParent();
390 tii->copyRegToReg(*defMBB, defInst, newVReg, li->reg, trc, trc,
392 MachineInstr *copyMI = prior(MachineBasicBlock::iterator(defInst));
393 SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
394 copyMI->addRegisterKilled(li->reg, tri);
// Truncate the old register's range at the copy, and give the new
// interval a bridging range [copy, def).
395 LiveRange *origUseRange =
396 li->getLiveRangeContaining(newVNI->def.getUseIndex());
397 origUseRange->end = copyIdx.getDefIndex();
398 VNInfo *copyVNI = newLI->getNextValue(copyIdx.getDefIndex(), copyMI,
399 true, lis->getVNInfoAllocator());
400 LiveRange copyRange(copyIdx.getDefIndex(),defIdx.getDefIndex(),copyVNI);
401 newLI->addRange(copyRange);
// Rename every collected use of the old register that is covered by
// the extracted value number.
405 for (std::set<MachineInstr*>::iterator
406 usesItr = uses.begin(), usesEnd = uses.end();
407 usesItr != usesEnd; ++usesItr) {
408 MachineInstr *useInst = *usesItr;
409 SlotIndex useIdx = lis->getInstructionIndex(useInst);
410 LiveRange *useRange =
411 newLI->getLiveRangeContaining(useIdx.getUseIndex());
413 // If this use doesn't belong to the new interval skip it.
417 // This use doesn't belong to the VNI, skip it.
418 if (useRange->valno != newVNI)
421 // Check if this instr is two address.
422 unsigned useOpIdx = useInst->findRegisterUseOperandIdx(li->reg);
423 bool isTwoAddress = useInst->isRegTiedToDefOperand(useOpIdx);
425 // Rename uses (and defs for two-address instrs).
426 for (unsigned i = 0; i < useInst->getNumOperands(); ++i) {
427 MachineOperand &mo = useInst->getOperand(i);
428 if (mo.isReg() && (mo.isUse() || isTwoAddress) &&
429 (mo.getReg() == li->reg)) {
434 // If this is a two address instruction we've got some extra work to do.
436 // We modified the def operand, so we need to copy back to the original
// register after the instruction: insert new->old copy after useInst.
438 MachineBasicBlock *useMBB = useInst->getParent();
439 MachineBasicBlock::iterator useItr(useInst);
440 tii->copyRegToReg(*useMBB, llvm::next(useItr), li->reg, newVReg, trc, trc,
442 MachineInstr *copyMI = llvm::next(useItr);
443 copyMI->addRegisterKilled(newVReg, tri);
444 SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
446 // Change the old two-address defined range & vni to start at
447 // (and be defined by) the copy.
448 LiveRange *origDefRange =
449 li->getLiveRangeContaining(useIdx.getDefIndex());
450 origDefRange->start = copyIdx.getDefIndex();
451 origDefRange->valno->def = copyIdx.getDefIndex();
452 origDefRange->valno->setCopy(copyMI);
454 // Insert a new range & vni for the two-address-to-copy value. This
455 // will be attached to the new live interval.
457 newLI->getNextValue(useIdx.getDefIndex(), 0, true,
458 lis->getVNInfoAllocator());
459 LiveRange copyRange(useIdx.getDefIndex(),copyIdx.getDefIndex(),copyVNI);
460 newLI->addRange(copyRange);
464 // Iterate over any PHI kills - we'll need to insert new copies for them.
// Each range of the new value that ends at a (non-PHI-slot) kill point
// gets a new->old copy before the block's terminators, so the original
// register carries the value into the successor's PHI.
465 for (LiveInterval::iterator LRI = newLI->begin(), LRE = newLI->end();
467 if (LRI->valno != newVNI || LRI->end.isPHI())
469 SlotIndex killIdx = LRI->end;
470 MachineBasicBlock *killMBB = lis->getMBBFromIndex(killIdx);
472 tii->copyRegToReg(*killMBB, killMBB->getFirstTerminator(),
473 li->reg, newVReg, trc, trc,
475 MachineInstr *copyMI = prior(killMBB->getFirstTerminator());
476 copyMI->addRegisterKilled(newVReg, tri);
477 SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
479 // Save the current end. We may need it to add a new range if the
480 // current range runs of the end of the MBB.
481 SlotIndex newKillRangeEnd = LRI->end;
482 LRI->end = copyIdx.getDefIndex();
484 if (newKillRangeEnd != lis->getMBBEndIdx(killMBB)) {
485 assert(newKillRangeEnd > lis->getMBBEndIdx(killMBB) &&
486 "PHI kill range doesn't reach kill-block end. Not sane.");
487 newLI->addRange(LiveRange(lis->getMBBEndIdx(killMBB),
488 newKillRangeEnd, newVNI));
// The original interval now owns the value from the copy to block end,
// and it is the one with the PHI kill.
491 VNInfo *newKillVNI = li->getNextValue(copyIdx.getDefIndex(),
493 lis->getVNInfoAllocator());
494 newKillVNI->setHasPHIKill(true);
495 li->addRange(LiveRange(copyIdx.getDefIndex(),
496 lis->getMBBEndIdx(killMBB),
499 newVNI->setHasPHIKill(false);
506 } // end anonymous namespace
509 llvm::Spiller* llvm::createSpiller(MachineFunction *mf, LiveIntervals *lis,
510 const MachineLoopInfo *loopInfo,
512 switch (spillerOpt) {
513 default: assert(0 && "unknown spiller");
514 case trivial: return new TrivialSpiller(mf, lis, vrm);
515 case standard: return new StandardSpiller(lis, loopInfo, vrm);
516 case splitting: return new SplittingSpiller(mf, lis, loopInfo, vrm);