1 //===-- llvm/CodeGen/Spiller.cpp - Spiller -------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #define DEBUG_TYPE "spiller"
13 #include "VirtRegMap.h"
14 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
15 #include "llvm/CodeGen/MachineFrameInfo.h"
16 #include "llvm/CodeGen/MachineFunction.h"
17 #include "llvm/CodeGen/MachineRegisterInfo.h"
18 #include "llvm/Target/TargetMachine.h"
19 #include "llvm/Target/TargetInstrInfo.h"
20 #include "llvm/Support/CommandLine.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Support/raw_ostream.h"
// Names of the selectable spiller implementations; chosen on the command
// line via the cl::opt below and dispatched on in createSpiller().
enum SpillerName { trivial, standard, splitting };

// Command line option selecting which Spiller subclass to construct.
// NOTE(review): the option's variable-name/init line and the closing
// clEnumValEnd appear to be elided in this view — verify against the
// full source file.
static cl::opt<SpillerName>
cl::desc("Spiller to use: (default: standard)"),
cl::values(clEnumVal(trivial, "trivial spiller"),
           clEnumVal(standard, "default spiller"),
           clEnumVal(splitting, "splitting spiller"),
// Spiller virtual destructor implementation.
// Defined out-of-line (rather than in the header) so this translation
// unit anchors Spiller's vtable.
Spiller::~Spiller() {}
/// Utility class for spillers.
///
/// Caches the per-function analyses spilling needs and provides the
/// shared "spill everywhere" implementation used by TrivialSpiller.
/// NOTE(review): this listing elides a number of lines (declarations such
/// as hasUse/hasDef/loadVNI/storeVNI, access specifiers, 'do {' openers,
/// 'continue;' statements and closing braces) — verify against the full
/// source before editing.
class SpillerBase : public Spiller {
  MachineFrameInfo *mfi;      // Stack frame info (stack-slot bookkeeping).
  MachineRegisterInfo *mri;   // Virtual register def/use chains.
  const TargetInstrInfo *tii; // Target hooks that emit spill/reload code.

  /// Construct a spiller base.
  /// Stores mf/lis/vrm and caches the frame, register and instruction
  /// info from the machine function.
  SpillerBase(MachineFunction *mf, LiveIntervals *lis, VirtRegMap *vrm)
    : mf(mf), lis(lis), vrm(vrm)
    mfi = mf->getFrameInfo();
    mri = &mf->getRegInfo();
    tii = mf->getTarget().getInstrInfo();

  /// Add spill ranges for every use/def of the live interval, inserting loads
  /// immediately before each use, and stores after each def. No folding or
  /// remat is attempted.
  /// Returns the set of new, tiny intervals created (one per use/def site).
  std::vector<LiveInterval*> trivialSpillEverywhere(LiveInterval *li) {
    DEBUG(dbgs() << "Spilling everywhere " << *li << "\n");

    // Spilled intervals are tagged with HUGE_VALF weight (see below);
    // spilling one again would recurse forever.
    assert(li->weight != HUGE_VALF &&
           "Attempting to spill already spilled value.");

    assert(!li->isStackSlot() &&
           "Trying to spill a stack slot.");

    DEBUG(dbgs() << "Trivial spill everywhere of reg" << li->reg << "\n");

    // Intervals created for the reloads/stores; returned to the caller.
    std::vector<LiveInterval*> added;

    const TargetRegisterClass *trc = mri->getRegClass(li->reg);
    // Every use/def of li shares this one stack slot.
    unsigned ss = vrm->assignVirt2StackSlot(li->reg);

    // Iterate over reg uses/defs.
    for (MachineRegisterInfo::reg_iterator
         regItr = mri->reg_begin(li->reg); regItr != mri->reg_end();) {

      // Grab the use/def instr.
      MachineInstr *mi = &*regItr;

      DEBUG(dbgs() << " Processing " << *mi);

      // Step regItr to the next use/def instr.
      // An instruction may reference li->reg in several operands, so
      // advance past every iterator position that still points at mi.
      } while (regItr != mri->reg_end() && (&*regItr == mi));

      // Collect uses & defs for this instr.
      SmallVector<unsigned, 2> indices;

      for (unsigned i = 0; i != mi->getNumOperands(); ++i) {
        MachineOperand &op = mi->getOperand(i);
        // Skip operands that don't reference the spilled register.
        if (!op.isReg() || op.getReg() != li->reg)
        hasUse |= mi->getOperand(i).isUse();
        hasDef |= mi->getOperand(i).isDef();
        indices.push_back(i);

      // Create a new vreg & interval for this instr.
      unsigned newVReg = mri->createVirtualRegister(trc);
      // The new vreg maps to the same stack slot as the original.
      vrm->assignVirt2StackSlot(newVReg, ss);
      LiveInterval *newLI = &lis->getOrCreateInterval(newVReg);
      // Mark the new interval as already-spilled (see assert above).
      newLI->weight = HUGE_VALF;

      // Update the reg operands & kill flags.
      for (unsigned i = 0; i < indices.size(); ++i) {
        unsigned mopIdx = indices[i];
        MachineOperand &mop = mi->getOperand(mopIdx);

        // A kill flag is only valid on a plain use, not one tied to a def.
        if (mop.isUse() && !mi->isRegTiedToDefOperand(mopIdx)) {

      assert(hasUse || hasDef);

      // Insert reload if necessary.
      MachineBasicBlock::iterator miItr(mi);
      tii->loadRegFromStackSlot(*mi->getParent(), miItr, newVReg, ss, trc);
      MachineInstr *loadInstr(prior(miItr));
      SlotIndex loadIndex =
        lis->InsertMachineInstrInMaps(loadInstr).getDefIndex();
      // The reloaded value lives only from the load to the instruction.
      SlotIndex endIndex = loadIndex.getNextIndex();

      // NOTE(review): the "VNInfo *loadVNI =" receiver for this call
      // appears elided in this view.
      newLI->getNextValue(loadIndex, 0, true, lis->getVNInfoAllocator());
      loadVNI->addKill(endIndex);
      newLI->addRange(LiveRange(loadIndex, endIndex, loadVNI));

      // Insert store if necessary.
      tii->storeRegToStackSlot(*mi->getParent(), llvm::next(miItr), newVReg, true,
      MachineInstr *storeInstr(llvm::next(miItr));
      SlotIndex storeIndex =
        lis->InsertMachineInstrInMaps(storeInstr).getDefIndex();
      // The stored value lives only from the def to the store.
      SlotIndex beginIndex = storeIndex.getPrevIndex();

      // NOTE(review): the "VNInfo *storeVNI =" receiver for this call
      // appears elided in this view.
      newLI->getNextValue(beginIndex, 0, true, lis->getVNInfoAllocator());
      storeVNI->addKill(storeIndex);
      newLI->addRange(LiveRange(beginIndex, storeIndex, storeVNI));

      added.push_back(newLI);

} // end anonymous namespace
/// Spills any live range using the spill-everywhere method with no attempt at
/// folding, rematerialization or splitting.
/// NOTE(review): access specifiers, the earliestStart parameter line and
/// closing braces appear elided in this view.
class TrivialSpiller : public SpillerBase {

  TrivialSpiller(MachineFunction *mf, LiveIntervals *lis, VirtRegMap *vrm)
    : SpillerBase(mf, lis, vrm) {}

  /// Spill by delegating straight to SpillerBase's spill-everywhere
  /// implementation.
  std::vector<LiveInterval*> spill(LiveInterval *li,
                                   SmallVectorImpl<LiveInterval*> &spillIs,
    // Ignore spillIs - we don't use it.
    return trivialSpillEverywhere(li);

} // end anonymous namespace
/// Falls back on LiveIntervals::addIntervalsForSpills.
/// NOTE(review): access specifiers, the lis/vrm member declarations, the
/// trailing constructor parameter and closing braces appear elided in
/// this view.
class StandardSpiller : public Spiller {
  const MachineLoopInfo *loopInfo; // Passed through to addIntervalsForSpills.

  StandardSpiller(LiveIntervals *lis, const MachineLoopInfo *loopInfo,
    : lis(lis), loopInfo(loopInfo), vrm(vrm) {}

  /// Falls back on LiveIntervals::addIntervalsForSpills.
  std::vector<LiveInterval*> spill(LiveInterval *li,
                                   SmallVectorImpl<LiveInterval*> &spillIs,
    return lis->addIntervalsForSpills(*li, spillIs, loopInfo, *vrm);

} // end anonymous namespace
/// When a call to spill is placed this spiller will first try to break the
/// interval up into its component values (one new interval per value).
/// If this fails, or if a call is placed to spill a previously split interval
/// then the spiller falls back on the standard spilling mechanism.
/// NOTE(review): this listing elides many lines (access specifiers,
/// 'continue;' statements, some argument lines, closing braces and a few
/// variable declarations such as copyVNI) — verify against the full
/// source before editing.
class SplittingSpiller : public StandardSpiller {

  /// Construct a splitting spiller, caching the register, instruction
  /// and target-register info from the given function.
  SplittingSpiller(MachineFunction *mf, LiveIntervals *lis,
                   const MachineLoopInfo *loopInfo, VirtRegMap *vrm)
    : StandardSpiller(lis, loopInfo, vrm) {
    mri = &mf->getRegInfo();
    tii = mf->getTarget().getInstrInfo();
    tri = mf->getTarget().getRegisterInfo();

  /// Try value-number splitting first; if the interval isn't worth
  /// splitting, fall back on the standard spiller.
  std::vector<LiveInterval*> spill(LiveInterval *li,
                                   SmallVectorImpl<LiveInterval*> &spillIs,
                                   SlotIndex *earliestStart) {

    if (worthTryingToSplit(li)) {
      return tryVNISplit(li, earliestStart);

    return StandardSpiller::spill(li, spillIs, earliestStart);

  MachineRegisterInfo *mri;        // Def/use chains for renaming.
  const TargetInstrInfo *tii;      // Emits the reg-to-reg copies.
  const TargetRegisterInfo *tri;   // Needed by addRegisterKilled.
  // Intervals produced by splitting; an interval is never split twice.
  DenseSet<LiveInterval*> alreadySplit;

  /// Splitting only pays off for intervals with more than one value
  /// number that have not themselves been produced by a prior split.
  bool worthTryingToSplit(LiveInterval *li) const {
    return (!alreadySplit.count(li) && li->getNumValNums() > 1);

  /// Try to break a LiveInterval into its component values.
  /// Returns the set of extracted intervals (plus the remainder of li,
  /// if any) and updates *earliestStart with the earliest begin index.
  std::vector<LiveInterval*> tryVNISplit(LiveInterval *li,
                                         SlotIndex *earliestStart) {

    DEBUG(dbgs() << "Trying VNI split of %reg" << *li << "\n");

    std::vector<LiveInterval*> added;
    SmallVector<VNInfo*, 4> vnis;

    // Snapshot the value numbers: extractVNI mutates li's VNI list, so
    // iterating li->vni_begin() directly would invalidate the iterator.
    std::copy(li->vni_begin(), li->vni_end(), std::back_inserter(vnis));

    for (SmallVectorImpl<VNInfo*>::iterator vniItr = vnis.begin(),
         vniEnd = vnis.end(); vniItr != vniEnd; ++vniItr) {
      VNInfo *vni = *vniItr;

      // Skip unused VNIs, or VNIs with no kills.
      if (vni->isUnused() || vni->kills.empty())

      DEBUG(dbgs() << " Extracted Val #" << vni->id << " as ");
      LiveInterval *splitInterval = extractVNI(li, vni);

      if (splitInterval != 0) {
        DEBUG(dbgs() << *splitInterval << "\n");
        added.push_back(splitInterval);
        alreadySplit.insert(splitInterval);
        // Track the earliest start of any new interval for the caller.
        if (earliestStart != 0) {
          if (splitInterval->beginIndex() < *earliestStart)
            *earliestStart = splitInterval->beginIndex();

        DEBUG(dbgs() << "0\n");

    DEBUG(dbgs() << "Original LI: " << *li << "\n");

    // If the original interval still contains some live ranges
    // add it to added and alreadySplit.
    alreadySplit.insert(li);
    if (earliestStart != 0) {
      if (li->beginIndex() < *earliestStart)
        *earliestStart = li->beginIndex();

  /// Extract the given value number from the interval.
  /// Moves vni's ranges to a new vreg/interval, renames the def and all
  /// uses belonging to the value, and stitches the old and new intervals
  /// together with copies at PHI defs, two-address defs and PHI kills.
  LiveInterval* extractVNI(LiveInterval *li, VNInfo *vni) const {
    assert(vni->isDefAccurate() || vni->isPHIDef());
    assert(!vni->kills.empty());

    // Create a new vreg and live interval, copy VNI kills & ranges over.
    const TargetRegisterClass *trc = mri->getRegClass(li->reg);
    unsigned newVReg = mri->createVirtualRegister(trc);

    LiveInterval *newLI = &lis->getOrCreateInterval(newVReg);
    VNInfo *newVNI = newLI->createValueCopy(vni, lis->getVNInfoAllocator());

    // Start by copying all live ranges in the VN to the new interval.
    for (LiveInterval::iterator rItr = li->begin(), rEnd = li->end();
         rItr != rEnd; ++rItr) {
      if (rItr->valno == vni) {
        newLI->addRange(LiveRange(rItr->start, rItr->end, newVNI));

    // Erase the old VNI & ranges.
    li->removeValNo(vni);

    // Collect all current uses of the register belonging to the given VNI.
    // We'll use this to rename the register after we've dealt with the def.
    std::set<MachineInstr*> uses;
    for (MachineRegisterInfo::use_iterator
         useItr = mri->use_begin(li->reg), useEnd = mri->use_end();
         useItr != useEnd; ++useItr) {
      uses.insert(&*useItr);

    // Process the def instruction for this VNI.
    if (newVNI->isPHIDef()) {
      // Insert a copy at the start of the MBB. The range preceding the
      // copy will be attached to the original LiveInterval.
      MachineBasicBlock *defMBB = lis->getMBBFromIndex(newVNI->def);
      tii->copyRegToReg(*defMBB, defMBB->begin(), newVReg, li->reg, trc, trc);
      MachineInstr *copyMI = defMBB->begin();
      copyMI->addRegisterKilled(li->reg, tri);
      SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
      VNInfo *phiDefVNI = li->getNextValue(lis->getMBBStartIdx(defMBB),
                                           0, false, lis->getVNInfoAllocator());
      phiDefVNI->setIsPHIDef(true);
      phiDefVNI->addKill(copyIdx.getDefIndex());
      li->addRange(LiveRange(phiDefVNI->def, copyIdx.getDefIndex(), phiDefVNI));
      LiveRange *oldPHIDefRange =
        newLI->getLiveRangeContaining(lis->getMBBStartIdx(defMBB));

      // If the old phi def starts in the middle of the range chop it up.
      if (oldPHIDefRange->start < lis->getMBBStartIdx(defMBB)) {
        LiveRange oldPHIDefRange2(copyIdx.getDefIndex(), oldPHIDefRange->end,
                                  oldPHIDefRange->valno);
        oldPHIDefRange->end = lis->getMBBStartIdx(defMBB);
        newLI->addRange(oldPHIDefRange2);
      } else if (oldPHIDefRange->start == lis->getMBBStartIdx(defMBB)) {
        // Otherwise if it's at the start of the range just trim it.
        oldPHIDefRange->start = copyIdx.getDefIndex();
        assert(false && "PHI def range doesn't cover PHI def?");

      // The extracted value is now defined by the copy, not by a PHI.
      newVNI->def = copyIdx.getDefIndex();
      newVNI->setCopy(copyMI);
      newVNI->setIsPHIDef(false); // not a PHI def anymore.
      newVNI->setIsDefAccurate(true);

    // non-PHI def. Rename the def. If it's two-addr that means renaming the use
    // and inserting a new copy too.
    MachineInstr *defInst = lis->getInstructionFromIndex(newVNI->def);
    // We'll rename this now, so we can remove it from uses.
    unsigned defOpIdx = defInst->findRegisterDefOperandIdx(li->reg);
    bool isTwoAddr = defInst->isRegTiedToUseOperand(defOpIdx),
      twoAddrUseIsUndef = false;

    // Rename the def (and, for two-address instrs, the tied use) operands.
    for (unsigned i = 0; i < defInst->getNumOperands(); ++i) {
      MachineOperand &mo = defInst->getOperand(i);
      if (mo.isReg() && (mo.isDef() || isTwoAddr) && (mo.getReg()==li->reg)) {
        if (isTwoAddr && mo.isUse() && mo.isUndef())
          twoAddrUseIsUndef = true;

    SlotIndex defIdx = lis->getInstructionIndex(defInst);
    newVNI->def = defIdx.getDefIndex();

    if (isTwoAddr && !twoAddrUseIsUndef) {
      // The tied use still reads li->reg: insert newVReg = li->reg just
      // before the def and truncate the original value's range there.
      MachineBasicBlock *defMBB = defInst->getParent();
      tii->copyRegToReg(*defMBB, defInst, newVReg, li->reg, trc, trc);
      MachineInstr *copyMI = prior(MachineBasicBlock::iterator(defInst));
      SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
      copyMI->addRegisterKilled(li->reg, tri);
      LiveRange *origUseRange =
        li->getLiveRangeContaining(newVNI->def.getUseIndex());
      VNInfo *origUseVNI = origUseRange->valno;
      origUseRange->end = copyIdx.getDefIndex();
      // Retarget the old value's kill from the def to the new copy.
      bool updatedKills = false;
      for (unsigned k = 0; k < origUseVNI->kills.size(); ++k) {
        if (origUseVNI->kills[k] == defIdx.getDefIndex()) {
          origUseVNI->kills[k] = copyIdx.getDefIndex();

      assert(updatedKills && "Failed to update VNI kill list.");
      VNInfo *copyVNI = newLI->getNextValue(copyIdx.getDefIndex(), copyMI,
                                            true, lis->getVNInfoAllocator());
      copyVNI->addKill(defIdx.getDefIndex());
      LiveRange copyRange(copyIdx.getDefIndex(),defIdx.getDefIndex(),copyVNI);
      newLI->addRange(copyRange);

    // Rename every collected use that belongs to the extracted value.
    for (std::set<MachineInstr*>::iterator
         usesItr = uses.begin(), usesEnd = uses.end();
         usesItr != usesEnd; ++usesItr) {
      MachineInstr *useInst = *usesItr;
      SlotIndex useIdx = lis->getInstructionIndex(useInst);
      LiveRange *useRange =
        newLI->getLiveRangeContaining(useIdx.getUseIndex());

      // If this use doesn't belong to the new interval skip it.

      // This use doesn't belong to the VNI, skip it.
      if (useRange->valno != newVNI)

      // Check if this instr is two address.
      unsigned useOpIdx = useInst->findRegisterUseOperandIdx(li->reg);
      bool isTwoAddress = useInst->isRegTiedToDefOperand(useOpIdx);

      // Rename uses (and defs for two-address instrs).
      for (unsigned i = 0; i < useInst->getNumOperands(); ++i) {
        MachineOperand &mo = useInst->getOperand(i);
        if (mo.isReg() && (mo.isUse() || isTwoAddress) &&
            (mo.getReg() == li->reg)) {

      // If this is a two address instruction we've got some extra work to do.
      // We modified the def operand, so we need to copy back to the original
      MachineBasicBlock *useMBB = useInst->getParent();
      MachineBasicBlock::iterator useItr(useInst);
      tii->copyRegToReg(*useMBB, next(useItr), li->reg, newVReg, trc, trc);
      MachineInstr *copyMI = next(useItr);
      copyMI->addRegisterKilled(newVReg, tri);
      SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);

      // Change the old two-address defined range & vni to start at
      // (and be defined by) the copy.
      LiveRange *origDefRange =
        li->getLiveRangeContaining(useIdx.getDefIndex());
      origDefRange->start = copyIdx.getDefIndex();
      origDefRange->valno->def = copyIdx.getDefIndex();
      origDefRange->valno->setCopy(copyMI);

      // Insert a new range & vni for the two-address-to-copy value. This
      // will be attached to the new live interval.
      // NOTE(review): the "VNInfo *copyVNI =" receiver for this call
      // appears elided in this view.
      newLI->getNextValue(useIdx.getDefIndex(), 0, true,
                          lis->getVNInfoAllocator());
      copyVNI->addKill(copyIdx.getDefIndex());
      LiveRange copyRange(useIdx.getDefIndex(),copyIdx.getDefIndex(),copyVNI);
      newLI->addRange(copyRange);

    // Iterate over any PHI kills - we'll need to insert new copies for them.
    for (VNInfo::KillSet::iterator
         killItr = newVNI->kills.begin(), killEnd = newVNI->kills.end();
         killItr != killEnd; ++killItr) {
      SlotIndex killIdx(*killItr);
      if (killItr->isPHI()) {
        MachineBasicBlock *killMBB = lis->getMBBFromIndex(killIdx);
        LiveRange *oldKillRange =
          newLI->getLiveRangeContaining(killIdx);

        assert(oldKillRange != 0 && "No kill range?");

        // Copy back into the original register before the terminators.
        tii->copyRegToReg(*killMBB, killMBB->getFirstTerminator(),
                          li->reg, newVReg, trc, trc);
        MachineInstr *copyMI = prior(killMBB->getFirstTerminator());
        copyMI->addRegisterKilled(newVReg, tri);
        SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);

        // Save the current end. We may need it to add a new range if the
        // current range runs off the end of the MBB.
        SlotIndex newKillRangeEnd = oldKillRange->end;
        oldKillRange->end = copyIdx.getDefIndex();

        if (newKillRangeEnd != lis->getMBBEndIdx(killMBB)) {
          assert(newKillRangeEnd > lis->getMBBEndIdx(killMBB) &&
                 "PHI kill range doesn't reach kill-block end. Not sane.");
          newLI->addRange(LiveRange(lis->getMBBEndIdx(killMBB),
                                    newKillRangeEnd, newVNI));

        // The kill is now the copy back into the original register.
        *killItr = oldKillRange->end;
        VNInfo *newKillVNI = li->getNextValue(copyIdx.getDefIndex(),
                                              lis->getVNInfoAllocator());
        newKillVNI->addKill(lis->getMBBTerminatorGap(killMBB));
        newKillVNI->setHasPHIKill(true);
        li->addRange(LiveRange(copyIdx.getDefIndex(),
                               lis->getMBBEndIdx(killMBB),

    // PHI kills were rewritten to copies above.
    newVNI->setHasPHIKill(false);

} // end anonymous namespace
// Factory: construct the spiller implementation selected by the -spiller
// command line option. Ownership of the returned object passes to the
// caller. NOTE(review): the trailing VirtRegMap parameter line and the
// closing brace appear elided in this view.
llvm::Spiller* llvm::createSpiller(MachineFunction *mf, LiveIntervals *lis,
                                   const MachineLoopInfo *loopInfo,
  switch (spillerOpt) {
  default: assert(0 && "unknown spiller");
  case trivial: return new TrivialSpiller(mf, lis, vrm);
  case standard: return new StandardSpiller(lis, loopInfo, vrm);
  case splitting: return new SplittingSpiller(mf, lis, loopInfo, vrm);