1 //===-- llvm/CodeGen/Spiller.cpp - Spiller -------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #define DEBUG_TYPE "spiller"
13 #include "VirtRegMap.h"
14 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
15 #include "llvm/CodeGen/MachineFrameInfo.h"
16 #include "llvm/CodeGen/MachineFunction.h"
17 #include "llvm/CodeGen/MachineRegisterInfo.h"
18 #include "llvm/Target/TargetMachine.h"
19 #include "llvm/Target/TargetInstrInfo.h"
20 #include "llvm/Support/CommandLine.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Support/raw_ostream.h"
// Names for the available spiller implementations; selected on the command
// line and consumed by createSpiller() at the bottom of this file.
28 enum SpillerName { trivial, standard, splitting };
// Command-line option choosing which spiller to construct.
// NOTE(review): the option's variable/name line (original line 32) and the
// terminating clEnumValEnd + semicolon appear to be elided from this excerpt;
// the option presumably defaults to 'standard' per the desc string — confirm
// against the full file.
31 static cl::opt<SpillerName>
33 cl::desc("Spiller to use: (default: standard)"),
35 cl::values(clEnumVal(trivial, "trivial spiller"),
36 clEnumVal(standard, "default spiller"),
37 clEnumVal(splitting, "splitting spiller"),
41 // Spiller virtual destructor implementation.
// Defining the (empty) virtual destructor out-of-line here anchors the
// Spiller vtable to this translation unit.
42 Spiller::~Spiller() {}
46 /// Utility class for spillers.
///
/// Caches the per-function analyses (frame info, register info, instruction
/// info) that concrete spillers need, and provides the shared
/// trivialSpillEverywhere() helper.
/// NOTE(review): the declarations of the mf/lis/vrm members initialized in
/// the ctor init-list, and the access specifiers, are elided from this
/// excerpt.
47 class SpillerBase : public Spiller {
52 MachineFrameInfo *mfi;
53 MachineRegisterInfo *mri;
54 const TargetInstrInfo *tii;
57 /// Construct a spiller base.
/// Derives the cached analysis pointers from the given MachineFunction.
58 SpillerBase(MachineFunction *mf, LiveIntervals *lis, VirtRegMap *vrm)
59 : mf(mf), lis(lis), vrm(vrm)
61 mfi = mf->getFrameInfo();
62 mri = &mf->getRegInfo();
63 tii = mf->getTarget().getInstrInfo();
66 /// Add spill ranges for every use/def of the live interval, inserting loads
67 /// immediately before each use, and stores after each def. No folding or
68 /// remat is attempted.
///
/// Returns the set of new intervals created (one per use/def instruction),
/// each assigned to the same stack slot as the original register and given
/// HUGE_VALF weight so they are never spilled again.
/// NOTE(review): several interior lines are elided from this excerpt — the
/// `do {` matching the `while` below, the `continue;` after the operand
/// filter, the hasUse/hasDef declarations, the operand-rewrite body, the
/// guards around the reload/store insertion, and closing braces/return.
69 std::vector<LiveInterval*> trivialSpillEverywhere(LiveInterval *li) {
70 DEBUG(dbgs() << "Spilling everywhere " << *li << "\n");
// A weight of HUGE_VALF marks an interval produced by a previous spill;
// spilling it again would loop forever.
72 assert(li->weight != HUGE_VALF &&
73 "Attempting to spill already spilled value.");
75 assert(!li->isStackSlot() &&
76 "Trying to spill a stack slot.");
78 DEBUG(dbgs() << "Trivial spill everywhere of reg" << li->reg << "\n");
80 std::vector<LiveInterval*> added;
// All replacement vregs share the original's register class and a single
// stack slot assigned up front.
82 const TargetRegisterClass *trc = mri->getRegClass(li->reg);
83 unsigned ss = vrm->assignVirt2StackSlot(li->reg);
85 // Iterate over reg uses/defs.
86 for (MachineRegisterInfo::reg_iterator
87 regItr = mri->reg_begin(li->reg); regItr != mri->reg_end();) {
89 // Grab the use/def instr.
90 MachineInstr *mi = &*regItr;
92 DEBUG(dbgs() << " Processing " << *mi);
94 // Step regItr to the next use/def instr.
// NOTE(review): the `do {` opening this loop (advancing regItr past every
// operand of mi) is elided from this excerpt.
97 } while (regItr != mri->reg_end() && (&*regItr == mi));
99 // Collect uses & defs for this instr.
100 SmallVector<unsigned, 2> indices;
// NOTE(review): hasUse/hasDef must be declared (false) just above here in
// the full file; the declarations are elided from this excerpt.
103 for (unsigned i = 0; i != mi->getNumOperands(); ++i) {
104 MachineOperand &op = mi->getOperand(i);
// Skip operands that aren't the register being spilled (the `continue;`
// body of this if is elided from this excerpt).
105 if (!op.isReg() || op.getReg() != li->reg)
107 hasUse |= mi->getOperand(i).isUse();
108 hasDef |= mi->getOperand(i).isDef();
109 indices.push_back(i);
112 // Create a new vreg & interval for this instr.
113 unsigned newVReg = mri->createVirtualRegister(trc);
// All the per-instruction vregs map onto the one stack slot allocated
// above, and are weighted so they won't be candidates for re-spilling.
115 vrm->assignVirt2StackSlot(newVReg, ss);
116 LiveInterval *newLI = &lis->getOrCreateInterval(newVReg);
117 newLI->weight = HUGE_VALF;
119 // Update the reg operands & kill flags.
120 for (unsigned i = 0; i < indices.size(); ++i) {
121 unsigned mopIdx = indices[i];
122 MachineOperand &mop = mi->getOperand(mopIdx);
// Two-address tied uses are handled with the def; the rewrite body of this
// branch is elided from this excerpt.
124 if (mop.isUse() && !mi->isRegTiedToDefOperand(mopIdx)) {
128 assert(hasUse || hasDef);
130 // Insert reload if necessary.
// (Presumably guarded by `if (hasUse)` in the full file — elided here.)
131 MachineBasicBlock::iterator miItr(mi);
133 tii->loadRegFromStackSlot(*mi->getParent(), miItr, newVReg, ss, trc);
134 MachineInstr *loadInstr(prior(miItr));
135 SlotIndex loadIndex =
136 lis->InsertMachineInstrInMaps(loadInstr).getDefIndex();
137 SlotIndex endIndex = loadIndex.getNextIndex();
// NOTE(review): the `VNInfo *loadVNI =` prefix of this statement is elided
// from this excerpt.
139 newLI->getNextValue(loadIndex, 0, true, lis->getVNInfoAllocator());
140 loadVNI->addKill(endIndex);
// The reload value lives only from the load to the instruction it feeds.
141 newLI->addRange(LiveRange(loadIndex, endIndex, loadVNI));
144 // Insert store if necessary.
// (Presumably guarded by `if (hasDef)` in the full file — elided here, as
// is the trailing `ss, trc)` of the storeRegToStackSlot call.)
146 tii->storeRegToStackSlot(*mi->getParent(), llvm::next(miItr), newVReg, true,
148 MachineInstr *storeInstr(llvm::next(miItr));
149 SlotIndex storeIndex =
150 lis->InsertMachineInstrInMaps(storeInstr).getDefIndex();
151 SlotIndex beginIndex = storeIndex.getPrevIndex();
// NOTE(review): the `VNInfo *storeVNI =` prefix of this statement is elided
// from this excerpt.
153 newLI->getNextValue(beginIndex, 0, true, lis->getVNInfoAllocator());
154 storeVNI->addKill(storeIndex);
// The defined value lives only from the defining instruction to the store.
155 newLI->addRange(LiveRange(beginIndex, storeIndex, storeVNI));
158 added.push_back(newLI);
167 /// Spills any live range using the spill-everywhere method with no attempt at
/// folding, remat, or splitting — it simply delegates to the inherited
/// SpillerBase::trivialSpillEverywhere().
/// NOTE(review): the `public:` label and closing brace of this class are
/// elided from this excerpt.
169 class TrivialSpiller : public SpillerBase {
172 TrivialSpiller(MachineFunction *mf, LiveIntervals *lis, VirtRegMap *vrm)
173 : SpillerBase(mf, lis, vrm) {}
/// Spill the given interval everywhere. The spillIs list (and, presumably,
/// an elided SlotIndex* parameter — confirm against the full file) are
/// ignored by this spiller.
175 std::vector<LiveInterval*> spill(LiveInterval *li,
176 SmallVectorImpl<LiveInterval*> &spillIs,
178 // Ignore spillIs - we don't use it.
179 return trivialSpillEverywhere(li);
184 /// Falls back on LiveIntervals::addIntervalsForSpills.
///
/// Thin adapter: holds the LiveIntervals, MachineLoopInfo and VirtRegMap
/// needed by addIntervalsForSpills and forwards spill() to it.
/// NOTE(review): the lis/vrm member declarations, access specifiers, and the
/// closing brace of this class are elided from this excerpt.
185 class StandardSpiller : public Spiller {
188 const MachineLoopInfo *loopInfo;
191 StandardSpiller(LiveIntervals *lis, const MachineLoopInfo *loopInfo,
193 : lis(lis), loopInfo(loopInfo), vrm(vrm) {}
195 /// Falls back on LiveIntervals::addIntervalsForSpills.
// NOTE(review): a trailing parameter line (SlotIndex*, unused here) appears
// to be elided between the spillIs parameter and the body.
196 std::vector<LiveInterval*> spill(LiveInterval *li,
197 SmallVectorImpl<LiveInterval*> &spillIs,
199 return lis->addIntervalsForSpills(*li, spillIs, loopInfo, *vrm);
204 /// When a call to spill is placed this spiller will first try to break the
205 /// interval up into its component values (one new interval per value).
206 /// If this fails, or if a call is placed to spill a previously split interval
207 /// then the spiller falls back on the standard spilling mechanism.
208 class SplittingSpiller : public StandardSpiller {
/// Construct a splitting spiller; caches register/instruction/target-reg
/// info from the MachineFunction on top of the StandardSpiller state.
210 SplittingSpiller(MachineFunction *mf, LiveIntervals *lis,
211 const MachineLoopInfo *loopInfo, VirtRegMap *vrm)
212 : StandardSpiller(lis, loopInfo, vrm) {
214 mri = &mf->getRegInfo();
215 tii = mf->getTarget().getInstrInfo();
216 tri = mf->getTarget().getRegisterInfo();
/// Try a per-value split first; fall back to StandardSpiller::spill when the
/// interval isn't worth splitting (already split, or has a single value).
219 std::vector<LiveInterval*> spill(LiveInterval *li,
220 SmallVectorImpl<LiveInterval*> &spillIs,
221 SlotIndex *earliestStart) {
223 if (worthTryingToSplit(li)) {
224 return tryVNISplit(li, earliestStart);
227 return StandardSpiller::spill(li, spillIs, earliestStart);
// Cached per-function analyses, plus the set of intervals this spiller has
// already produced by splitting (those must not be split again).
232 MachineRegisterInfo *mri;
233 const TargetInstrInfo *tii;
234 const TargetRegisterInfo *tri;
235 DenseSet<LiveInterval*> alreadySplit;
// A split is only worthwhile for intervals we haven't already split that
// contain more than one value number (otherwise there is nothing to
// separate).
237 bool worthTryingToSplit(LiveInterval *li) const {
238 return (!alreadySplit.count(li) && li->getNumValNums() > 1);
241 /// Try to break a LiveInterval into its component values.
///
/// For each extractable VNInfo, extractVNI() peels the value into a fresh
/// interval; every successfully split interval is recorded in alreadySplit
/// (so it won't be re-split) and, when earliestStart is non-null, used to
/// lower *earliestStart. Returns the list of new intervals (plus the
/// original, if it still has live ranges — see the elided tail).
/// NOTE(review): the `continue;` after the skip-check, the guard around the
/// original-interval handling, and the final return are elided from this
/// excerpt.
242 std::vector<LiveInterval*> tryVNISplit(LiveInterval *li,
243 SlotIndex *earliestStart) {
245 DEBUG(dbgs() << "Trying VNI split of %reg" << *li << "\n");
247 std::vector<LiveInterval*> added;
// Snapshot the value numbers first: extractVNI mutates li's VNI list, so we
// must not iterate li->vni_begin()/end() directly.
248 SmallVector<VNInfo*, 4> vnis;
250 std::copy(li->vni_begin(), li->vni_end(), std::back_inserter(vnis));
252 for (SmallVectorImpl<VNInfo*>::iterator vniItr = vnis.begin(),
253 vniEnd = vnis.end(); vniItr != vniEnd; ++vniItr) {
254 VNInfo *vni = *vniItr;
256 // Skip unused VNIs, or VNIs with no kills.
257 if (vni->isUnused() || vni->kills.empty())
260 DEBUG(dbgs() << " Extracted Val #" << vni->id << " as ");
261 LiveInterval *splitInterval = extractVNI(li, vni);
263 if (splitInterval != 0) {
264 DEBUG(dbgs() << *splitInterval << "\n");
265 added.push_back(splitInterval);
266 alreadySplit.insert(splitInterval);
267 if (earliestStart != 0) {
268 if (splitInterval->beginIndex() < *earliestStart)
269 *earliestStart = splitInterval->beginIndex();
// extractVNI declined to split this value.
272 DEBUG(dbgs() << "0\n");
276 DEBUG(dbgs() << "Original LI: " << *li << "\n");
278 // If there original interval still contains some live ranges
279 // add it to added and alreadySplit.
// NOTE(review): the `if (!li->empty())`-style guard and the
// `added.push_back(li)` presumably preceding this are elided.
282 alreadySplit.insert(li);
283 if (earliestStart != 0) {
284 if (li->beginIndex() < *earliestStart)
285 *earliestStart = li->beginIndex();
292 /// Extract the given value number from the interval.
///
/// Peels `vni` out of `li` into a brand-new vreg/LiveInterval and rewrites
/// the machine code to match: the def is renamed (or a copy inserted for
/// PHI defs and two-address defs), uses covered by the new interval are
/// renamed, and copies back to the original register are inserted at
/// two-address uses and PHI kills. Returns the new interval (the elided
/// return may yield 0 when extraction is abandoned — confirm against the
/// full file).
/// NOTE(review): many interior lines are elided from this excerpt (loop
/// closing braces, `continue;` bodies, several `VNInfo *x =` statement
/// prefixes, `uses.erase(...)`, the else of the PHI-def branch, null-range
/// skips, and the final return). The code lines below are untouched.
293 LiveInterval* extractVNI(LiveInterval *li, VNInfo *vni) const {
294 assert(vni->isDefAccurate() || vni->isPHIDef());
295 assert(!vni->kills.empty());
297 // Create a new vreg and live interval, copy VNI kills & ranges over.
298 const TargetRegisterClass *trc = mri->getRegClass(li->reg);
299 unsigned newVReg = mri->createVirtualRegister(trc);
301 LiveInterval *newLI = &lis->getOrCreateInterval(newVReg);
302 VNInfo *newVNI = newLI->createValueCopy(vni, lis->getVNInfoAllocator());
304 // Start by copying all live ranges in the VN to the new interval.
305 for (LiveInterval::iterator rItr = li->begin(), rEnd = li->end();
306 rItr != rEnd; ++rItr) {
307 if (rItr->valno == vni) {
308 newLI->addRange(LiveRange(rItr->start, rItr->end, newVNI));
312 // Erase the old VNI & ranges.
313 li->removeValNo(vni);
315 // Collect all current uses of the register belonging to the given VNI.
316 // We'll use this to rename the register after we've dealt with the def.
317 std::set<MachineInstr*> uses;
318 for (MachineRegisterInfo::use_iterator
319 useItr = mri->use_begin(li->reg), useEnd = mri->use_end();
320 useItr != useEnd; ++useItr) {
321 uses.insert(&*useItr);
324 // Process the def instruction for this VNI.
325 if (newVNI->isPHIDef()) {
326 // Insert a copy at the start of the MBB. The range proceeding the
327 // copy will be attached to the original LiveInterval.
328 MachineBasicBlock *defMBB = lis->getMBBFromIndex(newVNI->def);
329 tii->copyRegToReg(*defMBB, defMBB->begin(), newVReg, li->reg, trc, trc);
330 MachineInstr *copyMI = defMBB->begin();
331 copyMI->addRegisterKilled(li->reg, tri);
332 SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
// The original interval keeps a short PHI-def range from block entry up to
// the new copy; the copy then defines the new interval's value.
333 VNInfo *phiDefVNI = li->getNextValue(lis->getMBBStartIdx(defMBB),
334 0, false, lis->getVNInfoAllocator());
335 phiDefVNI->setIsPHIDef(true);
336 phiDefVNI->addKill(copyIdx.getDefIndex());
337 li->addRange(LiveRange(phiDefVNI->def, copyIdx.getDefIndex(), phiDefVNI));
338 LiveRange *oldPHIDefRange =
339 newLI->getLiveRangeContaining(lis->getMBBStartIdx(defMBB));
341 // If the old phi def starts in the middle of the range chop it up.
342 if (oldPHIDefRange->start < lis->getMBBStartIdx(defMBB)) {
343 LiveRange oldPHIDefRange2(copyIdx.getDefIndex(), oldPHIDefRange->end,
344 oldPHIDefRange->valno);
345 oldPHIDefRange->end = lis->getMBBStartIdx(defMBB);
346 newLI->addRange(oldPHIDefRange2);
347 } else if (oldPHIDefRange->start == lis->getMBBStartIdx(defMBB)) {
348 // Otherwise if it's at the start of the range just trim it.
349 oldPHIDefRange->start = copyIdx.getDefIndex();
// NOTE(review): the `} else {` wrapping this assert is elided.
351 assert(false && "PHI def range doesn't cover PHI def?");
// The new interval's value is now defined by the inserted copy, so it is no
// longer a PHI def and its def index is accurate.
354 newVNI->def = copyIdx.getDefIndex();
355 newVNI->setCopy(copyMI);
356 newVNI->setIsPHIDef(false); // not a PHI def anymore.
357 newVNI->setIsDefAccurate(true);
// NOTE(review): the `} else {` opening this non-PHI branch is elided.
359 // non-PHI def. Rename the def. If it's two-addr that means renaming the use
360 // and inserting a new copy too.
361 MachineInstr *defInst = lis->getInstructionFromIndex(newVNI->def);
362 // We'll rename this now, so we can remove it from uses.
// NOTE(review): the `uses.erase(defInst);` implied by the comment above is
// elided from this excerpt.
364 unsigned defOpIdx = defInst->findRegisterDefOperandIdx(li->reg);
365 bool isTwoAddr = defInst->isRegTiedToUseOperand(defOpIdx),
366 twoAddrUseIsUndef = false;
// Rename the def operand (and tied two-address use) to the new vreg; the
// `mo.setReg(newVReg);` body of this if is elided from this excerpt.
368 for (unsigned i = 0; i < defInst->getNumOperands(); ++i) {
369 MachineOperand &mo = defInst->getOperand(i);
370 if (mo.isReg() && (mo.isDef() || isTwoAddr) && (mo.getReg()==li->reg)) {
372 if (isTwoAddr && mo.isUse() && mo.isUndef())
373 twoAddrUseIsUndef = true;
377 SlotIndex defIdx = lis->getInstructionIndex(defInst);
378 newVNI->def = defIdx.getDefIndex();
// For a two-address def whose tied use is live-in, insert a copy from the
// original reg to the new vreg just before the def, and retarget the
// original value's range/kills at the copy.
380 if (isTwoAddr && !twoAddrUseIsUndef) {
381 MachineBasicBlock *defMBB = defInst->getParent();
382 tii->copyRegToReg(*defMBB, defInst, newVReg, li->reg, trc, trc);
383 MachineInstr *copyMI = prior(MachineBasicBlock::iterator(defInst));
384 SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
385 copyMI->addRegisterKilled(li->reg, tri);
386 LiveRange *origUseRange =
387 li->getLiveRangeContaining(newVNI->def.getUseIndex());
388 VNInfo *origUseVNI = origUseRange->valno;
389 origUseRange->end = copyIdx.getDefIndex();
390 bool updatedKills = false;
// NOTE(review): the `updatedKills = true; break;` body of this match, and
// the loop's closing braces, are elided from this excerpt.
391 for (unsigned k = 0; k < origUseVNI->kills.size(); ++k) {
392 if (origUseVNI->kills[k] == defIdx.getDefIndex()) {
393 origUseVNI->kills[k] = copyIdx.getDefIndex();
398 assert(updatedKills && "Failed to update VNI kill list.");
// The copy defines a short-lived value in the new interval spanning from
// the copy to the two-address def it feeds.
399 VNInfo *copyVNI = newLI->getNextValue(copyIdx.getDefIndex(), copyMI,
400 true, lis->getVNInfoAllocator());
401 copyVNI->addKill(defIdx.getDefIndex());
402 LiveRange copyRange(copyIdx.getDefIndex(),defIdx.getDefIndex(),copyVNI);
403 newLI->addRange(copyRange);
// Rename every collected use whose use-slot falls inside the new interval.
407 for (std::set<MachineInstr*>::iterator
408 usesItr = uses.begin(), usesEnd = uses.end();
409 usesItr != usesEnd; ++usesItr) {
410 MachineInstr *useInst = *usesItr;
411 SlotIndex useIdx = lis->getInstructionIndex(useInst);
412 LiveRange *useRange =
413 newLI->getLiveRangeContaining(useIdx.getUseIndex());
415 // If this use doesn't belong to the new interval skip it.
// NOTE(review): the `if (useRange == 0) continue;` implied by the comment
// above, and the `continue;` after the valno check below, are elided.
419 // This use doesn't belong to the VNI, skip it.
420 if (useRange->valno != newVNI)
423 // Check if this instr is two address.
424 unsigned useOpIdx = useInst->findRegisterUseOperandIdx(li->reg);
425 bool isTwoAddress = useInst->isRegTiedToDefOperand(useOpIdx);
427 // Rename uses (and defs for two-address instrs).
// (The `mo.setReg(newVReg);` body of this if is elided from this excerpt.)
428 for (unsigned i = 0; i < useInst->getNumOperands(); ++i) {
429 MachineOperand &mo = useInst->getOperand(i);
430 if (mo.isReg() && (mo.isUse() || isTwoAddress) &&
431 (mo.getReg() == li->reg)) {
436 // If this is a two address instruction we've got some extra work to do.
// (Presumably guarded by `if (isTwoAddress)` — elided from this excerpt.)
438 // We modified the def operand, so we need to copy back to the original
// ... register after the instruction (comment continuation elided).
440 MachineBasicBlock *useMBB = useInst->getParent();
441 MachineBasicBlock::iterator useItr(useInst);
442 tii->copyRegToReg(*useMBB, next(useItr), li->reg, newVReg, trc, trc);
443 MachineInstr *copyMI = next(useItr);
444 copyMI->addRegisterKilled(newVReg, tri);
445 SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
447 // Change the old two-address defined range & vni to start at
448 // (and be defined by) the copy.
449 LiveRange *origDefRange =
450 li->getLiveRangeContaining(useIdx.getDefIndex());
451 origDefRange->start = copyIdx.getDefIndex();
452 origDefRange->valno->def = copyIdx.getDefIndex();
453 origDefRange->valno->setCopy(copyMI);
455 // Insert a new range & vni for the two-address-to-copy value. This
456 // will be attached to the new live interval.
// NOTE(review): the `VNInfo *copyVNI =` prefix of this statement is elided.
458 newLI->getNextValue(useIdx.getDefIndex(), 0, true,
459 lis->getVNInfoAllocator());
460 copyVNI->addKill(copyIdx.getDefIndex());
461 LiveRange copyRange(useIdx.getDefIndex(),copyIdx.getDefIndex(),copyVNI);
462 newLI->addRange(copyRange);
466 // Iterate over any PHI kills - we'll need to insert new copies for them.
467 for (VNInfo::KillSet::iterator
468 killItr = newVNI->kills.begin(), killEnd = newVNI->kills.end();
469 killItr != killEnd; ++killItr) {
470 SlotIndex killIdx(*killItr);
471 if (killItr->isPHI()) {
472 MachineBasicBlock *killMBB = lis->getMBBFromIndex(killIdx);
473 LiveRange *oldKillRange =
474 newLI->getLiveRangeContaining(killIdx);
476 assert(oldKillRange != 0 && "No kill range?");
// Copy the new vreg back into the original register before the block's
// terminators so the PHI (in a successor) still reads li->reg.
478 tii->copyRegToReg(*killMBB, killMBB->getFirstTerminator(),
479 li->reg, newVReg, trc, trc);
480 MachineInstr *copyMI = prior(killMBB->getFirstTerminator());
481 copyMI->addRegisterKilled(newVReg, tri);
482 SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
484 // Save the current end. We may need it to add a new range if the
485 // current range runs of the end of the MBB.
486 SlotIndex newKillRangeEnd = oldKillRange->end;
487 oldKillRange->end = copyIdx.getDefIndex();
489 if (newKillRangeEnd != lis->getMBBEndIdx(killMBB)) {
490 assert(newKillRangeEnd > lis->getMBBEndIdx(killMBB) &&
491 "PHI kill range doesn't reach kill-block end. Not sane.");
492 newLI->addRange(LiveRange(lis->getMBBEndIdx(killMBB),
493 newKillRangeEnd, newVNI));
// Retarget this kill at the copy, and give the original interval a new
// PHI-kill value covering copy -> end of block.
496 *killItr = oldKillRange->end;
// NOTE(review): the copyMI/isDefAccurate argument line of this getNextValue
// call is elided from this excerpt.
497 VNInfo *newKillVNI = li->getNextValue(copyIdx.getDefIndex(),
499 lis->getVNInfoAllocator());
500 newKillVNI->addKill(lis->getMBBTerminatorGap(killMBB));
501 newKillVNI->setHasPHIKill(true);
502 li->addRange(LiveRange(copyIdx.getDefIndex(),
503 lis->getMBBEndIdx(killMBB),
// The PHI kills now belong to the original interval's new values, not to
// the extracted one.
509 newVNI->setHasPHIKill(false);
/// Factory: construct the spiller selected by the -spiller command-line
/// option. Ownership of the returned Spiller passes to the caller.
/// NOTE(review): the trailing `VirtRegMap *vrm` parameter line and the
/// closing braces of the switch/function are elided from this excerpt.
518 llvm::Spiller* llvm::createSpiller(MachineFunction *mf, LiveIntervals *lis,
519 const MachineLoopInfo *loopInfo,
521 switch (spillerOpt) {
// `break`s after `return` are unreachable but harmless.
522 case trivial: return new TrivialSpiller(mf, lis, vrm); break;
523 case standard: return new StandardSpiller(lis, loopInfo, vrm); break;
524 case splitting: return new SplittingSpiller(mf, lis, loopInfo, vrm); break;
525 default: llvm_unreachable("Unreachable!"); break;