1 //===-- llvm/CodeGen/Spiller.cpp - Spiller -------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #define DEBUG_TYPE "spiller"
13 #include "VirtRegMap.h"
14 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
15 #include "llvm/CodeGen/MachineFrameInfo.h"
16 #include "llvm/CodeGen/MachineFunction.h"
17 #include "llvm/CodeGen/MachineInstrBuilder.h"
18 #include "llvm/CodeGen/MachineLoopInfo.h"
19 #include "llvm/CodeGen/MachineRegisterInfo.h"
20 #include "llvm/Target/TargetMachine.h"
21 #include "llvm/Target/TargetInstrInfo.h"
22 #include "llvm/Support/CommandLine.h"
23 #include "llvm/Support/Debug.h"
24 #include "llvm/Support/ErrorHandling.h"
25 #include "llvm/Support/raw_ostream.h"
// Selectable spiller implementations. 'inline_' carries a trailing underscore
// to avoid the C++ keyword; it is surfaced on the command line as "inline"
// via the clEnumValN entry below.
31 enum SpillerName { trivial, standard, splitting, inline_ };

// Command-line switch selecting which Spiller subclass createSpiller() (end
// of file) constructs. Default is 'standard' per the description string.
// NOTE(review): the option's name/init arguments (the line declaring the
// variable name, presumably "spillerOpt") and the terminator of the
// cl::values(...) list are not visible in this view — confirm against the
// full file before editing.
34 static cl::opt<SpillerName>
36 cl::desc("Spiller to use: (default: standard)"),
38 cl::values(clEnumVal(trivial, "trivial spiller"),
39 clEnumVal(standard, "default spiller"),
40 clEnumVal(splitting, "splitting spiller"),
41 clEnumValN(inline_, "inline", "inline spiller"),
// Spiller virtual destructor implementation. Defined out-of-line (rather
// than in the header) so that the vtable for the abstract Spiller interface
// is anchored in this translation unit.
45 // Spiller virtual destructor implementation.
46 Spiller::~Spiller() {}
50 /// Utility class for spillers.
///
/// Shared base providing cached analysis/target-info pointers and the
/// spill-everywhere primitive used by TrivialSpiller below.
/// NOTE(review): several member declarations (at least 'lis', 'vrm', and
/// 'mf' — all referenced below) plus access specifiers and closing braces
/// are not visible in this sampled view; confirm against the full file.
51 class SpillerBase : public Spiller {
53 MachineFunctionPass *pass;
57 MachineFrameInfo *mfi;
58 MachineRegisterInfo *mri;
59 const TargetInstrInfo *tii;
60 const TargetRegisterInfo *tri;
62 /// Construct a spiller base.
///
/// Caches the LiveIntervals analysis and the function's frame/register info
/// and the target's instruction/register info so derived spillers don't
/// re-fetch them on every spill.
63 SpillerBase(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
64 : pass(&pass), mf(&mf), vrm(&vrm)
66 lis = &pass.getAnalysis<LiveIntervals>();
67 mfi = mf.getFrameInfo();
68 mri = &mf.getRegInfo();
69 tii = mf.getTarget().getInstrInfo();
70 tri = mf.getTarget().getRegisterInfo();
73 /// Add spill ranges for every use/def of the live interval, inserting loads
74 /// immediately before each use, and stores after each def. No folding or
75 /// remat is attempted.
///
/// Each use/def instruction gets a fresh virtual register (sharing one stack
/// slot) plus a tiny live interval covering just the reload/instr/store
/// region; the new intervals are appended to 'newIntervals'.
76 void trivialSpillEverywhere(LiveInterval *li,
77 std::vector<LiveInterval*> &newIntervals) {
78 DEBUG(dbgs() << "Spilling everywhere " << *li << "\n");
// HUGE_VALF weight marks an interval produced by spilling (see below where
// newLI->weight is set); spilling one again would loop forever.
80 assert(li->weight != HUGE_VALF &&
81 "Attempting to spill already spilled value.");
83 assert(!li->isStackSlot() &&
84 "Trying to spill a stack slot.");
86 DEBUG(dbgs() << "Trivial spill everywhere of reg" << li->reg << "\n");
// One stack slot is shared by all the per-instruction replacement vregs.
88 const TargetRegisterClass *trc = mri->getRegClass(li->reg);
89 unsigned ss = vrm->assignVirt2StackSlot(li->reg);
91 // Iterate over reg uses/defs.
92 for (MachineRegisterInfo::reg_iterator
93 regItr = mri->reg_begin(li->reg); regItr != mri->reg_end();) {
95 // Grab the use/def instr.
96 MachineInstr *mi = &*regItr;
98 DEBUG(dbgs() << " Processing " << *mi);
100 // Step regItr to the next use/def instr.
// Advance past every operand of 'mi' so the instruction is processed once
// even if it references li->reg in several operands. NOTE(review): the
// 'do {' opener of this loop is not visible in this view.
103 } while (regItr != mri->reg_end() && (&*regItr == mi));
105 // Collect uses & defs for this instr.
106 SmallVector<unsigned, 2> indices;
// NOTE(review): the declarations of 'hasUse'/'hasDef' (presumably bools
// initialized false) are not visible in this view.
109 for (unsigned i = 0; i != mi->getNumOperands(); ++i) {
110 MachineOperand &op = mi->getOperand(i);
// Skip operands that don't reference the register being spilled.
111 if (!op.isReg() || op.getReg() != li->reg)
113 hasUse |= mi->getOperand(i).isUse();
114 hasDef |= mi->getOperand(i).isDef();
115 indices.push_back(i);
118 // Create a new vreg & interval for this instr.
119 unsigned newVReg = mri->createVirtualRegister(trc);
// Map the replacement vreg onto the shared stack slot.
121 vrm->assignVirt2StackSlot(newVReg, ss);
122 LiveInterval *newLI = &lis->getOrCreateInterval(newVReg);
// HUGE_VALF weight flags this interval as already-spilled (checked by the
// assert at the top of this function).
123 newLI->weight = HUGE_VALF;
125 // Update the reg operands & kill flags.
126 for (unsigned i = 0; i < indices.size(); ++i) {
127 unsigned mopIdx = indices[i];
128 MachineOperand &mop = mi->getOperand(mopIdx);
// A plain (non-tied) use of the new vreg ends at this instruction.
130 if (mop.isUse() && !mi->isRegTiedToDefOperand(mopIdx)) {
134 assert(hasUse || hasDef);
136 // Insert reload if necessary.
// Reload from the stack slot immediately before the use. NOTE(review):
// the guard (presumably 'if (hasUse)') and the trailing arguments of the
// loadRegFromStackSlot call are not visible in this view.
137 MachineBasicBlock::iterator miItr(mi);
139 tii->loadRegFromStackSlot(*mi->getParent(), miItr, newVReg, ss, trc,
141 MachineInstr *loadInstr(prior(miItr));
142 SlotIndex loadIndex =
143 lis->InsertMachineInstrInMaps(loadInstr).getDefIndex();
144 vrm->addSpillSlotUse(ss, loadInstr);
145 SlotIndex endIndex = loadIndex.getNextIndex();
// Give newLI a value number defined by the reload and a range reaching
// the spilled instruction. NOTE(review): the 'VNInfo *loadVNI =' lead-in
// is not visible in this view.
147 newLI->getNextValue(loadIndex, 0, true, lis->getVNInfoAllocator());
148 newLI->addRange(LiveRange(loadIndex, endIndex, loadVNI));
151 // Insert store if necessary.
// Store back to the stack slot immediately after the def. NOTE(review):
// the guard (presumably 'if (hasDef)') and the trailing arguments of the
// storeRegToStackSlot call are not visible in this view.
153 tii->storeRegToStackSlot(*mi->getParent(), llvm::next(miItr), newVReg,
155 MachineInstr *storeInstr(llvm::next(miItr));
156 SlotIndex storeIndex =
157 lis->InsertMachineInstrInMaps(storeInstr).getDefIndex();
158 vrm->addSpillSlotUse(ss, storeInstr);
159 SlotIndex beginIndex = storeIndex.getPrevIndex();
// Range covering the def up to the store. NOTE(review): the
// 'VNInfo *storeVNI =' lead-in is not visible in this view.
161 newLI->getNextValue(beginIndex, 0, true, lis->getVNInfoAllocator());
162 newLI->addRange(LiveRange(beginIndex, storeIndex, storeVNI));
165 newIntervals.push_back(newLI);
170 } // end anonymous namespace
174 /// Spills any live range using the spill-everywhere method with no attempt at
/// folding, remat, or splitting (contrast with StandardSpiller below).
/// NOTE(review): the class opener, access specifiers, remaining constructor
/// parameters, and the spill() parameter list terminator are not fully
/// visible in this sampled view.
176 class TrivialSpiller : public SpillerBase {
179 TrivialSpiller(MachineFunctionPass &pass, MachineFunction &mf,
181 : SpillerBase(pass, mf, vrm) {}
// Delegates directly to SpillerBase::trivialSpillEverywhere; the spillIs
// and earliestStart arguments are deliberately unused.
183 void spill(LiveInterval *li,
184 std::vector<LiveInterval*> &newIntervals,
185 SmallVectorImpl<LiveInterval*> &,
187 // Ignore spillIs - we don't use it.
188 trivialSpillEverywhere(li, newIntervals);
192 } // end anonymous namespace
196 /// Falls back on LiveIntervals::addIntervalsForSpills.
///
/// The "default" spiller: all real spilling logic lives inside
/// LiveIntervals; this class only caches the analyses it needs.
/// NOTE(review): the declarations of 'lis' and 'vrm' members, the
/// constructor's remaining parameters/initializers, and the closing braces
/// are not visible in this sampled view.
197 class StandardSpiller : public Spiller {
200 MachineLoopInfo *loopInfo;
203 StandardSpiller(MachineFunctionPass &pass, MachineFunction &mf,
// getAnalysisIfAvailable: loop info is optional, so loopInfo may be null;
// addIntervalsForSpills accepts that.
205 : lis(&pass.getAnalysis<LiveIntervals>()),
206 loopInfo(pass.getAnalysisIfAvailable<MachineLoopInfo>()),
209 /// Falls back on LiveIntervals::addIntervalsForSpills.
210 void spill(LiveInterval *li,
211 std::vector<LiveInterval*> &newIntervals,
212 SmallVectorImpl<LiveInterval*> &spillIs,
// Append whatever intervals LiveIntervals produced to the caller's list.
214 std::vector<LiveInterval*> added =
215 lis->addIntervalsForSpills(*li, spillIs, loopInfo, *vrm);
216 newIntervals.insert(newIntervals.end(), added.begin(), added.end());
220 } // end anonymous namespace
224 /// When a call to spill is placed this spiller will first try to break the
225 /// interval up into its component values (one new interval per value).
226 /// If this fails, or if a call is placed to spill a previously split interval
227 /// then the spiller falls back on the standard spilling mechanism.
///
/// NOTE(review): access specifiers, several closing braces, and a number of
/// interior lines (visible as gaps in the residual numbering) are missing
/// from this sampled view; confirm structure against the full file.
228 class SplittingSpiller : public StandardSpiller {
230 SplittingSpiller(MachineFunctionPass &pass, MachineFunction &mf,
232 : StandardSpiller(pass, mf, vrm) {
233 mri = &mf.getRegInfo();
234 tii = mf.getTarget().getInstrInfo();
235 tri = mf.getTarget().getRegisterInfo();
// First attempt a per-value-number split; regardless of the outcome the
// (possibly shrunken) interval is handed to the standard spiller.
238 void spill(LiveInterval *li,
239 std::vector<LiveInterval*> &newIntervals,
240 SmallVectorImpl<LiveInterval*> &spillIs,
241 SlotIndex *earliestStart) {
242 if (worthTryingToSplit(li))
243 tryVNISplit(li, earliestStart);
245 StandardSpiller::spill(li, newIntervals, spillIs, earliestStart);
250 MachineRegisterInfo *mri;
251 const TargetInstrInfo *tii;
252 const TargetRegisterInfo *tri;
// Intervals produced by a previous split; never re-split these.
253 DenseSet<LiveInterval*> alreadySplit;
// Splitting only pays off when the interval has more than one value number
// and hasn't already been split.
255 bool worthTryingToSplit(LiveInterval *li) const {
256 return (!alreadySplit.count(li) && li->getNumValNums() > 1);
259 /// Try to break a LiveInterval into its component values.
///
/// Iterates over a snapshot of li's value numbers, extracting each into its
/// own interval via extractVNI. Successfully extracted intervals are
/// recorded in 'added'/'alreadySplit' and may lower *earliestStart.
260 std::vector<LiveInterval*> tryVNISplit(LiveInterval *li,
261 SlotIndex *earliestStart) {
263 DEBUG(dbgs() << "Trying VNI split of %reg" << *li << "\n");
265 std::vector<LiveInterval*> added;
// Snapshot the VNInfo list: extractVNI mutates li's value numbers, so we
// must not iterate li->vni_begin()/end() directly.
266 SmallVector<VNInfo*, 4> vnis;
268 std::copy(li->vni_begin(), li->vni_end(), std::back_inserter(vnis));
270 for (SmallVectorImpl<VNInfo*>::iterator vniItr = vnis.begin(),
271 vniEnd = vnis.end(); vniItr != vniEnd; ++vniItr) {
272 VNInfo *vni = *vniItr;
// NOTE(review): lines filtering which VNIs are attempted (residual numbers
// 273-277) are not visible in this view.
278 DEBUG(dbgs() << " Extracted Val #" << vni->id << " as ");
279 LiveInterval *splitInterval = extractVNI(li, vni);
281 if (splitInterval != 0) {
282 DEBUG(dbgs() << *splitInterval << "\n");
283 added.push_back(splitInterval);
284 alreadySplit.insert(splitInterval);
// Keep the caller informed of the earliest start among new intervals so
// the allocator can rewind its scan.
285 if (earliestStart != 0) {
286 if (splitInterval->beginIndex() < *earliestStart)
287 *earliestStart = splitInterval->beginIndex();
290 DEBUG(dbgs() << "0\n");
294 DEBUG(dbgs() << "Original LI: " << *li << "\n");
296 // If the original interval still contains some live ranges
297 // add it to added and alreadySplit.
300 alreadySplit.insert(li);
301 if (earliestStart != 0) {
302 if (li->beginIndex() < *earliestStart)
303 *earliestStart = li->beginIndex();
310 /// Extract the given value number from the interval.
///
/// Moves vni's ranges to a freshly created vreg/interval, then rewrites the
/// def (inserting COPYs for PHI defs and two-address defs), renames all
/// uses belonging to the value, and patches up PHI-kill boundaries.
/// Returns the new interval. Heavy live-range surgery: statement order is
/// load-bearing throughout.
311 LiveInterval* extractVNI(LiveInterval *li, VNInfo *vni) const {
312 assert(vni->isDefAccurate() || vni->isPHIDef());
314 // Create a new vreg and live interval, copy VNI ranges over.
315 const TargetRegisterClass *trc = mri->getRegClass(li->reg);
316 unsigned newVReg = mri->createVirtualRegister(trc);
318 LiveInterval *newLI = &lis->getOrCreateInterval(newVReg);
319 VNInfo *newVNI = newLI->createValueCopy(vni, lis->getVNInfoAllocator());
321 // Start by copying all live ranges in the VN to the new interval.
322 for (LiveInterval::iterator rItr = li->begin(), rEnd = li->end();
323 rItr != rEnd; ++rItr) {
324 if (rItr->valno == vni) {
325 newLI->addRange(LiveRange(rItr->start, rItr->end, newVNI));
329 // Erase the old VNI & ranges.
330 li->removeValNo(vni);
332 // Collect all current uses of the register belonging to the given VNI.
333 // We'll use this to rename the register after we've dealt with the def.
// A std::set snapshot also keeps iteration stable while instructions are
// rewritten below.
334 std::set<MachineInstr*> uses;
335 for (MachineRegisterInfo::use_iterator
336 useItr = mri->use_begin(li->reg), useEnd = mri->use_end();
337 useItr != useEnd; ++useItr) {
338 uses.insert(&*useItr);
341 // Process the def instruction for this VNI.
342 if (newVNI->isPHIDef()) {
343 // Insert a copy at the start of the MBB. The range proceeding the
344 // copy will be attached to the original LiveInterval.
345 MachineBasicBlock *defMBB = lis->getMBBFromIndex(newVNI->def);
346 MachineInstr *copyMI = BuildMI(*defMBB, defMBB->begin(), DebugLoc(),
347 tii->get(TargetOpcode::COPY), newVReg)
348 .addReg(li->reg, RegState::Kill);
349 SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
// The original interval keeps a short PHI-def range from block entry up
// to the copy's def slot.
350 VNInfo *phiDefVNI = li->getNextValue(lis->getMBBStartIdx(defMBB),
351 0, false, lis->getVNInfoAllocator());
352 phiDefVNI->setIsPHIDef(true);
353 li->addRange(LiveRange(phiDefVNI->def, copyIdx.getDefIndex(), phiDefVNI));
354 LiveRange *oldPHIDefRange =
355 newLI->getLiveRangeContaining(lis->getMBBStartIdx(defMBB));
357 // If the old phi def starts in the middle of the range chop it up.
358 if (oldPHIDefRange->start < lis->getMBBStartIdx(defMBB)) {
359 LiveRange oldPHIDefRange2(copyIdx.getDefIndex(), oldPHIDefRange->end,
360 oldPHIDefRange->valno);
361 oldPHIDefRange->end = lis->getMBBStartIdx(defMBB);
362 newLI->addRange(oldPHIDefRange2);
363 } else if (oldPHIDefRange->start == lis->getMBBStartIdx(defMBB)) {
364 // Otherwise if it's at the start of the range just trim it.
365 oldPHIDefRange->start = copyIdx.getDefIndex();
// NOTE(review): the 'else' opener for this assert is not visible here.
367 assert(false && "PHI def range doesn't cover PHI def?");
// The extracted value is now defined by the copy, not the PHI.
370 newVNI->def = copyIdx.getDefIndex();
371 newVNI->setCopy(copyMI);
372 newVNI->setIsPHIDef(false); // not a PHI def anymore.
373 newVNI->setIsDefAccurate(true);
375 // non-PHI def. Rename the def. If it's two-addr that means renaming the
376 // use and inserting a new copy too.
377 MachineInstr *defInst = lis->getInstructionFromIndex(newVNI->def);
378 // We'll rename this now, so we can remove it from uses.
380 unsigned defOpIdx = defInst->findRegisterDefOperandIdx(li->reg);
381 bool isTwoAddr = defInst->isRegTiedToUseOperand(defOpIdx),
382 twoAddrUseIsUndef = false;
// Rename every def operand (and tied use) of li->reg on the def instr to
// the new vreg. NOTE(review): the setReg call inside this loop is among
// the lines not visible in this view.
384 for (unsigned i = 0; i < defInst->getNumOperands(); ++i) {
385 MachineOperand &mo = defInst->getOperand(i);
386 if (mo.isReg() && (mo.isDef() || isTwoAddr) && (mo.getReg()==li->reg)) {
388 if (isTwoAddr && mo.isUse() && mo.isUndef())
389 twoAddrUseIsUndef = true;
393 SlotIndex defIdx = lis->getInstructionIndex(defInst);
394 newVNI->def = defIdx.getDefIndex();
// A live (non-undef) tied use still needs the old register's value, so
// copy it into the new vreg just before the def instruction.
396 if (isTwoAddr && !twoAddrUseIsUndef) {
397 MachineBasicBlock *defMBB = defInst->getParent();
398 MachineInstr *copyMI = BuildMI(*defMBB, defInst, DebugLoc(),
399 tii->get(TargetOpcode::COPY), newVReg)
400 .addReg(li->reg, RegState::Kill);
401 SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
402 LiveRange *origUseRange =
403 li->getLiveRangeContaining(newVNI->def.getUseIndex());
404 origUseRange->end = copyIdx.getDefIndex();
405 VNInfo *copyVNI = newLI->getNextValue(copyIdx.getDefIndex(), copyMI,
406 true, lis->getVNInfoAllocator());
407 LiveRange copyRange(copyIdx.getDefIndex(),defIdx.getDefIndex(),copyVNI);
408 newLI->addRange(copyRange);
// Rename all the uses that were collected earlier and belong to the
// extracted value number.
412 for (std::set<MachineInstr*>::iterator
413 usesItr = uses.begin(), usesEnd = uses.end();
414 usesItr != usesEnd; ++usesItr) {
415 MachineInstr *useInst = *usesItr;
416 SlotIndex useIdx = lis->getInstructionIndex(useInst);
417 LiveRange *useRange =
418 newLI->getLiveRangeContaining(useIdx.getUseIndex());
420 // If this use doesn't belong to the new interval skip it.
424 // This use doesn't belong to the VNI, skip it.
425 if (useRange->valno != newVNI)
428 // Check if this instr is two address.
429 unsigned useOpIdx = useInst->findRegisterUseOperandIdx(li->reg);
430 bool isTwoAddress = useInst->isRegTiedToDefOperand(useOpIdx);
432 // Rename uses (and defs for two-address instrs).
// NOTE(review): the setReg call inside this loop is among the lines not
// visible in this view.
433 for (unsigned i = 0; i < useInst->getNumOperands(); ++i) {
434 MachineOperand &mo = useInst->getOperand(i);
435 if (mo.isReg() && (mo.isUse() || isTwoAddress) &&
436 (mo.getReg() == li->reg)) {
441 // If this is a two address instruction we've got some extra work to do.
443 // We modified the def operand, so we need to copy back to the original
// ...register after the instruction, keeping the old interval's value live.
445 MachineBasicBlock *useMBB = useInst->getParent();
446 MachineBasicBlock::iterator useItr(useInst);
447 MachineInstr *copyMI = BuildMI(*useMBB, llvm::next(useItr), DebugLoc(),
448 tii->get(TargetOpcode::COPY), newVReg)
449 .addReg(li->reg, RegState::Kill);
450 SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
452 // Change the old two-address defined range & vni to start at
453 // (and be defined by) the copy.
454 LiveRange *origDefRange =
455 li->getLiveRangeContaining(useIdx.getDefIndex());
456 origDefRange->start = copyIdx.getDefIndex();
457 origDefRange->valno->def = copyIdx.getDefIndex();
458 origDefRange->valno->setCopy(copyMI);
460 // Insert a new range & vni for the two-address-to-copy value. This
461 // will be attached to the new live interval.
// NOTE(review): the 'VNInfo *copyVNI =' lead-in is not visible here.
463 newLI->getNextValue(useIdx.getDefIndex(), 0, true,
464 lis->getVNInfoAllocator())
465 LiveRange copyRange(useIdx.getDefIndex(),copyIdx.getDefIndex(),copyVNI);
466 newLI->addRange(copyRange);
470 // Iterate over any PHI kills - we'll need to insert new copies for them.
471 for (LiveInterval::iterator LRI = newLI->begin(), LRE = newLI->end();
// Only ranges of the extracted value that end in a PHI kill need a copy
// back to the original register before the block's terminator.
473 if (LRI->valno != newVNI || LRI->end.isPHI())
475 SlotIndex killIdx = LRI->end;
476 MachineBasicBlock *killMBB = lis->getMBBFromIndex(killIdx);
477 MachineInstr *copyMI = BuildMI(*killMBB, killMBB->getFirstTerminator(),
478 DebugLoc(), tii->get(TargetOpcode::COPY),
480 .addReg(newVReg, RegState::Kill);
481 SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
483 // Save the current end. We may need it to add a new range if the
484 // current range runs of the end of the MBB.
485 SlotIndex newKillRangeEnd = LRI->end;
486 LRI->end = copyIdx.getDefIndex();
488 if (newKillRangeEnd != lis->getMBBEndIdx(killMBB)) {
489 assert(newKillRangeEnd > lis->getMBBEndIdx(killMBB) &&
490 "PHI kill range doesn't reach kill-block end. Not sane.");
491 newLI->addRange(LiveRange(lis->getMBBEndIdx(killMBB),
492 newKillRangeEnd, newVNI));
// The original interval picks up a PHI-kill value from the copy to the
// end of the killing block.
495 VNInfo *newKillVNI = li->getNextValue(copyIdx.getDefIndex(),
497 lis->getVNInfoAllocator());
498 newKillVNI->setHasPHIKill(true);
499 li->addRange(LiveRange(copyIdx.getDefIndex(),
500 lis->getMBBEndIdx(killMBB),
503 newVNI->setHasPHIKill(false);
510 } // end anonymous namespace
// Forward declaration: the inline spiller lives in InlineSpiller.cpp.
// NOTE(review): the remaining parameters of both declarations and the
// closing brace of createSpiller are not visible in this sampled view.
514 Spiller *createInlineSpiller(MachineFunctionPass &pass,
// Factory: builds the Spiller implementation selected by the -spiller
// command-line option (see the cl::opt near the top of this file).
519 llvm::Spiller* llvm::createSpiller(MachineFunctionPass &pass,
522 switch (spillerOpt) {
// Unreachable when spillerOpt holds a valid SpillerName enumerator.
523 default: assert(0 && "unknown spiller");
524 case trivial: return new TrivialSpiller(pass, mf, vrm);
525 case standard: return new StandardSpiller(pass, mf, vrm);
526 case splitting: return new SplittingSpiller(pass, mf, vrm);
527 case inline_: return createInlineSpiller(pass, mf, vrm);