//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "spiller"
#include "Spiller.h"
#include "SplitKit.h"
#include "VirtRegMap.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
class InlineSpiller : public Spiller {
  MachineFunction &mf_;
  LiveIntervals &lis_;
  MachineLoopInfo &loops_;
  VirtRegMap &vrm_;
  MachineFrameInfo &mfi_;
  MachineRegisterInfo &mri_;
  const TargetInstrInfo &tii_;
  const TargetRegisterInfo &tri_;
  const BitVector reserved_;

  SplitAnalysis splitAnalysis_;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveInterval *li_;
  std::vector<LiveInterval*> *newIntervals_;
  const TargetRegisterClass *rc_;
  int stackSlot_;
  const SmallVectorImpl<LiveInterval*> *spillIs_;

  // Values of the current interval that can potentially remat.
  SmallPtrSet<VNInfo*, 8> reMattable_;

  // Values in reMattable_ that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> usedValues_;

public:
  InlineSpiller(MachineFunctionPass &pass,
                MachineFunction &mf,
                VirtRegMap &vrm)
    : mf_(mf),
      lis_(pass.getAnalysis<LiveIntervals>()),
      loops_(pass.getAnalysis<MachineLoopInfo>()),
      vrm_(vrm),
      mfi_(*mf.getFrameInfo()),
      mri_(mf.getRegInfo()),
      tii_(*mf.getTarget().getInstrInfo()),
      tri_(*mf.getTarget().getRegisterInfo()),
      reserved_(tri_.getReservedRegs(mf_)),
      splitAnalysis_(mf, lis_, loops_) {}

  void spill(LiveInterval *li,
             std::vector<LiveInterval*> &newIntervals,
             SmallVectorImpl<LiveInterval*> &spillIs,
             SlotIndex *earliestIndex);

private:
  bool split();

  bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx,
                          SlotIndex UseIdx);
  bool reMaterializeFor(MachineBasicBlock::iterator MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI);
  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                         const SmallVectorImpl<unsigned> &Ops);
  void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
  void insertSpill(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
};
}

namespace llvm {
Spiller *createInlineSpiller(MachineFunctionPass &pass,
                             MachineFunction &mf,
                             VirtRegMap &vrm) {
  return new InlineSpiller(pass, mf, vrm);
}
}
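
// The register allocator normally constructs one spiller per machine function
// through this factory and calls spill() once for each virtual register it
// decides not to keep in a physical register.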

/// split - try splitting the current interval into pieces that may allocate
/// separately. Return true if successful.
bool InlineSpiller::split() {
  // FIXME: Add intra-MBB splitting.
  if (lis_.intervalIsInOneMBB(*li_))
    return false;

  splitAnalysis_.analyze(li_);
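
  // If the analysis found a profitable loop, split li_ around it. SplitEditor
  // appends the intervals it creates to newIntervals_, so on success nothing
  // else needs to be spilled here.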
  if (const MachineLoop *loop = splitAnalysis_.getBestSplitLoop()) {
    SplitEditor(splitAnalysis_, lis_, vrm_, *newIntervals_)
      .splitAroundLoop(loop);
    return true;
  }
  return false;
}

/// allUsesAvailableAt - Return true if all registers used by OrigMI at
/// OrigIdx are also available with the same value at UseIdx.
bool InlineSpiller::allUsesAvailableAt(const MachineInstr *OrigMI,
                                       SlotIndex OrigIdx,
                                       SlotIndex UseIdx) {
  OrigIdx = OrigIdx.getUseIndex();
  UseIdx = UseIdx.getUseIndex();
  for (unsigned i = 0, e = OrigMI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = OrigMI->getOperand(i);
    if (!MO.isReg() || !MO.getReg() || MO.getReg() == li_->reg)
      continue;
    // Reserved registers are OK.
    if (MO.isUndef() || !lis_.hasInterval(MO.getReg()))
      continue;
    // We don't want to move any defs.
    if (MO.isDef())
      return false;
    // We cannot depend on virtual registers in spillIs_. They will be spilled.
    for (unsigned si = 0, se = spillIs_->size(); si != se; ++si)
      if ((*spillIs_)[si]->reg == MO.getReg())
        return false;

    LiveInterval &LI = lis_.getInterval(MO.getReg());
    const VNInfo *OVNI = LI.getVNInfoAt(OrigIdx);
    if (!OVNI)
      continue;
    if (OVNI != LI.getVNInfoAt(UseIdx))
      return false;
  }
  return true;
}

/// reMaterializeFor - Attempt to rematerialize li_->reg before MI instead of
/// reloading it.
bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
  VNInfo *OrigVNI = li_->getVNInfoAt(UseIdx);
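
  // If no value of li_->reg is live at this use, the operand reads an
  // undefined value anyway; flag it <undef> instead of rematting or reloading.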
  if (!OrigVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg)
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }
  if (!reMattable_.count(OrigVNI)) {
    DEBUG(dbgs() << "\tusing non-remat valno " << OrigVNI->id << ": "
                 << UseIdx << '\t' << *MI);
    return false;
  }
  MachineInstr *OrigMI = lis_.getInstructionFromIndex(OrigVNI->def);
  if (!allUsesAvailableAt(OrigMI, OrigVNI->def, UseIdx)) {
    usedValues_.insert(OrigVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes li_->reg, it had better not require the same
  // register for uses and defs.
  bool Reads, Writes;
  SmallVector<unsigned, 8> Ops;
  tie(Reads, Writes) = MI->readsWritesVirtualRegister(li_->reg, &Ops);
  if (Writes) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
        usedValues_.insert(OrigVNI);
        DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
        return false;
      }
    }
  }

  // Allocate a new register for the remat.
  unsigned NewVReg = mri_.createVirtualRegister(rc_);
  LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
  NewLI.markNotSpillable();
  newIntervals_->push_back(&NewLI);

  // Finally we can rematerialize OrigMI before MI.
  MachineBasicBlock &MBB = *MI->getParent();
  tii_.reMaterialize(MBB, MI, NewLI.reg, 0, OrigMI, tri_);
  MachineBasicBlock::iterator RematMI = MI;
  SlotIndex DefIdx = lis_.InsertMachineInstrInMaps(--RematMI).getDefIndex();
  DEBUG(dbgs() << "\tremat: " << DefIdx << '\t' << *RematMI);
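
  // Rewrite the operands in MI that used li_->reg so they read the freshly
  // rematerialized value in NewVReg instead.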
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg) {
      MO.setReg(NewVReg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI);

  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, true,
                                      lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  return true;
}

/// reMaterializeAll - Try to rematerialize as many uses of li_ as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // Do a quick scan of the interval values to find if any are remattable.
  reMattable_.clear();
  usedValues_.clear();
  for (LiveInterval::const_vni_iterator I = li_->vni_begin(),
       E = li_->vni_end(); I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused() || !VNI->isDefAccurate())
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    if (!DefMI || !tii_.isTriviallyReMaterializable(DefMI))
      continue;
    reMattable_.insert(VNI);
  }

  // Often, no defs are remattable.
  if (reMattable_.empty())
    return;

  // Try to remat before all uses of li_->reg.
  bool anyRemat = false;
  for (MachineRegisterInfo::use_nodbg_iterator
       RI = mri_.use_nodbg_begin(li_->reg);
       MachineInstr *MI = RI.skipInstruction();)
    anyRemat |= reMaterializeFor(MI);
  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  bool anyRemoved = false;
  for (SmallPtrSet<VNInfo*, 8>::iterator I = reMattable_.begin(),
       E = reMattable_.end(); I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->hasPHIKill() || usedValues_.count(VNI))
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    DEBUG(dbgs() << "\tremoving dead def: " << VNI->def << '\t' << *DefMI);
    lis_.RemoveMachineInstrFromMaps(DefMI);
    vrm_.RemoveMachineInstrFromMaps(DefMI);
    DefMI->eraseFromParent();
    li_->removeValNo(VNI);
    anyRemoved = true;
  }

  if (!anyRemoved)
    return;

  // Removing values may cause debug uses where li_ is not live.
  for (MachineRegisterInfo::use_iterator RI = mri_.use_begin(li_->reg);
       MachineInstr *MI = RI.skipInstruction();) {
    if (!MI->isDebugValue())
      continue;
    // Try to preserve the debug value if li_ is live immediately after it.
    MachineBasicBlock::iterator NextMI = MI;
    ++NextMI;
    if (NextMI != MI->getParent()->end() && !lis_.isNotInMIMap(NextMI)) {
      SlotIndex NearIdx = lis_.getInstructionIndex(NextMI);
      if (li_->liveAt(NearIdx))
        continue;
    }
    DEBUG(dbgs() << "Removing debug info due to remat:" << "\t" << *MI);
    MI->eraseFromParent();
  }
}

/// If MI is a load or store of stackSlot_, it can be removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI) {
  int FI = 0;
  unsigned reg;
  if (!(reg = tii_.isLoadFromStackSlot(MI, FI)) &&
      !(reg = tii_.isStoreToStackSlot(MI, FI)))
    return false;

  // We have a stack access. Is it the right register and slot?
  if (reg != li_->reg || FI != stackSlot_)
    return false;

  DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  lis_.RemoveMachineInstrFromMaps(MI);
  MI->eraseFromParent();
  return true;
}

/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
/// Return true on success, and MI will be erased.
bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                      const SmallVectorImpl<unsigned> &Ops) {
  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i];
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit())
      continue;
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }
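
  // Ask the target to fold the stack slot directly into MI. On targets with
  // memory operands (x86, for example) this turns the register access into a
  // stack-slot memory reference, so no separate reload or spill is needed.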
  MachineInstr *FoldMI = tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
  if (!FoldMI)
    return false;
  lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
  vrm_.addSpillSlotUse(stackSlot_, FoldMI);
  MI->eraseFromParent();
  DEBUG(dbgs() << "\tfolded: " << *FoldMI);
  return true;
}

/// insertReload - Insert a reload of NewLI.reg before MI.
void InlineSpiller::insertReload(LiveInterval &NewLI,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  tii_.loadRegFromStackSlot(MBB, MI, NewLI.reg, stackSlot_, rc_, &tri_);
  --MI; // Point to load instruction.
  SlotIndex LoadIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\treload: " << LoadIdx << '\t' << *MI);
  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0, true,
                                       lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
}

/// insertSpill - Insert a spill of NewLI.reg after MI.
void InlineSpiller::insertSpill(LiveInterval &NewLI,
                                MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  tii_.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, stackSlot_, rc_, &tri_);
  --MI; // Point to store instruction.
  SlotIndex StoreIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, true,
                                        lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
}

void InlineSpiller::spill(LiveInterval *li,
                          std::vector<LiveInterval*> &newIntervals,
                          SmallVectorImpl<LiveInterval*> &spillIs,
                          SlotIndex *earliestIndex) {
  DEBUG(dbgs() << "Inline spilling " << *li << "\n");
  assert(li->isSpillable() && "Attempting to spill already spilled value.");
  assert(!li->isStackSlot() && "Trying to spill a stack slot.");
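
  // Overall strategy: try to split the interval first, then rematerialize as
  // many uses as possible; only what remains is assigned a stack slot and gets
  // an inline reload or spill around each remaining use.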
  li_ = li;
  newIntervals_ = &newIntervals;
  rc_ = mri_.getRegClass(li->reg);
  spillIs_ = &spillIs;

  if (split())
    return;

  reMaterializeAll();

  // Remat may handle everything.
  if (li_->empty())
    return;

  stackSlot_ = vrm_.getStackSlot(li->reg);
  if (stackSlot_ == VirtRegMap::NO_STACK_SLOT)
    stackSlot_ = vrm_.assignVirt2StackSlot(li->reg);

  // Iterate over instructions using register.
  for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(li->reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
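      // If the target can describe a variable that lives in a stack slot, emit
      // a replacement DBG_VALUE; otherwise the debug info for this use is
      // dropped below.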
      if (MachineInstr *NewDV = tii_.emitFrameIndexDebugValue(mf_, stackSlot_,
                                                              Offset, MDPtr, DL)) {
        DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
        MachineBasicBlock *MBB = MI->getParent();
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI))
      continue;

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(li->reg, &Ops);

    // Attempt to fold memory ops.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    unsigned NewVReg = mri_.createVirtualRegister(rc_);
    LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
    NewLI.markNotSpillable();
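
    // NewLI only spans this one instruction, so it is marked not-spillable;
    // the register allocator has to give such a tiny interval a physical
    // register.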
    if (Reads)
      insertReload(NewLI, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewVReg);
      if (MO.isUse()) {
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
    newIntervals.push_back(&NewLI);
  }
}