//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "Spiller.h"
#include "LiveRangeEdit.h"
#include "VirtRegMap.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static cl::opt<bool>
VerifySpills("verify-spills", cl::desc("Verify after each spill/split"));

namespace {
class InlineSpiller : public Spiller {
  MachineFunctionPass &pass_;
  MachineFunction &mf_;
  LiveIntervals &lis_;
  LiveStacks &lss_;
  AliasAnalysis *aa_;
  VirtRegMap &vrm_;
  MachineFrameInfo &mfi_;
  MachineRegisterInfo &mri_;
  const TargetInstrInfo &tii_;
  const TargetRegisterInfo &tri_;
  const BitVector reserved_;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveRangeEdit *edit_;
  const TargetRegisterClass *rc_;
  int stackSlot_;

  // Values that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> usedValues_;

  ~InlineSpiller() {}

public:
  InlineSpiller(MachineFunctionPass &pass,
                MachineFunction &mf,
                VirtRegMap &vrm)
    : pass_(pass),
      mf_(mf),
      lis_(pass.getAnalysis<LiveIntervals>()),
      lss_(pass.getAnalysis<LiveStacks>()),
      aa_(&pass.getAnalysis<AliasAnalysis>()),
      vrm_(vrm),
      mfi_(*mf.getFrameInfo()),
      mri_(mf.getRegInfo()),
      tii_(*mf.getTarget().getInstrInfo()),
      tri_(*mf.getTarget().getRegisterInfo()),
      reserved_(tri_.getReservedRegs(mf_)) {}

  void spill(LiveInterval *li,
             SmallVectorImpl<LiveInterval*> &newIntervals,
             const SmallVectorImpl<LiveInterval*> &spillIs);

  void spill(LiveRangeEdit &);

private:
  bool reMaterializeFor(MachineBasicBlock::iterator MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI);
  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                         const SmallVectorImpl<unsigned> &Ops,
                         MachineInstr *LoadMI = 0);
  void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
  void insertSpill(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
};
}

namespace llvm {
Spiller *createInlineSpiller(MachineFunctionPass &pass,
                             MachineFunction &mf,
                             VirtRegMap &vrm) {
  if (VerifySpills)
    mf.verify(&pass, "When creating inline spiller");
  return new InlineSpiller(pass, mf, vrm);
}
}
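
// Typical usage, sketched (the local variable names below are illustrative,
// not part of the interface): a register allocator creates one spiller for
// the function and hands it each interval it cannot assign.
//
//   Spiller *spiller = createInlineSpiller(pass, mf, vrm);
//   SmallVector<LiveInterval*, 4> newIntervals;
//   spiller->spill(&li, newIntervals, spillIs);
//   delete spiller;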

/// reMaterializeFor - Attempt to rematerialize edit_->getReg() before MI
/// instead of reloading it.
bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
  VNInfo *OrigVNI = edit_->getParent().getVNInfoAt(UseIdx);

  // No value is live at this use, so the operands can simply be marked
  // <undef>; nothing needs to be reloaded or rematerialized.
  if (!OrigVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == edit_->getReg())
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }

  LiveRangeEdit::Remat RM(OrigVNI);
  if (!edit_->canRematerializeAt(RM, UseIdx, false, lis_)) {
    usedValues_.insert(OrigVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes edit_->getReg(), it had better not require
  // the same register for uses and defs.
  bool Reads, Writes;
  SmallVector<unsigned, 8> Ops;
  tie(Reads, Writes) = MI->readsWritesVirtualRegister(edit_->getReg(), &Ops);
  if (Writes) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
        usedValues_.insert(OrigVNI);
        DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
        return false;
      }
    }
  }

  // Before rematerializing into a register for a single instruction, try to
  // fold a load into the instruction. That avoids allocating a new register.
  if (RM.OrigMI->getDesc().canFoldAsLoad() &&
      foldMemoryOperand(MI, Ops, RM.OrigMI)) {
    edit_->markRematerialized(RM.ParentVNI);
    return true;
  }

  // Allocate a new register for the remat.
  LiveInterval &NewLI = edit_->create(mri_, lis_, vrm_);
  NewLI.markNotSpillable();
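
  // NewLI spans just this remat def and its single use; letting the allocator
  // spill it again would only recreate the same problem, hence the
  // markNotSpillable() above.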

  // Rematting for a copy: Set allocation hint to be the destination register.
  if (MI->isCopy())
    mri_.setRegAllocationHint(NewLI.reg, 0, MI->getOperand(0).getReg());

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx = edit_->rematerializeAt(*MI->getParent(), MI, NewLI.reg, RM,
                                            lis_, tii_, tri_);
  DEBUG(dbgs() << "\tremat:  " << DefIdx << '\t'
               << *lis_.getInstructionFromIndex(DefIdx));
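
  // rematerializeAt() has inserted a (possibly target-adjusted) copy of
  // OrigMI in front of MI; DefIdx is the slot index assigned to that new def.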

  // Replace operands
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isReg() && MO.isUse() && MO.getReg() == edit_->getReg()) {
      MO.setReg(NewLI.reg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t        " << UseIdx << '\t' << *MI);
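
  // The remat value is live only from its new def to this use. Ending the
  // range at the use instruction's def slot keeps it live across the use
  // operand (slots within an instruction order uses before defs).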

  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  return true;
}

/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // Do a quick scan of the interval values to find if any are remattable.
  if (!edit_->anyRematerializable(lis_, tii_, aa_))
    return;

  usedValues_.clear();

  // Try to remat before all uses of edit_->getReg().
  bool anyRemat = false;
  for (MachineRegisterInfo::use_nodbg_iterator
       RI = mri_.use_nodbg_begin(edit_->getReg());
       MachineInstr *MI = RI.skipInstruction();)
    anyRemat |= reMaterializeFor(MI);

  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  bool anyRemoved = false;
  for (LiveInterval::vni_iterator I = edit_->getParent().vni_begin(),
       E = edit_->getParent().vni_end(); I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->hasPHIKill() || !edit_->didRematerialize(VNI) ||
        usedValues_.count(VNI))
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    DEBUG(dbgs() << "\tremoving dead def: " << VNI->def << '\t' << *DefMI);
    lis_.RemoveMachineInstrFromMaps(DefMI);
    vrm_.RemoveMachineInstrFromMaps(DefMI);
    DefMI->eraseFromParent();
    VNI->def = SlotIndex();  // The value no longer has a defining instruction.
    anyRemoved = true;
  }

  if (!anyRemoved)
    return;

  // Removing values may cause debug uses where parent is not live.
  for (MachineRegisterInfo::use_iterator RI = mri_.use_begin(edit_->getReg());
       MachineInstr *MI = RI.skipInstruction();) {
    if (!MI->isDebugValue())
      continue;
    // Try to preserve the debug value if parent is live immediately after it.
    MachineBasicBlock::iterator NextMI = MI;
    ++NextMI;
    if (NextMI != MI->getParent()->end() && !lis_.isNotInMIMap(NextMI)) {
      SlotIndex Idx = lis_.getInstructionIndex(NextMI);
      VNInfo *VNI = edit_->getParent().getVNInfoAt(Idx);
      if (VNI && (VNI->hasPHIKill() || usedValues_.count(VNI)))
        continue;
    }
    DEBUG(dbgs() << "Removing debug info due to remat:" << "\t" << *MI);
    MI->eraseFromParent();
  }
}

/// If MI is a load or store of stackSlot_, it can be removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI) {
  int FI = 0;
  unsigned reg;
  if (!(reg = tii_.isLoadFromStackSlot(MI, FI)) &&
      !(reg = tii_.isStoreToStackSlot(MI, FI)))
    return false;

  // We have a stack access. Is it the right register and slot?
  if (reg != edit_->getReg() || FI != stackSlot_)
    return false;

  DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  lis_.RemoveMachineInstrFromMaps(MI);
  MI->eraseFromParent();
  return true;
}

/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
/// @param MI     Instruction using or defining the current register.
/// @param Ops    Operand indices from readsWritesVirtualRegister().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success, and MI will be erased.
bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                      const SmallVectorImpl<unsigned> &Ops,
                                      MachineInstr *LoadMI) {
  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i];
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit())
      continue;
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }
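
  // FoldOps now holds only the explicit, untied operands that the target is
  // allowed to turn into memory references.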

  MachineInstr *FoldMI =
    LoadMI ? tii_.foldMemoryOperand(MI, FoldOps, LoadMI)
           : tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
  if (!FoldMI)
    return false;
  lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
  if (!LoadMI)
    vrm_.addSpillSlotUse(stackSlot_, FoldMI);
  MI->eraseFromParent();
  DEBUG(dbgs() << "\tfolded: " << *FoldMI);
  return true;
}

/// insertReload - Insert a reload of NewLI.reg before MI.
void InlineSpiller::insertReload(LiveInterval &NewLI,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  tii_.loadRegFromStackSlot(MBB, MI, NewLI.reg, stackSlot_, rc_, &tri_);
  --MI; // Point to load instruction.
  SlotIndex LoadIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\treload:  " << LoadIdx << '\t' << *MI);
  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0,
                                       lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
}

/// insertSpill - Insert a spill of NewLI.reg after MI.
void InlineSpiller::insertSpill(LiveInterval &NewLI,
                                MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();

  // Get the defined value. It could be an early clobber so keep the def index.
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  VNInfo *VNI = edit_->getParent().getVNInfoAt(Idx);
  assert(VNI && VNI->def.getDefIndex() == Idx && "Inconsistent VNInfo");
  Idx = VNI->def;

  tii_.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, stackSlot_, rc_, &tri_);
  --MI; // Point to store instruction.
  SlotIndex StoreIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
}

void InlineSpiller::spill(LiveInterval *li,
                          SmallVectorImpl<LiveInterval*> &newIntervals,
                          const SmallVectorImpl<LiveInterval*> &spillIs) {
  LiveRangeEdit edit(*li, newIntervals, spillIs);
  spill(edit);
  if (VerifySpills)
    mf_.verify(&pass_, "After inline spill");
}

void InlineSpiller::spill(LiveRangeEdit &edit) {
  edit_ = &edit;
  assert(!TargetRegisterInfo::isStackSlot(edit.getReg())
         && "Trying to spill a stack slot.");
  DEBUG(dbgs() << "Inline spilling "
               << mri_.getRegClass(edit.getReg())->getName()
               << ':' << edit.getParent() << "\n");
  assert(edit.getParent().isSpillable() &&
         "Attempting to spill already spilled value.");

  reMaterializeAll();

  // Remat may handle everything.
  if (edit_->getParent().empty())
    return;

  rc_ = mri_.getRegClass(edit.getReg());
  stackSlot_ = vrm_.assignVirt2StackSlot(edit_->getReg());

  // Update LiveStacks now that we are committed to spilling.
  LiveInterval &stacklvr = lss_.getOrCreateInterval(stackSlot_, rc_);
  assert(stacklvr.empty() && "Just created stack slot not empty");
  stacklvr.getNextValue(SlotIndex(), 0, lss_.getVNInfoAllocator());
  stacklvr.MergeRangesInAsValue(edit_->getParent(), stacklvr.getValNumInfo(0));
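
  // The stack interval now mirrors the register's live range: wherever the
  // old register was live, the spilled value must be available in the slot.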

  // Iterate over instructions using register.
  for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(edit.getReg());
       MachineInstr *MI = RI.skipInstruction();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      if (MachineInstr *NewDV = tii_.emitFrameIndexDebugValue(mf_, stackSlot_,
                                                              Offset, MDPtr, DL)) {
        DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
        MachineBasicBlock *MBB = MI->getParent();
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI))
      continue;

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(edit.getReg(), &Ops);

    // Attempt to fold memory ops.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    LiveInterval &NewLI = edit.create(mri_, lis_, vrm_);
    NewLI.markNotSpillable();

    if (Reads)
      insertReload(NewLI, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewLI.reg);
      if (MO.isUse()) {
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }
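
    // Only a live def has to be written back to the stack slot; storing a
    // dead def would be wasted work.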
    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  }
}