1 //===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // The inline spiller modifies the machine function directly instead of
11 // inserting spills and restores in VirtRegMap.
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "spiller"
17 #include "VirtRegMap.h"
18 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
19 #include "llvm/CodeGen/MachineFrameInfo.h"
20 #include "llvm/CodeGen/MachineFunction.h"
21 #include "llvm/CodeGen/MachineRegisterInfo.h"
22 #include "llvm/Target/TargetMachine.h"
23 #include "llvm/Target/TargetInstrInfo.h"
24 #include "llvm/Support/Debug.h"
25 #include "llvm/Support/raw_ostream.h"
30 class InlineSpiller : public Spiller {
  // Per-function analyses and target hooks, captured once at construction.
  // NOTE(review): the mf_/lis_/vrm_ members initialized in the ctor below,
  // and the stackSlot_/li_ state used by the methods, appear to be declared
  // on lines not visible in this view -- confirm against the full file.
34   MachineFrameInfo &mfi_;
35   MachineRegisterInfo &mri_;
36   const TargetInstrInfo &tii_;
37   const TargetRegisterInfo &tri_;
  // Physregs reserved by the target; reMaterialize() treats uses of these as
  // safe (see the reserved_.test() check in its operand scan).
38   const BitVector reserved_;
40   // Variables that are valid during spill(), but used by multiple methods.
  // Register class of the virtual register currently being spilled.
42   const TargetRegisterClass *rc_;
  // Intervals queued for spilling alongside the current one; rematerialized
  // code must not depend on their registers (checked in reMaterialize).
44   const SmallVectorImpl<LiveInterval*> *spillIs_;
  // Capture all per-function state up front so the spill methods can use it
  // without re-querying the MachineFunction.
49   InlineSpiller(MachineFunction *mf, LiveIntervals *lis, VirtRegMap *vrm)
50     : mf_(*mf), lis_(*lis), vrm_(*vrm),
51       mfi_(*mf->getFrameInfo()),
52       mri_(mf->getRegInfo()),
53       tii_(*mf->getTarget().getInstrInfo()),
54       tri_(*mf->getTarget().getRegisterInfo()),
55       reserved_(tri_.getReservedRegs(mf_)) {}
  // Spill li directly in the machine function, appending any new intervals
  // created for reloads/remats to newIntervals.
57   void spill(LiveInterval *li,
58              std::vector<LiveInterval*> &newIntervals,
59              SmallVectorImpl<LiveInterval*> &spillIs,
60              SlotIndex *earliestIndex);
  // Internal helpers used by spill(); see their definitions below.
61   bool reMaterialize(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
62   bool foldMemoryOperand(MachineBasicBlock::iterator MI,
63                          const SmallVectorImpl<unsigned> &Ops);
64   void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
65   void insertSpill(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
// Factory entry point: build an InlineSpiller for this machine function.
// NOTE(review): the LiveIntervals*/VirtRegMap* parameters (lis, vrm) used in
// the call below are declared on lines not visible in this view; mli is
// accepted here but not forwarded to the InlineSpiller constructor.
70 Spiller *createInlineSpiller(MachineFunction *mf,
72                              const MachineLoopInfo *mli,
74   return new InlineSpiller(mf, lis, vrm);
78 /// reMaterialize - Attempt to rematerialize li_->reg before MI instead of
/// reloading it from the stack slot.  The caller (spill) falls back to an
/// explicit reload when this returns false.
80 bool InlineSpiller::reMaterialize(LiveInterval &NewLI,
81                                   MachineBasicBlock::iterator MI) {
  // Find the value of li_->reg that is live at MI's use slot.
82   SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
83   LiveRange *LR = li_->getLiveRangeContaining(UseIdx);
  // NOTE(review): a !LR guard (use of an undefined value) appears to sit on
  // lines not visible here; the DEBUG/loop below handle that case by marking
  // the uses <undef> instead of reloading.
85   DEBUG(dbgs() << "\tundef at " << UseIdx << ", adding <undef> flags.\n");
86   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
87     MachineOperand &MO = MI->getOperand(i);
    // Flag every use of li_->reg on this instruction (body on unseen lines,
    // presumably MO.setIsUndef() -- TODO confirm).
88     if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg)
94   // Find the instruction that defined this value of li_->reg.
  // Without an accurate def slot we cannot locate the defining instruction.
95   if (!LR->valno->isDefAccurate())
97   SlotIndex OrigDefIdx = LR->valno->def;
98   MachineInstr *OrigDefMI = lis_.getInstructionFromIndex(OrigDefIdx);
102   // FIXME: Provide AliasAnalysis argument.
  // Only instructions the target declares trivially rematerializable qualify.
103   if (!tii_.isTriviallyReMaterializable(OrigDefMI))
106   // A rematerializable instruction may be using other virtual registers.
107   // Make sure they are available at the new location.
108   for (unsigned i = 0, e = OrigDefMI->getNumOperands(); i != e; ++i) {
109     MachineOperand &MO = OrigDefMI->getOperand(i);
    // Skip non-register operands and the register being spilled itself.
110     if (!MO.isReg() || !MO.getReg() || MO.getReg() == li_->reg)
112     // Reserved physregs are OK. Others are not (probably from coalescing).
113     if (TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
114       if (reserved_.test(MO.getReg()))
119     // We don't want to move any virtual defs.
122     // We have a use of a virtual register other than li_->reg.
125     // We cannot depend on virtual registers in spillIs_. They will be spilled.
126     for (unsigned si = 0, se = spillIs_->size(); si != se; ++si)
127       if ((*spillIs_)[si]->reg == MO.getReg())
130     // Is the register available here with the same value as at OrigDefMI?
131     LiveInterval &ULI = lis_.getInterval(MO.getReg());
132     LiveRange *HereLR = ULI.getLiveRangeContaining(UseIdx);
    // The other register must carry the *same* value number at both points;
    // if the value differs, rematerializing would read the wrong value.
135     LiveRange *DefLR = ULI.getLiveRangeContaining(OrigDefIdx.getUseIndex());
136     if (!DefLR || DefLR->valno != HereLR->valno)
140   // Finally we can rematerialize OrigDefMI before MI.
141   MachineBasicBlock &MBB = *MI->getParent();
  // Emit a clone of OrigDefMI defining NewLI.reg, then index it (--MI points
  // the iterator at the newly inserted instruction).
142   tii_.reMaterialize(MBB, MI, NewLI.reg, 0, OrigDefMI, tri_);
143   SlotIndex DefIdx = lis_.InsertMachineInstrInMaps(--MI).getDefIndex();
144   DEBUG(dbgs() << "\tremat: " << DefIdx << '\t' << *MI);
  // The rematerialized value lives only from the clone to the use at MI.
145   VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, true,
146                                       lis_.getVNInfoAllocator());
147   NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
151 /// foldMemoryOperand - Try folding stack slot references in Ops into MI.
152 /// Return true on success, and MI will be erased.
153 bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
154                                       const SmallVectorImpl<unsigned> &Ops) {
155   // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands, so filter Ops down before handing them to the target hook.
157   SmallVector<unsigned, 8> FoldOps;
158   for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
159     unsigned Idx = Ops[i];
160     MachineOperand &MO = MI->getOperand(Idx);
    // NOTE(review): guards skipping implicit and sub-register operands appear
    // to be on lines not visible in this view -- confirm against full file.
163     // FIXME: Teach targets to deal with subregs.
166     // Tied use operands should not be passed to foldMemoryOperand.
167     if (!MI->isRegTiedToDefOperand(Idx))
168       FoldOps.push_back(Idx);
  // Ask the target to produce a folded instruction that accesses stackSlot_
  // directly.  NOTE(review): the !FoldMI failure path (return false) appears
  // to be on an unseen line.
171   MachineInstr *FoldMI = tii_.foldMemoryOperand(mf_, MI, FoldOps, stackSlot_);
  // Replace MI with FoldMI in both the slot index maps and the basic block,
  // and record the new stack slot access with VirtRegMap.
174   MachineBasicBlock &MBB = *MI->getParent();
175   lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
176   vrm_.addSpillSlotUse(stackSlot_, FoldMI);
177   MBB.insert(MBB.erase(MI), FoldMI);
178   DEBUG(dbgs() << "\tfolded: " << *FoldMI);
182 /// insertReload - Insert a reload of NewLI.reg before MI.
183 void InlineSpiller::insertReload(LiveInterval &NewLI,
184                                  MachineBasicBlock::iterator MI) {
185   MachineBasicBlock &MBB = *MI->getParent();
  // Idx is the def slot of the instruction that will consume the reload; it
  // becomes the end of the reloaded value's live range below.
186   SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  // Emit the load immediately before MI and give it a slot index.
187   tii_.loadRegFromStackSlot(MBB, MI, NewLI.reg, stackSlot_, rc_, &tri_);
188   --MI; // Point to load instruction.
189   SlotIndex LoadIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  // Record the stack slot access so VirtRegMap can track slot users.
190   vrm_.addSpillSlotUse(stackSlot_, MI);
191   DEBUG(dbgs() << "\treload: " << LoadIdx << '\t' << *MI);
  // The reloaded value is live only from the load to the use at MI.
192   VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0, true,
193                                        lis_.getVNInfoAllocator());
194   NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
197 /// insertSpill - Insert a spill of NewLI.reg after MI.
198 void InlineSpiller::insertSpill(LiveInterval &NewLI,
199                                 MachineBasicBlock::iterator MI) {
200   MachineBasicBlock &MBB = *MI->getParent();
  // Idx is the def slot of MI, i.e. where the value being spilled is defined;
  // it becomes the start of the live range added below.
201   SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  // Emit the store immediately *after* MI (++MI as insertion point), then
  // step back so MI refers to the new store instruction.
202   tii_.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, stackSlot_, rc_, &tri_);
203   --MI; // Point to store instruction.
204   SlotIndex StoreIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  // Record the stack slot access so VirtRegMap can track slot users.
205   vrm_.addSpillSlotUse(stackSlot_, MI);
206   DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
  // The spilled value is live from its def at MI to the store.
207   VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, true,
208                                         lis_.getVNInfoAllocator());
209   NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
// spill - Main entry point.  Replace every use/def of li->reg with a short-
// lived new virtual register fed by a remat/reload and drained by a spill
// store, assigning li->reg a stack slot via VirtRegMap.
212 void InlineSpiller::spill(LiveInterval *li,
213                           std::vector<LiveInterval*> &newIntervals,
214                           SmallVectorImpl<LiveInterval*> &spillIs,
215                           SlotIndex *earliestIndex) {
216   DEBUG(dbgs() << "Inline spilling " << *li << "\n");
217   assert(li->isSpillable() && "Attempting to spill already spilled value.");
218   assert(!li->isStackSlot() && "Trying to spill a stack slot.");
  // Cache per-spill state used by the helper methods; the stack slot is
  // allocated (or reused) by VirtRegMap.
221   rc_ = mri_.getRegClass(li->reg);
222   stackSlot_ = vrm_.assignVirt2StackSlot(li->reg);
225   // Iterate over instructions using register.
  // skipInstruction() advances past all operands of the same instruction, so
  // each MI is visited once even with multiple uses of li->reg.
226   for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(li->reg);
227        MachineInstr *MI = RI.skipInstruction();) {
229     // Analyze instruction.
    // NOTE(review): the declarations of Reads/Writes consumed by tie() below
    // appear to be on a line not visible in this view.
231     SmallVector<unsigned, 8> Ops;
232     tie(Reads, Writes) = MI->readsWritesVirtualRegister(li->reg, &Ops);
234     // Allocate interval around instruction.
235     // FIXME: Infer regclass from instruction alone.
236     unsigned NewVReg = mri_.createVirtualRegister(rc_);
238     LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
    // The short interval bridging one instruction must never be spilled again.
239     NewLI.markNotSpillable();
241     // Attempt remat instead of reload.
    // NewLI gains a range iff reMaterialize succeeded; a read that could not
    // be rematerialized needs a reload.
242     bool NeedsReload = Reads && !reMaterialize(NewLI, MI);
244     // Attempt to fold memory ops.
    // On successful folding MI is erased; the loop presumably continues to
    // the next instruction (continuation on unseen lines -- TODO confirm).
245     if (NewLI.empty() && foldMemoryOperand(MI, Ops))
249       insertReload(NewLI, MI);
251     // Rewrite instruction operands.
    // Point MI's operands at NewVReg; a non-tied def is dead here since the
    // original register is being replaced per-instruction.
252     bool hasLiveDef = false;
253     for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
254       MachineOperand &MO = MI->getOperand(Ops[i]);
257       if (!MI->isRegTiedToDefOperand(Ops[i]))
265     // FIXME: Use a second vreg if instruction has no tied ops.
    // Only a def that stays live past this instruction needs a spill store.
266     if (Writes && hasLiveDef)
267       insertSpill(NewLI, MI);
269     DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
270     newIntervals.push_back(&NewLI);