//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "sched-instrs"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h"
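
using namespace llvm;

namespace {
  /// LoopDependencies - Records, for each register live into a loop header,
  /// the first in-loop use of that register and the number of instructions
  /// that precede it. BuildSchedUnits uses this to put extra latency on
  /// loop-carried (back-edge) register dependencies.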
  class VISIBILITY_HIDDEN LoopDependencies {
    const MachineLoopInfo &MLI;
    const MachineDominatorTree &MDT;

  public:
    typedef std::map<unsigned, std::pair<const MachineOperand *, unsigned> >
      LoopDeps;
    LoopDeps Deps;

    LoopDependencies(const MachineLoopInfo &mli,
                     const MachineDominatorTree &mdt) :
      MLI(mli), MDT(mdt) {}

    void VisitLoop(const MachineLoop *Loop) {
      Deps.clear();
      MachineBasicBlock *Header = Loop->getHeader();
      SmallSet<unsigned, 8> LoopLiveIns;
      for (MachineBasicBlock::livein_iterator LI = Header->livein_begin(),
           LE = Header->livein_end(); LI != LE; ++LI)
        LoopLiveIns.insert(*LI);

      VisitRegion(MDT.getNode(Header), Loop, LoopLiveIns);
    }

  private:
    void VisitRegion(const MachineDomTreeNode *Node,
                     const MachineLoop *Loop,
                     const SmallSet<unsigned, 8> &LoopLiveIns) {
      MachineBasicBlock *MBB = Node->getBlock();
      if (!Loop->contains(MBB)) return;

      unsigned Count = 0;
      for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
           I != E; ++I, ++Count) {
        const MachineInstr *MI = I;
        for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
          const MachineOperand &MO = MI->getOperand(i);
          if (!MO.isReg() || !MO.isUse())
            continue;
          unsigned MOReg = MO.getReg();
          if (LoopLiveIns.count(MOReg))
            Deps.insert(std::make_pair(MOReg, std::make_pair(&MO, Count)));
        }
      }

      const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();
      for (unsigned I = 0, E = Children.size(); I != E; ++I)
        VisitRegion(Children[I], Loop, LoopLiveIns);
    }
  };
} // end anonymous namespace

ScheduleDAGInstrs::ScheduleDAGInstrs(MachineBasicBlock *bb,
                                     const TargetMachine &tm,
                                     const MachineLoopInfo &mli,
                                     const MachineDominatorTree &mdt)
  : ScheduleDAG(0, bb, tm), MLI(mli), MDT(mdt) {}
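
// BuildSchedUnits - Create an SUnit for each instruction in the current
// block and add register, memory, and control dependence edges between them.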
void ScheduleDAGInstrs::BuildSchedUnits() {
  SUnits.reserve(BB->size());

  // We build scheduling units by walking a block's instruction list from
  // bottom to top.

  // Remember where defs and uses of each physical register are as we proceed.
  std::vector<SUnit *> Defs[TargetRegisterInfo::FirstVirtualRegister] = {};
  std::vector<SUnit *> Uses[TargetRegisterInfo::FirstVirtualRegister] = {};

  // Remember where unknown loads are after the most recent unknown store
  // so they can be given order edges on the next store or barrier above them.
  std::vector<SUnit *> PendingLoads;

  // Remember where a generic side-effecting instruction is as we proceed. If
  // ChainMMO is null, this is assumed to have arbitrary side-effects. If
  // ChainMMO is non-null, then Chain makes only a single memory reference.
  SUnit *Chain = 0;
  MachineMemOperand *ChainMMO = 0;

  // Memory references to specific known memory locations are tracked so that
  // they can be given more precise dependencies.
  std::map<const Value *, SUnit *> MemDefs;
  std::map<const Value *, std::vector<SUnit *> > MemUses;

  // Terminators can perform control transfers, so we need to make sure that
  // all the work of the block is done before the terminator.
  SUnit *Terminator = 0;

  LoopDependencies LoopRegs(MLI, MDT);

  // Track which regs are live into a loop, to help guide back-edge-aware
  // scheduling.
  SmallSet<unsigned, 8> LoopLiveInRegs;
  if (MachineLoop *ML = MLI.getLoopFor(BB))
    if (BB == ML->getLoopLatch()) {
      MachineBasicBlock *Header = ML->getHeader();
      for (MachineBasicBlock::livein_iterator I = Header->livein_begin(),
           E = Header->livein_end(); I != E; ++I)
        LoopLiveInRegs.insert(*I);
      LoopRegs.VisitLoop(ML);
    }

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Ask the target if address-backscheduling is desirable, and if so how much.
  unsigned SpecialAddressLatency =
    TM.getSubtarget<TargetSubtarget>().getSpecialAddressLatency();
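
  // For each instruction, create an SUnit and add edges to the SUnits of the
  // instructions below it: register dependencies (data, anti, output),
  // memory/chain ordering dependencies, and order edges to the terminator.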
  for (MachineBasicBlock::iterator MII = BB->end(), MIE = BB->begin();
       MII != MIE; --MII) {
    MachineInstr *MI = prior(MII);
    const TargetInstrDesc &TID = MI->getDesc();
    SUnit *SU = NewSUnit(MI);

    // Assign the Latency field of SU using target-provided information.
    if (UnitLatencies)
      SU->Latency = 1;
    else
      ComputeLatency(SU);

    // Add register-based dependencies (data, anti, and output).
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
      std::vector<SUnit *> &UseList = Uses[Reg];
      std::vector<SUnit *> &DefList = Defs[Reg];
      // Optionally add output and anti dependencies.
      // TODO: Using a latency of 1 here assumes there's no cost for
      //       reusing registers.
      SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
      for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
        SUnit *DefSU = DefList[i];
        if (DefSU != SU &&
            (Kind != SDep::Output || !MO.isDead() ||
             !DefSU->getInstr()->registerDefIsDead(Reg)))
          DefSU->addPred(SDep(SU, Kind, /*Latency=*/1, /*Reg=*/Reg));
      }
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        std::vector<SUnit *> &DefList = Defs[*Alias];
        for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
          SUnit *DefSU = DefList[i];
          if (DefSU != SU &&
              (Kind != SDep::Output || !MO.isDead() ||
               !DefSU->getInstr()->registerDefIsDead(Reg)))
            DefSU->addPred(SDep(SU, Kind, /*Latency=*/1, /*Reg=*/ *Alias));
        }
      }

      if (MO.isDef()) {
        // Add any data dependencies.
        unsigned DataLatency = SU->Latency;
        for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
          SUnit *UseSU = UseList[i];
          if (UseSU == SU)
            continue;
          unsigned LDataLatency = DataLatency;
          // Optionally add in a special extra latency for nodes that
          // feed the address operand of a load or store.
          // TODO: Do this for register aliases too.
          if (SpecialAddressLatency != 0 && !UnitLatencies) {
            MachineInstr *UseMI = UseSU->getInstr();
            const TargetInstrDesc &UseTID = UseMI->getDesc();
            int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
            assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
            if ((UseTID.mayLoad() || UseTID.mayStore()) &&
                (unsigned)RegUseIndex < UseTID.getNumOperands() &&
                UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
              LDataLatency += SpecialAddressLatency;
          }
          UseSU->addPred(SDep(SU, SDep::Data, LDataLatency, Reg));
        }
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          std::vector<SUnit *> &UseList = Uses[*Alias];
          for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
            SUnit *UseSU = UseList[i];
            if (UseSU != SU)
              UseSU->addPred(SDep(SU, SDep::Data, DataLatency, *Alias));
          }
        }

        // If a def is going to wrap back around to the top of the loop,
        // backschedule it so the loop-carried use doesn't have to wait.
        // TODO: Blocks in loops without terminators can benefit too.
        if (!UnitLatencies && Terminator && DefList.empty()) {
          LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(Reg);
          if (I != LoopRegs.Deps.end()) {
            const MachineOperand *UseMO = I->second.first;
            unsigned Count = I->second.second;
            const MachineInstr *UseMI = UseMO->getParent();
            unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
            const TargetInstrDesc &UseTID = UseMI->getDesc();
            // TODO: If we knew the total depth of the region here, we could
            // handle the case where the whole loop is inside the region but
            // is large enough that the isScheduleHigh trick isn't needed.
            if (UseMOIdx < UseTID.getNumOperands()) {
              // Currently, we only support scheduling regions consisting of
              // single basic blocks. Check to see if the instruction is in
              // the same region by checking to see if it has the same parent.
              if (UseMI->getParent() != MI->getParent()) {
                unsigned Latency = SU->Latency;
                if (UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass())
                  Latency += SpecialAddressLatency;
                // This is a wild guess as to the portion of the latency which
                // will be overlapped by work done outside the current
                // scheduling region.
                Latency -= std::min(Latency, Count);
                // Add the artificial edge.
                Terminator->addPred(SDep(SU, SDep::Order, Latency,
                                         /*Reg=*/0, /*isNormalMemory=*/false,
                                         /*isMustAlias=*/false,
                                         /*isArtificial=*/true));
              } else if (SpecialAddressLatency > 0 &&
                         UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
                // The entire loop body is within the current scheduling region
                // and the latency of this operation is assumed to be greater
                // than the latency of the loop.
                // TODO: Recursively mark data-edge predecessors as
                //       isScheduleHigh too.
                SU->isScheduleHigh = true;
              }
            }
            LoopRegs.Deps.erase(I);
          }
        }

        UseList.clear();
        if (!MO.isDead())
          DefList.clear();
        DefList.push_back(SU);
      } else {
        UseList.push_back(SU);
      }
    }

    // Add chain dependencies.
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
    // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
    // produce more precise dependence information.
    if (TID.isCall() || TID.isReturn() || TID.isBranch() ||
        TID.hasUnmodeledSideEffects()) {
    new_chain:
      // This is the conservative case. Add dependencies on all memory
      // references.
      if (Chain)
        Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
      Chain = SU;
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        PendingLoads[k]->addPred(SDep(SU, SDep::Order, SU->Latency));
      PendingLoads.clear();
      for (std::map<const Value *, SUnit *>::iterator I = MemDefs.begin(),
           E = MemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, SU->Latency));
        I->second = SU;
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
           MemUses.begin(), E = MemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, SU->Latency));
        I->second.clear();
      }
      // See if it is known to just have a single memory reference.
      MachineInstr *ChainMI = Chain->getInstr();
      const TargetInstrDesc &ChainTID = ChainMI->getDesc();
      if (!ChainTID.isCall() && !ChainTID.isReturn() && !ChainTID.isBranch() &&
          !ChainTID.hasUnmodeledSideEffects() &&
          ChainMI->hasOneMemOperand() &&
          !ChainMI->memoperands_begin()->isVolatile() &&
          ChainMI->memoperands_begin()->getValue())
        // We know that the Chain accesses one specific memory location.
        ChainMMO = &*ChainMI->memoperands_begin();
      else
        // Unknown memory accesses. Assume the worst.
        ChainMMO = 0;
    } else if (TID.mayStore()) {
      if (MI->hasOneMemOperand() &&
          MI->memoperands_begin()->getValue() &&
          !MI->memoperands_begin()->isVolatile() &&
          isa<PseudoSourceValue>(MI->memoperands_begin()->getValue())) {
        // A store to a specific PseudoSourceValue. Add precise dependencies.
        const Value *V = MI->memoperands_begin()->getValue();
        // Handle the def in MemDefs, if there is one.
        std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
        if (I != MemDefs.end()) {
          I->second->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
          I->second = SU;
        } else {
          MemDefs[V] = SU;
        }
        // Handle the uses in MemUses, if there are any.
        std::map<const Value *, std::vector<SUnit *> >::iterator J =
          MemUses.find(V);
        if (J != MemUses.end()) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            J->second[i]->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
                                       /*isNormalMemory=*/true));
          J->second.clear();
        }
        // Add a general dependence too, if needed.
        if (Chain)
          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
      } else
        // Treat all other stores conservatively.
        goto new_chain;
    } else if (TID.mayLoad()) {
      if (TII->isInvariantLoad(MI)) {
        // Invariant load, no chain dependencies needed!
      } else if (MI->hasOneMemOperand() &&
                 MI->memoperands_begin()->getValue() &&
                 !MI->memoperands_begin()->isVolatile() &&
                 isa<PseudoSourceValue>(MI->memoperands_begin()->getValue())) {
        // A load from a specific PseudoSourceValue. Add precise dependencies.
        const Value *V = MI->memoperands_begin()->getValue();
        std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
        if (I != MemDefs.end())
          I->second->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
        MemUses[V].push_back(SU);

        // Add a general dependence too, if needed.
        if (Chain && (!ChainMMO ||
                      (ChainMMO->isStore() || ChainMMO->isVolatile())))
          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
      } else if (MI->hasVolatileMemoryRef()) {
        // Treat volatile loads conservatively. Note that this includes
        // cases where memoperand information is unavailable.
        goto new_chain;
      } else {
        // A normal load. Just depend on the general chain.
        if (Chain)
          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
        PendingLoads.push_back(SU);
      }
    }

    // Add chain edges from the terminator to ensure that all the work of the
    // block is completed before any control transfers.
    if (Terminator && SU->Succs.empty())
      Terminator->addPred(SDep(SU, SDep::Order, SU->Latency));
    if (TID.isTerminator() || MI->isLabel())
      Terminator = SU;
  }
}
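
// ComputeLatency - Assign SU->Latency from the target's instruction
// itinerary data; callers that force unit latencies skip this and use 1.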
void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();

  // Compute the latency for the node. We use the sum of the latencies for
  // all nodes flagged together into this SUnit.
  SU->Latency =
    InstrItins.getLatency(SU->getInstr()->getDesc().getSchedClass());

  // Simplistic target-independent heuristic: assume that loads take
  // extra time.
  if (InstrItins.isEmpty())
    if (SU->getInstr()->getDesc().mayLoad())
      SU->Latency += 2;
}

void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
  SU->getInstr()->dump();
}

std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  SU->getInstr()->print(oss);
  return oss.str();
}

// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() {
  // For MachineInstr-based scheduling, we're rescheduling the instructions in
  // the block, so start by removing them from the block.
  while (!BB->empty())
    BB->remove(BB->begin());

  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }
    BB->push_back(SU->getInstr());
  }

  return BB;
}