//===-- llvm/Target/TargetSchedule.cpp - Sched Machine Model ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a wrapper around MCSchedModel that allows the interface
// to benefit from information currently only available in TargetInstrInfo.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;
static cl::opt<bool> EnableSchedModel("schedmodel", cl::Hidden, cl::init(true),
  cl::desc("Use TargetSchedModel for latency lookup"));

static cl::opt<bool> EnableSchedItins("scheditins", cl::Hidden, cl::init(true),
  cl::desc("Use InstrItineraryData for latency lookup"));
bool TargetSchedModel::hasInstrSchedModel() const {
  return EnableSchedModel && SchedModel.hasInstrSchedModel();
}

bool TargetSchedModel::hasInstrItineraries() const {
  return EnableSchedItins && !InstrItins.isEmpty();
}
static unsigned gcd(unsigned Dividend, unsigned Divisor) {
  // Dividend and Divisor will be naturally swapped as needed.
  while (Divisor) {
    unsigned Rem = Dividend % Divisor;
    Dividend = Divisor;
    Divisor = Rem;
  }
  return Dividend;
}

static unsigned lcm(unsigned A, unsigned B) {
  unsigned LCM = (uint64_t(A) * B) / gcd(A, B);
  assert((LCM >= A && LCM >= B) && "LCM overflow");
  return LCM;
}
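
// For example, gcd(12, 8) == 4 and lcm(12, 8) == (12 * 8) / 4 == 24; the
// 64-bit intermediate product guards against overflow before the division.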
void TargetSchedModel::init(const MCSchedModel &sm,
                            const TargetSubtargetInfo *sti,
                            const TargetInstrInfo *tii) {
  SchedModel = sm;
  STI = sti;
  TII = tii;
  STI->initInstrItins(InstrItins);

  unsigned NumRes = SchedModel.getNumProcResourceKinds();
  ResourceFactors.resize(NumRes);
  ResourceLCM = SchedModel.IssueWidth;
  for (unsigned Idx = 0; Idx < NumRes; ++Idx) {
    unsigned NumUnits = SchedModel.getProcResource(Idx)->NumUnits;
    if (NumUnits > 0)
      ResourceLCM = lcm(ResourceLCM, NumUnits);
  }
  MicroOpFactor = ResourceLCM / SchedModel.IssueWidth;
  for (unsigned Idx = 0; Idx < NumRes; ++Idx) {
    unsigned NumUnits = SchedModel.getProcResource(Idx)->NumUnits;
    ResourceFactors[Idx] = NumUnits ? (ResourceLCM / NumUnits) : 0;
  }
}
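
// A hypothetical example of the normalization above: with IssueWidth = 2 and
// two resource kinds having 2 and 3 units, ResourceLCM = lcm(lcm(2, 2), 3) = 6,
// MicroOpFactor = 6 / 2 = 3, and the per-resource factors are 6 / 2 = 3 and
// 6 / 3 = 2, so cycle counts on different resources can be compared on a
// common scale.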
unsigned TargetSchedModel::getNumMicroOps(const MachineInstr *MI,
                                          const MCSchedClassDesc *SC) const {
  if (hasInstrItineraries()) {
    int UOps = InstrItins.getNumMicroOps(MI->getDesc().getSchedClass());
    return (UOps >= 0) ? UOps : TII->getNumMicroOps(&InstrItins, MI);
  }
  if (hasInstrSchedModel()) {
    if (!SC)
      SC = resolveSchedClass(MI);
    if (SC->isValid())
      return SC->NumMicroOps;
  }
  return MI->isTransient() ? 0 : 1;
}
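
// Usage sketch (the variable name SM is hypothetical): after init(), a client
// may ask SM.getNumMicroOps(MI) for the number of issue slots MI consumes;
// when no model covers MI, transient instructions such as COPY report 0
// micro-ops and everything else reports 1.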
// The machine model may explicitly specify an invalid latency, which
// effectively means infinite latency. Since users of the TargetSchedule API
// don't know how to handle this, we convert it to a very large latency that is
// easy to distinguish when debugging the DAG but won't induce overflow.
static unsigned capLatency(int Cycles) {
  return Cycles >= 0 ? Cycles : 1000;
}
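
// For example, capLatency(3) == 3, while an invalid latency of -1 maps to the
// sentinel value 1000.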
/// Return the MCSchedClassDesc for this instruction. Some SchedClasses require
/// evaluation of predicates that depend on instruction operands or flags.
const MCSchedClassDesc *TargetSchedModel::
resolveSchedClass(const MachineInstr *MI) const {
  // Get the definition's scheduling class descriptor from this machine model.
  unsigned SchedClass = MI->getDesc().getSchedClass();
  const MCSchedClassDesc *SCDesc = SchedModel.getSchedClassDesc(SchedClass);
  if (!SCDesc->isValid())
    return SCDesc;

#ifndef NDEBUG
  unsigned NIter = 0;
#endif
  while (SCDesc->isVariant()) {
    assert(++NIter < 6 && "Variants are nested deeper than the magic number");
    SchedClass = STI->resolveSchedClass(SchedClass, MI, this);
    SCDesc = SchedModel.getSchedClassDesc(SchedClass);
  }
  return SCDesc;
}
/// Find the def index of this operand. This index maps to the machine model
/// and is independent of use operands. Def operands may be reordered with
/// uses or merged with uses without affecting the def index (e.g. before/after
/// regalloc). However, an instruction's def operands must never be reordered
/// with respect to each other.
static unsigned findDefIdx(const MachineInstr *MI, unsigned DefOperIdx) {
  unsigned DefIdx = 0;
  for (unsigned i = 0; i != DefOperIdx; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isDef())
      ++DefIdx;
  }
  return DefIdx;
}
/// Find the use index of this operand. This is independent of the
/// instruction's def operands.
///
/// Note that uses are not determined by the operand's isUse property, which
/// is simply the inverse of isDef. Here we consider any readsReg operand to be
/// a "use". The machine model allows an operand to be both a Def and Use.
static unsigned findUseIdx(const MachineInstr *MI, unsigned UseOperIdx) {
  unsigned UseIdx = 0;
  for (unsigned i = 0; i != UseOperIdx; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.readsReg())
      ++UseIdx;
  }
  return UseIdx;
}
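
// For a hypothetical three-address instruction "%r0 = ADD %r1, %r2", operand 0
// is the only def, so findDefIdx(MI, 0) == 0; findUseIdx(MI, 2) == 1 because
// only operand 1 reads a register at an index below 2.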
// Top-level API for clients that know the operand indices.
unsigned TargetSchedModel::computeOperandLatency(
  const MachineInstr *DefMI, unsigned DefOperIdx,
  const MachineInstr *UseMI, unsigned UseOperIdx) const {

  if (!hasInstrSchedModel() && !hasInstrItineraries())
    return TII->defaultDefLatency(SchedModel, DefMI);

  if (hasInstrItineraries()) {
    int OperLatency = 0;
    if (UseMI) {
      OperLatency = TII->getOperandLatency(&InstrItins, DefMI, DefOperIdx,
                                           UseMI, UseOperIdx);
    }
    else {
      unsigned DefClass = DefMI->getDesc().getSchedClass();
      OperLatency = InstrItins.getOperandCycle(DefClass, DefOperIdx);
    }
    if (OperLatency >= 0)
      return OperLatency;

    // No operand latency was found.
    unsigned InstrLatency = TII->getInstrLatency(&InstrItins, DefMI);

    // Expected latency is the max of the stage latency and itinerary props.
    // Rather than directly querying InstrItins stage latency, we call a TII
    // hook to allow subtargets to specialize latency. This hook is only
    // applicable to the InstrItins model. InstrSchedModel should model all
    // special cases without TII hooks.
    InstrLatency = std::max(InstrLatency,
                            TII->defaultDefLatency(SchedModel, DefMI));
    return InstrLatency;
  }
  // hasInstrSchedModel()
  const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);
  unsigned DefIdx = findDefIdx(DefMI, DefOperIdx);
  if (DefIdx < SCDesc->NumWriteLatencyEntries) {
    // Lookup the definition's write latency in SubtargetInfo.
    const MCWriteLatencyEntry *WLEntry =
      STI->getWriteLatencyEntry(SCDesc, DefIdx);
    unsigned WriteID = WLEntry->WriteResourceID;
    unsigned Latency = capLatency(WLEntry->Cycles);
    if (!UseMI)
      return Latency;

    // Lookup the use's latency adjustment in SubtargetInfo.
    const MCSchedClassDesc *UseDesc = resolveSchedClass(UseMI);
    if (UseDesc->NumReadAdvanceEntries == 0)
      return Latency;
    unsigned UseIdx = findUseIdx(UseMI, UseOperIdx);
    int Advance = STI->getReadAdvanceCycles(UseDesc, UseIdx, WriteID);
    if (Advance > 0 && (unsigned)Advance > Latency) // unsigned wrap
      return 0;
    return Latency - Advance;
  }
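
  // As a worked example with assumed numbers: a write latency of 4 cycles and
  // a ReadAdvance of 1 yield an effective operand latency of 4 - 1 = 3; an
  // advance of 5 would wrap the unsigned subtraction, so it is clamped to 0.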
  // If DefIdx does not exist in the model (e.g. implicit defs), then return
  // unit latency (defaultDefLatency may be too conservative).
#ifndef NDEBUG
  if (SCDesc->isValid() && !DefMI->getOperand(DefOperIdx).isImplicit()
      && !DefMI->getDesc().OpInfo[DefOperIdx].isOptionalDef()
      && SchedModel.isComplete()) {
    std::string Err;
    raw_string_ostream ss(Err);
    ss << "DefIdx " << DefIdx << " exceeds machine model writes for "
       << *DefMI;
    report_fatal_error(ss.str());
  }
#endif
  // FIXME: Automatically giving all implicit defs defaultDefLatency is
  // undesirable. We should only do it for defs that are known to the MC
  // desc like flags. Truly implicit defs should get 1 cycle latency.
  return DefMI->isTransient() ? 0 : TII->defaultDefLatency(SchedModel, DefMI);
}
unsigned TargetSchedModel::computeInstrLatency(unsigned Opcode) const {
  assert(hasInstrSchedModel() && "Only call this function with a SchedModel");

  unsigned SCIdx = TII->get(Opcode).getSchedClass();
  const MCSchedClassDesc *SCDesc = SchedModel.getSchedClassDesc(SCIdx);
  unsigned Latency = 0;

  if (SCDesc->isValid() && !SCDesc->isVariant()) {
    for (unsigned DefIdx = 0, DefEnd = SCDesc->NumWriteLatencyEntries;
         DefIdx != DefEnd; ++DefIdx) {
      // Lookup the definition's write latency in SubtargetInfo.
      const MCWriteLatencyEntry *WLEntry =
        STI->getWriteLatencyEntry(SCDesc, DefIdx);
      Latency = std::max(Latency, capLatency(WLEntry->Cycles));
    }
  }

  assert(Latency && "No MI sched latency");
  return Latency;
}
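
// For instance, if an opcode's scheduling class declares write latencies of
// {2, 5} for its two defs, this returns 5, the maximum over all writes.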
unsigned
TargetSchedModel::computeInstrLatency(const MachineInstr *MI,
                                      bool UseDefaultDefLatency) const {
  // For the itinerary model, fall back to the old subtarget hook.
  // Allow subtargets to compute Bundle latencies outside the machine model.
  if (hasInstrItineraries() || MI->isBundle() ||
      (!hasInstrSchedModel() && !UseDefaultDefLatency))
    return TII->getInstrLatency(&InstrItins, MI);

  if (hasInstrSchedModel()) {
    const MCSchedClassDesc *SCDesc = resolveSchedClass(MI);
    if (SCDesc->isValid()) {
      unsigned Latency = 0;
      for (unsigned DefIdx = 0, DefEnd = SCDesc->NumWriteLatencyEntries;
           DefIdx != DefEnd; ++DefIdx) {
        // Lookup the definition's write latency in SubtargetInfo.
        const MCWriteLatencyEntry *WLEntry =
          STI->getWriteLatencyEntry(SCDesc, DefIdx);
        Latency = std::max(Latency, capLatency(WLEntry->Cycles));
      }
      return Latency;
    }
  }
  return TII->defaultDefLatency(SchedModel, MI);
}
unsigned TargetSchedModel::
computeOutputLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
                     const MachineInstr *DepMI) const {
  if (SchedModel.MicroOpBufferSize <= 1)
    return 1;

  // MicroOpBufferSize > 1 indicates an out-of-order processor that can
  // dispatch WAW dependencies in the same cycle.

  // Treat predication as a data dependency for out-of-order cpus. In-order
  // cpus do not need to treat predicated writes specially.
  //
  // TODO: The following hack exists because predication passes do not
  // correctly append imp-use operands, and readsReg() strangely returns false
  // for predicated defs.
  unsigned Reg = DefMI->getOperand(DefOperIdx).getReg();
  const MachineFunction &MF = *DefMI->getParent()->getParent();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!DepMI->readsRegister(Reg, TRI) && TII->isPredicated(DepMI))
    return computeInstrLatency(DefMI);

  // If we have a per-operand scheduling model, check if this def is writing
  // an unbuffered resource. If so, it is treated like an in-order cpu.
  if (hasInstrSchedModel()) {
    const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);
    if (SCDesc->isValid()) {
      for (const MCWriteProcResEntry *PRI = STI->getWriteProcResBegin(SCDesc),
             *PRE = STI->getWriteProcResEnd(SCDesc); PRI != PRE; ++PRI) {
        if (!SchedModel.getProcResource(PRI->ProcResourceIdx)->BufferSize)
          return 1;
      }
    }
  }
  return 0;
}
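
// In effect: in-order cores (MicroOpBufferSize <= 1) and writes to unbuffered
// resources keep a 1-cycle WAW dependence, while a fully buffered out-of-order
// core can dispatch the redefinition in the same cycle, hence 0.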