//===-- llvm/Target/TargetSchedule.cpp - Sched Machine Model ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a wrapper around MCSchedModel that allows the interface
// to benefit from information currently only available in TargetInstrInfo.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;
24 static cl::opt<bool> EnableSchedModel("schedmodel", cl::Hidden, cl::init(true),
25 cl::desc("Use TargetSchedModel for latency lookup"));
27 static cl::opt<bool> EnableSchedItins("scheditins", cl::Hidden, cl::init(true),
28 cl::desc("Use InstrItineraryData for latency lookup"));
30 bool TargetSchedModel::hasInstrSchedModel() const {
31 return EnableSchedModel && SchedModel.hasInstrSchedModel();
34 bool TargetSchedModel::hasInstrItineraries() const {
35 return EnableSchedItins && !InstrItins.isEmpty();
38 void TargetSchedModel::init(const MCSchedModel &sm,
39 const TargetSubtargetInfo *sti,
40 const TargetInstrInfo *tii) {
44 STI->initInstrItins(InstrItins);
47 /// If we can determine the operand latency from the def only, without machine
48 /// model or itinerary lookup, do so. Otherwise return -1.
49 int TargetSchedModel::getDefLatency(const MachineInstr *DefMI,
52 // Return a latency based on the itinerary properties and defining instruction
53 // if possible. Some common subtargets don't require per-operand latency,
54 // especially for minimum latencies.
56 // If MinLatency is invalid, then use the itinerary for MinLatency. If no
57 // itinerary exists either, then use single cycle latency.
58 if (SchedModel.MinLatency < 0 && !hasInstrItineraries()) {
61 return SchedModel.MinLatency;
63 else if (!hasInstrSchedModel() && !hasInstrItineraries()) {
64 return TII->defaultDefLatency(&SchedModel, DefMI);
66 // ...operand lookup required
70 /// Return the MCSchedClassDesc for this instruction. Some SchedClasses require
71 /// evaluation of predicates that depend on instruction operands or flags.
72 const MCSchedClassDesc *TargetSchedModel::
73 resolveSchedClass(const MachineInstr *MI) const {
75 // Get the definition's scheduling class descriptor from this machine model.
76 unsigned SchedClass = MI->getDesc().getSchedClass();
77 const MCSchedClassDesc *SCDesc = SchedModel.getSchedClassDesc(SchedClass);
82 while (SCDesc->isVariant()) {
83 assert(++NIter < 6 && "Variants are nested deeper than the magic number");
85 SchedClass = STI->resolveSchedClass(SchedClass, MI, this);
86 SCDesc = SchedModel.getSchedClassDesc(SchedClass);
91 /// Find the def index of this operand. This index maps to the machine model and
92 /// is independent of use operands. Def operands may be reordered with uses or
93 /// merged with uses without affecting the def index (e.g. before/after
94 /// regalloc). However, an instruction's def operands must never be reordered
95 /// with respect to each other.
96 static unsigned findDefIdx(const MachineInstr *MI, unsigned DefOperIdx) {
98 for (unsigned i = 0; i != DefOperIdx; ++i) {
99 const MachineOperand &MO = MI->getOperand(i);
100 if (MO.isReg() && MO.isDef())
106 /// Find the use index of this operand. This is independent of the instruction's
109 /// Note that uses are not determined by the operand's isUse property, which
110 /// is simply the inverse of isDef. Here we consider any readsReg operand to be
111 /// a "use". The machine model allows an operand to be both a Def and Use.
112 static unsigned findUseIdx(const MachineInstr *MI, unsigned UseOperIdx) {
114 for (unsigned i = 0; i != UseOperIdx; ++i) {
115 const MachineOperand &MO = MI->getOperand(i);
116 if (MO.isReg() && MO.readsReg())
122 // Top-level API for clients that know the operand indices.
123 unsigned TargetSchedModel::computeOperandLatency(
124 const MachineInstr *DefMI, unsigned DefOperIdx,
125 const MachineInstr *UseMI, unsigned UseOperIdx,
126 bool FindMin) const {
128 int DefLatency = getDefLatency(DefMI, FindMin);
132 if (hasInstrItineraries()) {
136 TII->getOperandLatency(&InstrItins, DefMI, DefOperIdx, UseMI, UseOperIdx);
139 unsigned DefClass = DefMI->getDesc().getSchedClass();
140 OperLatency = InstrItins.getOperandCycle(DefClass, DefOperIdx);
142 if (OperLatency >= 0)
145 // No operand latency was found.
146 unsigned InstrLatency = TII->getInstrLatency(&InstrItins, DefMI);
148 // Expected latency is the max of the stage latency and itinerary props.
149 // Rather than directly querying InstrItins stage latency, we call a TII
150 // hook to allow subtargets to specialize latency. This hook is only
151 // applicable to the InstrItins model. InstrSchedModel should model all
152 // special cases without TII hooks.
154 InstrLatency = std::max(InstrLatency,
155 TII->defaultDefLatency(&SchedModel, DefMI));
158 assert(!FindMin && hasInstrSchedModel() &&
159 "Expected a SchedModel for this cpu");
160 const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);
161 unsigned DefIdx = findDefIdx(DefMI, DefOperIdx);
162 if (DefIdx < SCDesc->NumWriteLatencyEntries) {
163 // Lookup the definition's write latency in SubtargetInfo.
164 const MCWriteLatencyEntry *WLEntry =
165 STI->getWriteLatencyEntry(SCDesc, DefIdx);
166 unsigned WriteID = WLEntry->WriteResourceID;
167 unsigned Latency = WLEntry->Cycles;
171 // Lookup the use's latency adjustment in SubtargetInfo.
172 const MCSchedClassDesc *UseDesc = resolveSchedClass(UseMI);
173 if (UseDesc->NumReadAdvanceEntries == 0)
175 unsigned UseIdx = findUseIdx(UseMI, UseOperIdx);
176 return Latency - STI->getReadAdvanceCycles(UseDesc, UseIdx, WriteID);
178 // If DefIdx does not exist in the model (e.g. implicit defs), then return
179 // unit latency (defaultDefLatency may be too conservative).
181 if (SCDesc->isValid() && !DefMI->getOperand(DefOperIdx).isImplicit()
182 && !DefMI->getDesc().OpInfo[DefOperIdx].isOptionalDef()) {
184 raw_string_ostream ss(Err);
185 ss << "DefIdx " << DefIdx << " exceeds machine model writes for "
187 report_fatal_error(ss.str());
193 unsigned TargetSchedModel::computeInstrLatency(const MachineInstr *MI) const {
194 if (hasInstrItineraries()) {
195 // For the itinerary model, fall back to the old subtarget hook.
196 return TII->getInstrLatency(&InstrItins, MI);
198 if (hasInstrSchedModel()) {
199 unsigned Latency = 0;
200 const MCSchedClassDesc *SCDesc = resolveSchedClass(MI);
201 for (unsigned DefIdx = 0, DefEnd = SCDesc->NumWriteLatencyEntries;
202 DefIdx != DefEnd; ++DefIdx) {
203 // Lookup the definition's write latency in SubtargetInfo.
204 const MCWriteLatencyEntry *WLEntry =
205 STI->getWriteLatencyEntry(SCDesc, DefIdx);
206 Latency = std::max(Latency, WLEntry->Cycles);
210 return TII->defaultDefLatency(&SchedModel, MI);