1 //===-- llvm/Target/TargetSchedule.cpp - Sched Machine Model ----*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements a wrapper around MCSchedModel that allows the interface
11 // to benefit from information currently only available in TargetInstrInfo.
13 //===----------------------------------------------------------------------===//
15 #include "llvm/CodeGen/TargetSchedule.h"
16 #include "llvm/Target/TargetInstrInfo.h"
17 #include "llvm/Target/TargetRegisterInfo.h"
18 #include "llvm/Target/TargetSubtargetInfo.h"
19 #include "llvm/Support/CommandLine.h"
20 #include "llvm/Support/raw_ostream.h"
// Command-line escape hatches for latency lookup: "-schedmodel=false" disables
// the per-operand machine model and "-scheditins=false" disables the
// itinerary tables. Both are hidden options and default to enabled; the
// latency queries below consult these flags before touching either source.
24 static cl::opt<bool> EnableSchedModel("schedmodel", cl::Hidden, cl::init(true),
25 cl::desc("Use TargetSchedModel for latency lookup"))&#59;
27 static cl::opt<bool> EnableSchedItins("scheditins", cl::Hidden, cl::init(true),
28 cl::desc("Use InstrItineraryData for latency lookup"));
30 void TargetSchedModel::init(const MCSchedModel &sm,
31 const TargetSubtargetInfo *sti,
32 const TargetInstrInfo *tii) {
36 STI->initInstrItins(InstrItins);
39 /// If we can determine the operand latency from the def only, without machine
40 /// model or itinerary lookup, do so. Otherwise return -1.
41 int TargetSchedModel::getDefLatency(const MachineInstr *DefMI,
// NOTE(review): this listing is elided (embedded numbering jumps 41 -> 44).
// The call site in computeOperandLatency passes a second argument, FindMin,
// so the missing continuation is presumably `bool FindMin) const {`, with a
// `if (FindMin) {` guard around the MinLatency logic below — confirm against
// the complete source.
44 // Return a latency based on the itinerary properties and defining instruction
45 // if possible. Some common subtargets don't require per-operand latency,
46 // especially for minimum latencies.
48 // If MinLatency is invalid, then use the itinerary for MinLatency. If no
49 // itinerary exists either, then use single cycle latency.
50 if (SchedModel.MinLatency < 0
51 && !(EnableSchedItins && hasInstrItineraries())) {
// NOTE(review): the body of this branch (original lines 52-53) is elided;
// per the comment above it presumably returns a single-cycle default.
54 return SchedModel.MinLatency;
// Neither the machine model nor the itineraries are usable (absent, or
// disabled via -schedmodel/-scheditins): fall back to the target's
// per-instruction default latency.
56 else if (!(EnableSchedModel && hasInstrSchedModel())
57 && !(EnableSchedItins && hasInstrItineraries())) {
58 return TII->defaultDefLatency(&SchedModel, DefMI);
60 // ...operand lookup required
// NOTE(review): the trailing `return -1;` promised by the doc comment above
// (and relied on by computeOperandLatency) is elided from this listing.
64 /// Return the MCSchedClassDesc for this instruction. Some SchedClasses require
65 /// evaluation of predicates that depend on instruction operands or flags.
66 const MCSchedClassDesc *TargetSchedModel::
67 resolveSchedClass(const MachineInstr *MI) const {
69 // Get the definition's scheduling class descriptor from this machine model.
70 unsigned SchedClass = MI->getDesc().getSchedClass();
71 const MCSchedClassDesc *SCDesc = SchedModel.getSchedClassDesc(SchedClass);
// NOTE(review): original lines 72-75 are elided here; they presumably hold a
// debug-only declaration of the `NIter` counter used by the assert below
// (it appears nowhere else in this listing) — confirm.
76 while (SCDesc->isVariant()) {
77 assert(++NIter < 6 && "Variants are nested deeper than the magic number");
// Ask the subtarget to resolve the variant class for this particular
// instruction; loop because the resolved class may itself be a variant.
79 SchedClass = STI->resolveSchedClass(SchedClass, MI, this);
80 SCDesc = SchedModel.getSchedClassDesc(SchedClass);
// NOTE(review): the loop close and the final `return SCDesc;` (original
// lines ~81-82) are elided from this listing.
85 /// Find the def index of this operand. This index maps to the machine model and
86 /// is independent of use operands. Def operands may be reordered with uses or
87 /// merged with uses without affecting the def index (e.g. before/after
88 /// regalloc). However, an instruction's def operands must never be reordered
89 /// with respect to each other.
90 static unsigned findDefIdx(const MachineInstr *MI, unsigned DefOperIdx) {
92 for (unsigned i = 0; i != DefOperIdx; ++i) {
93 const MachineOperand &MO = MI->getOperand(i);
94 if (MO.isReg() && MO.isDef())
100 /// Find the use index of this operand. This is independent of the instruction's
103 /// Note that uses are not determined by the operand's isUse property, which
104 /// is simply the inverse of isDef. Here we consider any readsReg operand to be
105 /// a "use". The machine model allows an operand to be both a Def and Use.
106 static unsigned findUseIdx(const MachineInstr *MI, unsigned UseOperIdx) {
108 for (unsigned i = 0; i != UseOperIdx; ++i) {
109 const MachineOperand &MO = MI->getOperand(i);
110 if (MO.isReg() && MO.readsReg())
116 // Top-level API for clients that know the operand indices.
// Computes the def-to-use latency for DefMI's operand DefOperIdx as read by
// UseMI's operand UseOperIdx, preferring (1) a cheap per-instruction answer,
// then (2) itineraries, then (3) the per-operand machine model.
117 unsigned TargetSchedModel::computeOperandLatency(
118 const MachineInstr *DefMI, unsigned DefOperIdx,
119 const MachineInstr *UseMI, unsigned UseOperIdx,
120 bool FindMin) const {
// First try the cheap answer that needs no operand lookup.
122 int DefLatency = getDefLatency(DefMI, FindMin);
// NOTE(review): original lines 123-125 are elided; getDefLatency's doc
// comment says it returns -1 only when an operand lookup is required, so an
// `if (DefLatency >= 0) return DefLatency;` check presumably belongs here.
// Itinerary-based lookup, when itineraries exist and are enabled.
126 if (EnableSchedItins && hasInstrItineraries()) {
// NOTE(review): the scaffolding around the next two lookups (original lines
// 127-129, 131-132) is elided; they are presumably the UseMI / no-UseMI
// alternatives of a conditional assigning `OperLatency`.
130 TII->getOperandLatency(&InstrItins, DefMI, DefOperIdx, UseMI, UseOperIdx);
133 unsigned DefClass = DefMI->getDesc().getSchedClass();
134 OperLatency = InstrItins.getOperandCycle(DefClass, DefOperIdx);
136 if (OperLatency >= 0)
// NOTE(review): the branch body (original line 137) is elided — presumably
// `return OperLatency;`.
139 // No operand latency was found.
140 unsigned InstrLatency = TII->getInstrLatency(&InstrItins, DefMI);
142 // Expected latency is the max of the stage latency and itinerary props.
144 InstrLatency = std::max(InstrLatency,
145 TII->defaultDefLatency(&SchedModel, DefMI));
// Machine-model path: only valid for expected (not minimum) latency, and
// only when a per-operand scheduling model is present and enabled.
148 assert(!FindMin && EnableSchedModel && hasInstrSchedModel() &&
149 "Expected a SchedModel for this cpu");
150 const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);
151 unsigned DefIdx = findDefIdx(DefMI, DefOperIdx);
152 if (DefIdx < SCDesc->NumWriteLatencyEntries) {
153 // Lookup the definition's write latency in SubtargetInfo.
154 const MCWriteLatencyEntry *WLEntry =
155 STI->getWriteLatencyEntry(SCDesc, DefIdx);
156 unsigned WriteID = WLEntry->WriteResourceID;
157 unsigned Latency = WLEntry->Cycles;
// NOTE(review): original lines 158-160 are elided; with no UseMI there is no
// read-advance to apply, so presumably `if (!UseMI) return Latency;`.
161 // Lookup the use's latency adjustment in SubtargetInfo.
162 const MCSchedClassDesc *UseDesc = resolveSchedClass(UseMI);
163 if (UseDesc->NumReadAdvanceEntries == 0)
// NOTE(review): the branch body (original line 164) is elided — presumably
// `return Latency;`.
165 unsigned UseIdx = findUseIdx(UseMI, UseOperIdx);
// A ReadAdvance shrinks the effective def-to-use latency for this write.
166 return Latency - STI->getReadAdvanceCycles(UseDesc, UseIdx, WriteID);
168 // If DefIdx does not exist in the model (e.g. implicit defs), then return
169 // unit latency (defaultDefLatency may be too conservative).
// Diagnose a genuinely out-of-range explicit def before giving up: implicit
// and optional defs are legitimately absent from the model's write list.
171 if (SCDesc->isValid() && !DefMI->getOperand(DefOperIdx).isImplicit()
172 && !DefMI->getDesc().OpInfo[DefOperIdx].isOptionalDef()) {
// NOTE(review): the declaration of `Err` (original line 173) is elided.
174 raw_string_ostream ss(Err);
175 ss << "DefIdx " << DefIdx << " exceeds machine model writes for "
// NOTE(review): the value streamed after this message (original line 176)
// and the function's tail — including the unit-latency return promised by
// the comment above — run past the end of this listing.
177 report_fatal_error(ss.str());