X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=include%2Fllvm%2FCodeGen%2FTargetSchedule.h;h=19a172beeaaad98dcf153510473e0dd5b681cd33;hb=3955d288873e12d76b8b172321670728d8e026e0;hp=541ee66c0ccc5621faf89371a1405e71ad04aa37;hpb=f43fe1d163b34e1de5d045773728c571b59d1cdd;p=oota-llvm.git

diff --git a/include/llvm/CodeGen/TargetSchedule.h b/include/llvm/CodeGen/TargetSchedule.h
index 541ee66c0cc..19a172beeaa 100644
--- a/include/llvm/CodeGen/TargetSchedule.h
+++ b/include/llvm/CodeGen/TargetSchedule.h
@@ -13,11 +13,13 @@
 //
 //===----------------------------------------------------------------------===//
 
-#ifndef LLVM_TARGET_TARGETSCHEDMODEL_H
-#define LLVM_TARGET_TARGETSCHEDMODEL_H
+#ifndef LLVM_CODEGEN_TARGETSCHEDULE_H
+#define LLVM_CODEGEN_TARGETSCHEDULE_H
 
-#include "llvm/MC/MCSchedule.h"
+#include "llvm/ADT/SmallVector.h"
 #include "llvm/MC/MCInstrItineraries.h"
+#include "llvm/MC/MCSchedule.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
 
 namespace llvm {
 
@@ -34,45 +36,143 @@ class TargetSchedModel {
   InstrItineraryData InstrItins;
   const TargetSubtargetInfo *STI;
   const TargetInstrInfo *TII;
+
+  SmallVector<unsigned, 16> ResourceFactors;
+  unsigned MicroOpFactor; // Multiply to normalize microops to resource units.
+  unsigned ResourceLCM; // Resource units per cycle. Latency normalization factor.
 public:
   TargetSchedModel(): STI(0), TII(0) {}
 
+  /// \brief Initialize the machine model for instruction scheduling.
+  ///
+  /// The machine model API keeps a copy of the top-level MCSchedModel table
+  /// indices and may query TargetSubtargetInfo and TargetInstrInfo to resolve
+  /// dynamic properties.
   void init(const MCSchedModel &sm, const TargetSubtargetInfo *sti,
             const TargetInstrInfo *tii);
 
+  /// Return the MCSchedClassDesc for this instruction.
+  const MCSchedClassDesc *resolveSchedClass(const MachineInstr *MI) const;
+
+  /// \brief TargetInstrInfo getter.
   const TargetInstrInfo *getInstrInfo() const { return TII; }
 
-  /// Return true if this machine model includes an instruction-level scheduling
-  /// model. This is more detailed than the course grain IssueWidth and default
+  /// \brief Return true if this machine model includes an instruction-level
+  /// scheduling model.
+  ///
+  /// This is more detailed than the coarse-grained IssueWidth and default
   /// latency properties, but separate from the per-cycle itinerary data.
-  bool hasInstrSchedModel() const { return SchedModel.hasInstrSchedModel(); }
+  bool hasInstrSchedModel() const;
 
-  /// Return true if this machine model includes cycle-to-cycle itinerary
-  /// data. This models scheduling at each stage in the processor pipeline.
-  bool hasInstrItineraries() const { return !InstrItins.isEmpty(); }
+  const MCSchedModel *getMCSchedModel() const { return &SchedModel; }
 
-  /// computeOperandLatency - Compute and return the latency of the given data
-  /// dependent def and use when the operand indices are already known. UseMI
-  /// may be NULL for an unknown user.
+  /// \brief Return true if this machine model includes cycle-to-cycle itinerary
+  /// data.
   ///
-  /// FindMin may be set to get the minimum vs. expected latency. Minimum
-  /// latency is used for scheduling groups, while expected latency is for
-  /// instruction cost and critical path.
-  unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
-                                 const MachineInstr *UseMI, unsigned UseOperIdx,
-                                 bool FindMin) const;
+  /// This models scheduling at each stage in the processor pipeline.
+  bool hasInstrItineraries() const;
+
+  const InstrItineraryData *getInstrItineraries() const {
+    if (hasInstrItineraries())
+      return &InstrItins;
+    return 0;
+  }
+
+  /// \brief Identify the processor corresponding to the current subtarget.
   unsigned getProcessorID() const { return SchedModel.getProcessorID(); }
+
+  /// \brief Maximum number of micro-ops that may be scheduled per cycle.
   unsigned getIssueWidth() const { return SchedModel.IssueWidth; }
 
-private:
-  /// getDefLatency is a helper for computeOperandLatency. Return the
-  /// instruction's latency if operand lookup is not required.
-  /// Otherwise return -1.
-  int getDefLatency(const MachineInstr *DefMI, bool FindMin) const;
+  /// \brief Return the number of issue slots required for this MI.
+  unsigned getNumMicroOps(const MachineInstr *MI,
+                          const MCSchedClassDesc *SC = 0) const;
 
-  /// Return the MCSchedClassDesc for this instruction.
-  const MCSchedClassDesc *resolveSchedClass(const MachineInstr *MI) const;
+  /// \brief Get the number of kinds of resources for this target.
+  unsigned getNumProcResourceKinds() const {
+    return SchedModel.getNumProcResourceKinds();
+  }
+
+  /// \brief Get a processor resource by ID for convenience.
+  const MCProcResourceDesc *getProcResource(unsigned PIdx) const {
+    return SchedModel.getProcResource(PIdx);
+  }
+
+#ifndef NDEBUG
+  const char *getResourceName(unsigned PIdx) const {
+    if (!PIdx)
+      return "MOps";
+    return SchedModel.getProcResource(PIdx)->Name;
+  }
+#endif
+
+  typedef const MCWriteProcResEntry *ProcResIter;
+
+  /// \brief Get an iterator into the processor resources consumed by this
+  /// scheduling class.
+  ProcResIter getWriteProcResBegin(const MCSchedClassDesc *SC) const {
+    // The subtarget holds a single resource table for all processors.
+    return STI->getWriteProcResBegin(SC);
+  }
+  ProcResIter getWriteProcResEnd(const MCSchedClassDesc *SC) const {
+    return STI->getWriteProcResEnd(SC);
+  }
+
+  /// \brief Multiply the number of units consumed for a resource by this factor
+  /// to normalize it relative to other resources.
+  unsigned getResourceFactor(unsigned ResIdx) const {
+    return ResourceFactors[ResIdx];
+  }
+
+  /// \brief Multiply number of micro-ops by this factor to normalize it
+  /// relative to other resources.
+  unsigned getMicroOpFactor() const {
+    return MicroOpFactor;
+  }
+
+  /// \brief Multiply cycle count by this factor to normalize it relative to
+  /// other resources. This is the number of resource units per cycle.
+  unsigned getLatencyFactor() const {
+    return ResourceLCM;
+  }
+
+  /// \brief Number of micro-ops that may be buffered for OOO execution.
+  unsigned getMicroOpBufferSize() const { return SchedModel.MicroOpBufferSize; }
+
+  /// \brief Number of resource units that may be buffered for OOO execution.
+  /// \return The buffer size in resource units or -1 for unlimited.
+  int getResourceBufferSize(unsigned PIdx) const {
+    return SchedModel.getProcResource(PIdx)->BufferSize;
+  }
+
+  /// \brief Compute operand latency based on the available machine model.
+  ///
+  /// Compute and return the latency of the given data dependent def and use
+  /// when the operand indices are already known. UseMI may be NULL for an
+  /// unknown user.
+  unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
+                                 const MachineInstr *UseMI, unsigned UseOperIdx)
+    const;
+
+  /// \brief Compute the instruction latency based on the available machine
+  /// model.
+  ///
+  /// Compute and return the expected latency of this instruction independent of
+  /// a particular use. computeOperandLatency is the preferred API, but this is
+  /// occasionally useful to help estimate instruction cost.
+  ///
+  /// If UseDefaultDefLatency is false and no new machine sched model is
+  /// present, this method falls back to TII->getInstrLatency with an empty
+  /// instruction itinerary (this is so we preserve the previous behavior of the
+  /// if converter after moving it to TargetSchedModel).
+  unsigned computeInstrLatency(const MachineInstr *MI,
+                               bool UseDefaultDefLatency = true) const;
+
+  /// \brief Output dependency latency of a pair of defs of the same register.
+  ///
+  /// This is typically one cycle.
+  unsigned computeOutputLatency(const MachineInstr *DefMI, unsigned DefIdx,
+                                const MachineInstr *DepMI) const;
 };
 
 } // namespace llvm
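Usage sketch. The API added above is normally queried per MachineInstr by a scheduling pass after TargetSchedModel::init() has been called for the current subtarget. The following is a minimal illustration, not part of the patch: addResourcePressure is a hypothetical helper, the MachineInstr is assumed to come from the caller, and the only members used are those declared in this header (getNumMicroOps, resolveSchedClass, getWriteProcResBegin/End, getMicroOpFactor, getResourceFactor) together with the MCWriteProcResEntry fields ProcResourceIdx and Cycles from llvm/MC/MCSchedule.h.

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/MC/MCSchedule.h"
#include <algorithm>

using namespace llvm;

// Hypothetical helper: accumulate the normalized resource units one
// instruction consumes, with one counter per processor resource kind.
// Index 0 counts micro-ops, matching getResourceName(0) == "MOps".
static void addResourcePressure(const TargetSchedModel &SchedModel,
                                const MachineInstr *MI,
                                SmallVectorImpl<unsigned> &Pressure) {
  // Reserve at least one counter so the micro-op slot (index 0) always exists.
  Pressure.assign(std::max(SchedModel.getNumProcResourceKinds(), 1u), 0);

  // The micro-op count is available with or without a detailed model; scale it
  // by the micro-op factor so it is comparable to per-resource unit counts.
  Pressure[0] += SchedModel.getNumMicroOps(MI) * SchedModel.getMicroOpFactor();

  // Per-resource cycles require the instruction-level machine model.
  if (!SchedModel.hasInstrSchedModel())
    return;

  const MCSchedClassDesc *SC = SchedModel.resolveSchedClass(MI);
  if (!SC->isValid())
    return;

  // Walk the resources written by this scheduling class and normalize each
  // cycle count by its resource factor.
  for (TargetSchedModel::ProcResIter PI = SchedModel.getWriteProcResBegin(SC),
                                     PE = SchedModel.getWriteProcResEnd(SC);
       PI != PE; ++PI)
    Pressure[PI->ProcResourceIdx] +=
        PI->Cycles * SchedModel.getResourceFactor(PI->ProcResourceIdx);
}

Because each count is scaled by its resource factor (and micro-ops by getMicroOpFactor()), the per-resource totals are directly comparable with one another and with cycle counts multiplied by getLatencyFactor(), which is what the normalization factors in this header are for.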