//===- llvm/CodeGen/TargetSchedule.h - Sched Machine Model ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a wrapper around MCSchedModel that allows the interface to
// benefit from information currently only available in TargetInstrInfo.
// Ideally, the scheduling interface would be fully defined in the MC layer.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETSCHEDULE_H
#define LLVM_CODEGEN_TARGETSCHEDULE_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCSchedule.h"

namespace llvm {

class MCInst;
class MachineInstr;
class TargetInstrInfo;

/// Provide an instruction scheduling machine model to CodeGen passes.
class TargetSchedModel {
  // For efficiency, hold a copy of the statically defined MCSchedModel for this
  // processor.
  MCSchedModel SchedModel;
  InstrItineraryData InstrItins;
  const TargetSubtargetInfo *STI = nullptr;
  const TargetInstrInfo *TII = nullptr;

  SmallVector<unsigned, 16> ResourceFactors;

  // Multiply to normalize microops to resource units.
  unsigned MicroOpFactor = 0;

  // Resource units per cycle. Latency normalization factor.
  unsigned ResourceLCM = 0;

  unsigned computeInstrLatency(const MCSchedClassDesc &SCDesc) const;

public:
  TargetSchedModel() : SchedModel(MCSchedModel::GetDefaultSchedModel()) {}

  /// Initialize the machine model for instruction scheduling.
  ///
  /// The machine model API keeps a copy of the top-level MCSchedModel table
  /// indices and may query TargetSubtargetInfo and TargetInstrInfo to resolve
  /// dynamic properties.
  void init(const TargetSubtargetInfo *TSInfo);
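
  // For example, a CodeGen pass might set the model up roughly as follows.
  // This is an illustrative sketch, not code taken from an in-tree user of
  // this API; `MF` is assumed to be a MachineFunction available in the
  // surrounding pass.
  //
  //   TargetSchedModel Model;
  //   Model.init(&MF.getSubtarget());
  //   if (Model.hasInstrSchedModelOrItineraries()) {
  //     unsigned Width = Model.getIssueWidth(); // max micro-ops per cycle
  //     // ... use Width and the queries below to guide scheduling ...
  //   }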

  /// Return the MCSchedClassDesc for this instruction.
  const MCSchedClassDesc *resolveSchedClass(const MachineInstr *MI) const;

  /// TargetSubtargetInfo getter.
  const TargetSubtargetInfo *getSubtargetInfo() const { return STI; }

  /// TargetInstrInfo getter.
  const TargetInstrInfo *getInstrInfo() const { return TII; }

  /// Return true if this machine model includes an instruction-level
  /// scheduling model.
  ///
  /// This is more detailed than the coarse-grain IssueWidth and default
  /// latency properties, but separate from the per-cycle itinerary data.
  bool hasInstrSchedModel() const;

  const MCSchedModel *getMCSchedModel() const { return &SchedModel; }

  /// Return true if this machine model includes cycle-to-cycle itinerary
  /// data.
  ///
  /// This models scheduling at each stage in the processor pipeline.
  bool hasInstrItineraries() const;

  const InstrItineraryData *getInstrItineraries() const {
    if (hasInstrItineraries())
      return &InstrItins;
    return nullptr;
  }

  /// Return true if this machine model includes an instruction-level
  /// scheduling model or cycle-to-cycle itinerary data.
  bool hasInstrSchedModelOrItineraries() const {
    return hasInstrSchedModel() || hasInstrItineraries();
  }

  /// Identify the processor corresponding to the current subtarget.
  unsigned getProcessorID() const { return SchedModel.getProcessorID(); }

  /// Maximum number of micro-ops that may be scheduled per cycle.
  unsigned getIssueWidth() const { return SchedModel.IssueWidth; }

  /// Return true if a new group must begin.
  bool mustBeginGroup(const MachineInstr *MI,
                      const MCSchedClassDesc *SC = nullptr) const;
  /// Return true if the current group must end.
  bool mustEndGroup(const MachineInstr *MI,
                    const MCSchedClassDesc *SC = nullptr) const;

  /// Return the number of issue slots required for this MI.
  unsigned getNumMicroOps(const MachineInstr *MI,
                          const MCSchedClassDesc *SC = nullptr) const;
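
  // As an illustration (not taken from an in-tree pass), an in-order
  // bookkeeping loop could compare accumulated micro-ops against the issue
  // width. `Model` is a TargetSchedModel initialized as in the example above;
  // `CurrMOps` and `MI` are assumed to be maintained by the caller.
  //
  //   unsigned MOps = Model.getNumMicroOps(MI);
  //   if (CurrMOps + MOps > Model.getIssueWidth())
  //     CurrMOps = 0;                // start a new issue group / cycle
  //   CurrMOps += MOps;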

  /// Get the number of kinds of resources for this target.
  unsigned getNumProcResourceKinds() const {
    return SchedModel.getNumProcResourceKinds();
  }

  /// Get a processor resource by ID for convenience.
  const MCProcResourceDesc *getProcResource(unsigned PIdx) const {
    return SchedModel.getProcResource(PIdx);
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  const char *getResourceName(unsigned PIdx) const {
    if (!PIdx)
      return "MOps";
    return SchedModel.getProcResource(PIdx)->Name;
  }
#endif

  using ProcResIter = const MCWriteProcResEntry *;

  // Get an iterator into the processor resources consumed by this
  // scheduling class.
  ProcResIter getWriteProcResBegin(const MCSchedClassDesc *SC) const {
    // The subtarget holds a single resource table for all processors.
    return STI->getWriteProcResBegin(SC);
  }
  ProcResIter getWriteProcResEnd(const MCSchedClassDesc *SC) const {
    return STI->getWriteProcResEnd(SC);
  }
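
  // A minimal sketch of walking the resources consumed by an instruction's
  // scheduling class. `Model` and `MI` are assumed to come from the caller,
  // and the MCWriteProcResEntry field names (ProcResourceIdx, Cycles) are
  // assumed to match the MCSchedule.h definitions used by this snapshot.
  //
  //   if (const MCSchedClassDesc *SC = Model.resolveSchedClass(MI)) {
  //     for (TargetSchedModel::ProcResIter
  //            PI = Model.getWriteProcResBegin(SC),
  //            PE = Model.getWriteProcResEnd(SC); PI != PE; ++PI) {
  //       unsigned ResIdx = PI->ProcResourceIdx;
  //       unsigned Units  = PI->Cycles; // cycles the resource is occupied
  //       // ... weight Units by Model.getResourceFactor(ResIdx) ...
  //     }
  //   }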

  /// Multiply the number of units consumed for a resource by this factor
  /// to normalize it relative to other resources.
  unsigned getResourceFactor(unsigned ResIdx) const {
    return ResourceFactors[ResIdx];
  }

  /// Multiply the number of micro-ops by this factor to normalize it
  /// relative to other resources.
  unsigned getMicroOpFactor() const {
    return MicroOpFactor;
  }

  /// Multiply cycle count by this factor to normalize it relative to
  /// other resources. This is the number of resource units per cycle.
  unsigned getLatencyFactor() const {
    return ResourceLCM;
  }
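
  // Illustrative arithmetic, assuming the factors are derived from the least
  // common multiple of the issue width and each resource's unit count (as is
  // done when the model is initialized): with IssueWidth = 2 and two resource
  // kinds providing 2 and 3 units per cycle, ResourceLCM = lcm(2, 2, 3) = 6,
  // the resource factors are 6/2 = 3 and 6/3 = 2, and MicroOpFactor = 6/2 = 3.
  // Scaling consumed units, micro-ops, and cycles by these factors puts them
  // on one common scale (`Units`, `ResIdx`, and `Latency` are placeholders
  // supplied by the caller):
  //
  //   unsigned ScaledRes = Units * Model.getResourceFactor(ResIdx);
  //   unsigned ScaledLat = Latency * Model.getLatencyFactor();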

  /// Number of micro-ops that may be buffered for OOO execution.
  unsigned getMicroOpBufferSize() const { return SchedModel.MicroOpBufferSize; }

  /// Number of resource units that may be buffered for OOO execution.
  /// \return The buffer size in resource units or -1 for unlimited.
  int getResourceBufferSize(unsigned PIdx) const {
    return SchedModel.getProcResource(PIdx)->BufferSize;
  }

  /// Compute operand latency based on the available machine model.
  ///
  /// Compute and return the latency of the given data-dependent def and use
  /// when the operand indices are already known. UseMI may be NULL for an
  /// unknown user.
  unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
                                 const MachineInstr *UseMI, unsigned UseOperIdx)
    const;
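
  // A hedged usage sketch: computing the latency of a def-use dependence edge.
  // `DefMI`, `DefOp`, `UseMI`, and `UseOp` are assumed to come from the
  // caller's dependence information; `Model` is an initialized
  // TargetSchedModel.
  //
  //   unsigned Lat = Model.computeOperandLatency(DefMI, DefOp, UseMI, UseOp);
  //   // UseMI may be nullptr for an unknown user; the 0 is a placeholder
  //   // use-operand index in that case.
  //   unsigned Worst = Model.computeOperandLatency(DefMI, DefOp, nullptr, 0);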

  /// Compute the instruction latency based on the available machine
  /// model.
  ///
  /// Compute and return the expected latency of this instruction independent of
  /// a particular use. computeOperandLatency is the preferred API, but this is
  /// occasionally useful to help estimate instruction cost.
  ///
  /// If UseDefaultDefLatency is false and no new machine sched model is
  /// present, this method falls back to TII->getInstrLatency with an empty
  /// instruction itinerary (this is so we preserve the previous behavior of the
  /// if converter after moving it to TargetSchedModel).
  unsigned computeInstrLatency(const MachineInstr *MI,
                               bool UseDefaultDefLatency = true) const;
  unsigned computeInstrLatency(const MCInst &Inst) const;
  unsigned computeInstrLatency(unsigned Opcode) const;

  /// Output dependency latency of a pair of defs of the same register.
  ///
  /// This is typically one cycle.
  unsigned computeOutputLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
                                const MachineInstr *DepMI) const;

  /// Compute the reciprocal throughput of the given instruction.
  double computeReciprocalThroughput(const MachineInstr *MI) const;
  double computeReciprocalThroughput(const MCInst &MI) const;
  double computeReciprocalThroughput(unsigned Opcode) const;
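
  // A hedged sketch of combining the latency and throughput queries into a
  // rough per-instruction cost; taking the larger of the two is an arbitrary
  // illustration, not an LLVM heuristic. `Model` and `MI` are assumed to come
  // from the caller.
  //
  //   unsigned Lat = Model.computeInstrLatency(MI);
  //   double RThru = Model.computeReciprocalThroughput(MI);
  //   double Cost = RThru > Lat ? RThru : Lat;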
};

} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETSCHEDULE_H