Coverage Report

Created: 2017-10-03 07:32

/Users/buildslave/jenkins/sharedspace/clang-stage2-coverage-R@2/llvm/include/llvm/CodeGen/TargetSchedule.h
Line |  Count | Source
   1 |        | //===- llvm/CodeGen/TargetSchedule.h - Sched Machine Model ------*- C++ -*-===//
   2 |        | //
   3 |        | //                     The LLVM Compiler Infrastructure
   4 |        | //
   5 |        | // This file is distributed under the University of Illinois Open Source
   6 |        | // License. See LICENSE.TXT for details.
   7 |        | //
   8 |        | //===----------------------------------------------------------------------===//
   9 |        | //
  10 |        | // This file defines a wrapper around MCSchedModel that allows the interface to
  11 |        | // benefit from information currently only available in TargetInstrInfo.
  12 |        | // Ideally, the scheduling interface would be fully defined in the MC layer.
  13 |        | //
  14 |        | //===----------------------------------------------------------------------===//
  15 |        |
  16 |        | #ifndef LLVM_CODEGEN_TARGETSCHEDULE_H
  17 |        | #define LLVM_CODEGEN_TARGETSCHEDULE_H
  18 |        |
  19 |        | #include "llvm/ADT/Optional.h"
  20 |        | #include "llvm/ADT/SmallVector.h"
  21 |        | #include "llvm/MC/MCInstrItineraries.h"
  22 |        | #include "llvm/MC/MCSchedule.h"
  23 |        | #include "llvm/Target/TargetSubtargetInfo.h"
  24 |        |
  25 |        | namespace llvm {
  26 |        |
  27 |        | class MachineInstr;
  28 |        | class TargetInstrInfo;
  29 |        |
  30 |        | /// Provide an instruction scheduling machine model to CodeGen passes.
  31 |        | class TargetSchedModel {
  32 |        |   // For efficiency, hold a copy of the statically defined MCSchedModel for this
  33 |        |   // processor.
  34 |        |   MCSchedModel SchedModel;
  35 |        |   InstrItineraryData InstrItins;
  36 |        |   const TargetSubtargetInfo *STI = nullptr;
  37 |        |   const TargetInstrInfo *TII = nullptr;
  38 |        |
  39 |        |   SmallVector<unsigned, 16> ResourceFactors;
  40 |        |   unsigned MicroOpFactor; // Multiply to normalize microops to resource units.
  41 |        |   unsigned ResourceLCM;   // Resource units per cycle. Latency normalization factor.
  42 |        |
  43 |        |   unsigned computeInstrLatency(const MCSchedClassDesc &SCDesc) const;
  44 |        |
  45 |        | public:
  46 |   788k |   TargetSchedModel() : SchedModel(MCSchedModel::GetDefaultSchedModel()) {}
  47 |        |
  48 |        |   /// \brief Initialize the machine model for instruction scheduling.
  49 |        |   ///
  50 |        |   /// The machine model API keeps a copy of the top-level MCSchedModel table
  51 |        |   /// indices and may query TargetSubtargetInfo and TargetInstrInfo to resolve
  52 |        |   /// dynamic properties.
  53 |        |   void init(const MCSchedModel &sm, const TargetSubtargetInfo *sti,
  54 |        |             const TargetInstrInfo *tii);
  55 |        |
  56 |        |   /// Return the MCSchedClassDesc for this instruction.
  57 |        |   const MCSchedClassDesc *resolveSchedClass(const MachineInstr *MI) const;
  58 |        |
  59 |        |   /// \brief TargetSubtargetInfo getter.
  60 |  11.1k |   const TargetSubtargetInfo *getSubtargetInfo() const { return STI; }
  61 |        |
  62 |        |   /// \brief TargetInstrInfo getter.
  63 |  61.7M |   const TargetInstrInfo *getInstrInfo() const { return TII; }
  64 |        |
  65 |        |   /// \brief Return true if this machine model includes an instruction-level
  66 |        |   /// scheduling model.
  67 |        |   ///
  68 |        |   /// This is more detailed than the course grain IssueWidth and default
  69 |        |   /// latency properties, but separate from the per-cycle itinerary data.
  70 |        |   bool hasInstrSchedModel() const;
  71 |        |
  72 |  1.37M |   const MCSchedModel *getMCSchedModel() const { return &SchedModel; }
  73 |        |
  74 |        |   /// \brief Return true if this machine model includes cycle-to-cycle itinerary
  75 |        |   /// data.
  76 |        |   ///
  77 |        |   /// This models scheduling at each stage in the processor pipeline.
  78 |        |   bool hasInstrItineraries() const;
  79 |        |
  80 |  4.42M |   const InstrItineraryData *getInstrItineraries() const {
  81 |  4.42M |     if (hasInstrItineraries())
  82 |  11.6k |       return &InstrItins;
  83 |  4.41M |     return nullptr;
  84 |  4.42M |   }
  85 |        |
  86 |        |   /// \brief Return true if this machine model includes an instruction-level
  87 |        |   /// scheduling model or cycle-to-cycle itinerary data.
  88 |  99.6k |   bool hasInstrSchedModelOrItineraries() const {
  89 |    451 |     return hasInstrSchedModel() || hasInstrItineraries();
  90 |  99.6k |   }
  91 |        |
  92 |        |   /// \brief Identify the processor corresponding to the current subtarget.
  93 |  61.7M |   unsigned getProcessorID() const { return SchedModel.getProcessorID(); }
  94 |        |
  95 |        |   /// \brief Maximum number of micro-ops that may be scheduled per cycle.
  96 |  77.1M |   unsigned getIssueWidth() const { return SchedModel.IssueWidth; }
  97 |        |
  98 |        |   /// \brief Return true if new group must begin.
  99 |        |   bool mustBeginGroup(const MachineInstr *MI,
 100 |        |                           const MCSchedClassDesc *SC = nullptr) const;
 101 |        |   /// \brief Return true if current group must end.
 102 |        |   bool mustEndGroup(const MachineInstr *MI,
 103 |        |                           const MCSchedClassDesc *SC = nullptr) const;
 104 |        |
 105 |        |   /// \brief Return the number of issue slots required for this MI.
 106 |        |   unsigned getNumMicroOps(const MachineInstr *MI,
 107 |        |                           const MCSchedClassDesc *SC = nullptr) const;
 108 |        |
 109 |        |   /// \brief Get the number of kinds of resources for this target.
 110 |  51.5M |   unsigned getNumProcResourceKinds() const {
 111 |  51.5M |     return SchedModel.getNumProcResourceKinds();
 112 |  51.5M |   }
 113 |        |
 114 |        |   /// \brief Get a processor resource by ID for convenience.
 115 |  14.3M |   const MCProcResourceDesc *getProcResource(unsigned PIdx) const {
 116 |  14.3M |     return SchedModel.getProcResource(PIdx);
 117 |  14.3M |   }
 118 |        |
 119 |        | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
 120 |        |   const char *getResourceName(unsigned PIdx) const {
 121 |        |     if (!PIdx)
 122 |        |       return "MOps";
 123 |        |     return SchedModel.getProcResource(PIdx)->Name;
 124 |        |   }
 125 |        | #endif
 126 |        |
 127 |        |   using ProcResIter = const MCWriteProcResEntry *;
 128 |        |
 129 |        |   // \brief Get an iterator into the processor resources consumed by this
 130 |        |   // scheduling class.
 131 |  86.8M |   ProcResIter getWriteProcResBegin(const MCSchedClassDesc *SC) const {
 132 |  86.8M |     // The subtarget holds a single resource table for all processors.
 133 |  86.8M |     return STI->getWriteProcResBegin(SC);
 134 |  86.8M |   }
 135 |  86.8M |   ProcResIter getWriteProcResEnd(const MCSchedClassDesc *SC) const {
 136 |  86.8M |     return STI->getWriteProcResEnd(SC);
 137 |  86.8M |   }
 138 |        |
 139 |        |   /// \brief Multiply the number of units consumed for a resource by this factor
 140 |        |   /// to normalize it relative to other resources.
 141 |  55.6M |   unsigned getResourceFactor(unsigned ResIdx) const {
 142 |  55.6M |     return ResourceFactors[ResIdx];
 143 |  55.6M |   }
 144 |        |
 145 |        |   /// \brief Multiply number of micro-ops by this factor to normalize it
 146 |        |   /// relative to other resources.
 147 |  68.3M |   unsigned getMicroOpFactor() const {
 148 |  68.3M |     return MicroOpFactor;
 149 |  68.3M |   }
 150 |        |
 151 |        |   /// \brief Multiply cycle count by this factor to normalize it relative to
 152 |        |   /// other resources. This is the number of resource units per cycle.
 153 |  42.7M |   unsigned getLatencyFactor() const {
 154 |  42.7M |     return ResourceLCM;
 155 |  42.7M |   }
 156 |        |
 157 |        |   /// \brief Number of micro-ops that may be buffered for OOO execution.
 158 |  50.2M |   unsigned getMicroOpBufferSize() const { return SchedModel.MicroOpBufferSize; }
 159 |        |
 160 |        |   /// \brief Number of resource units that may be buffered for OOO execution.
 161 |        |   /// \return The buffer size in resource units or -1 for unlimited.
 162 |      0 |   int getResourceBufferSize(unsigned PIdx) const {
 163 |      0 |     return SchedModel.getProcResource(PIdx)->BufferSize;
 164 |      0 |   }
 165 |        |
 166 |        |   /// \brief Compute operand latency based on the available machine model.
 167 |        |   ///
 168 |        |   /// Compute and return the latency of the given data dependent def and use
 169 |        |   /// when the operand indices are already known. UseMI may be NULL for an
 170 |        |   /// unknown user.
 171 |        |   unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
 172 |        |                                  const MachineInstr *UseMI, unsigned UseOperIdx)
 173 |        |     const;
 174 |        |
 175 |        |   /// \brief Compute the instruction latency based on the available machine
 176 |        |   /// model.
 177 |        |   ///
 178 |        |   /// Compute and return the expected latency of this instruction independent of
 179 |        |   /// a particular use. computeOperandLatency is the preferred API, but this is
 180 |        |   /// occasionally useful to help estimate instruction cost.
 181 |        |   ///
 182 |        |   /// If UseDefaultDefLatency is false and no new machine sched model is
 183 |        |   /// present this method falls back to TII->getInstrLatency with an empty
 184 |        |   /// instruction itinerary (this is so we preserve the previous behavior of the
 185 |        |   /// if converter after moving it to TargetSchedModel).
 186 |        |   unsigned computeInstrLatency(const MachineInstr *MI,
 187 |        |                                bool UseDefaultDefLatency = true) const;
 188 |        |   unsigned computeInstrLatency(unsigned Opcode) const;
 189 |        |
 190 |        |
 191 |        |   /// \brief Output dependency latency of a pair of defs of the same register.
 192 |        |   ///
 193 |        |   /// This is typically one cycle.
 194 |        |   unsigned computeOutputLatency(const MachineInstr *DefMI, unsigned DefIdx,
 195 |        |                                 const MachineInstr *DepMI) const;
 196 |        |
 197 |        |   /// \brief Compute the reciprocal throughput of the given instruction.
 198 |        |   Optional<double> computeInstrRThroughput(const MachineInstr *MI) const;
 199 |        |   Optional<double> computeInstrRThroughput(unsigned Opcode) const;
 200 |        | };
 201 |        |
 202 |        | } // end namespace llvm
 203 |        |
 204 |        | #endif // LLVM_CODEGEN_TARGETSCHEDULE_H
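
For readers of the API covered above, the sketch below shows how a CodeGen pass might set up and query TargetSchedModel. It is not part of the report; it is a minimal example based only on the declarations listed here. The helper name querySchedInfo is hypothetical, and MF.getSubtarget(), STI.getSchedModel(), and STI.getInstrInfo() are assumed to be available from the usual MachineFunction/TargetSubtargetInfo interfaces of this LLVM vintage.

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

// Hypothetical helper: wire up the model once, then query per-instruction
// scheduling properties for a given MachineInstr.
static unsigned querySchedInfo(const MachineFunction &MF, const MachineInstr *MI) {
  const TargetSubtargetInfo &STI = MF.getSubtarget();

  // init() copies the static MCSchedModel and keeps STI/TII around so that
  // variant scheduling classes can be resolved dynamically later.
  TargetSchedModel SchedModel;
  SchedModel.init(STI.getSchedModel(), &STI, STI.getInstrInfo());

  if (!SchedModel.hasInstrSchedModelOrItineraries())
    return SchedModel.computeInstrLatency(MI); // falls back to default latencies

  // Typical queries made by scheduling heuristics.
  unsigned MicroOps = SchedModel.getNumMicroOps(MI); // issue slots required
  unsigned Width = SchedModel.getIssueWidth();       // micro-ops per cycle
  (void)MicroOps;
  (void)Width;
  return SchedModel.computeInstrLatency(MI);
}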