/Users/buildslave/jenkins/sharedspace/clang-stage2-coverage-R@2/llvm/include/llvm/CodeGen/MachineScheduler.h
Line | Count | Source (jump to first uncovered line) |
1 | | //===- MachineScheduler.h - MachineInstr Scheduling Pass --------*- C++ -*-===// |
2 | | // |
3 | | // The LLVM Compiler Infrastructure |
4 | | // |
5 | | // This file is distributed under the University of Illinois Open Source |
6 | | // License. See LICENSE.TXT for details. |
7 | | // |
8 | | //===----------------------------------------------------------------------===// |
9 | | // |
10 | | // This file provides an interface for customizing the standard MachineScheduler |
11 | | // pass. Note that the entire pass may be replaced as follows: |
12 | | // |
13 | | // <Target>TargetMachine::createPassConfig(PassManagerBase &PM) { |
14 | | // PM.substitutePass(&MachineSchedulerID, &CustomSchedulerPassID); |
15 | | // ...} |
16 | | // |
17 | | // The MachineScheduler pass is only responsible for choosing the regions to be |
18 | | // scheduled. Targets can override the DAG builder and scheduler without |
19 | | // replacing the pass as follows: |
20 | | // |
21 | | // ScheduleDAGInstrs *<Target>PassConfig:: |
22 | | // createMachineScheduler(MachineSchedContext *C) { |
23 | | // return new CustomMachineScheduler(C); |
24 | | // } |
25 | | // |
26 | | // The default scheduler, ScheduleDAGMILive, builds the DAG and drives list |
27 | | // scheduling while updating the instruction stream, register pressure, and live |
28 | | // intervals. Most targets don't need to override the DAG builder and list |
29 | | // scheduler, but subtargets that require custom scheduling heuristics may |
30 | | // plug in an alternate MachineSchedStrategy. The strategy is responsible for |
31 | | // selecting the highest priority node from the list: |
32 | | // |
33 | | // ScheduleDAGInstrs *<Target>PassConfig:: |
34 | | // createMachineScheduler(MachineSchedContext *C) { |
35 | | // return new ScheduleDAGMILive(C, CustomStrategy(C)); |
36 | | // } |
37 | | // |
38 | | // The DAG builder can also be customized in a sense by adding DAG mutations |
39 | | // that will run after DAG building and before list scheduling. DAG mutations |
40 | | // can adjust dependencies based on target-specific knowledge or add weak edges |
41 | | // to aid heuristics: |
42 | | // |
43 | | // ScheduleDAGInstrs *<Target>PassConfig:: |
44 | | // createMachineScheduler(MachineSchedContext *C) { |
45 | | // ScheduleDAGMI *DAG = createGenericSchedLive(C); |
46 | | // DAG->addMutation(new CustomDAGMutation(...)); |
47 | | // return DAG; |
48 | | // } |
49 | | // |
50 | | // A target that supports alternative schedulers can use the |
51 | | // MachineSchedRegistry to allow command line selection. This can be done by |
52 | | // implementing the following boilerplate: |
53 | | // |
54 | | // static ScheduleDAGInstrs *createCustomMachineSched(MachineSchedContext *C) { |
55 | | // return new CustomMachineScheduler(C); |
56 | | // } |
57 | | // static MachineSchedRegistry |
58 | | // SchedCustomRegistry("custom", "Run my target's custom scheduler", |
59 | | // createCustomMachineSched); |
60 | | // |
61 | | // |
62 | | // Finally, subtargets that don't need to implement custom heuristics but would |
63 | | // like to configure the GenericScheduler's policy for a given scheduler region, |
64 | | // including scheduling direction and register pressure tracking policy, can do |
65 | | // this: |
66 | | // |
67 | | // void <SubTarget>Subtarget:: |
68 | | // overrideSchedPolicy(MachineSchedPolicy &Policy, |
69 | | // unsigned NumRegionInstrs) const { |
70 | | // Policy.<Flag> = true; |
71 | | // } |
72 | | // |
73 | | //===----------------------------------------------------------------------===// |
74 | | |
75 | | #ifndef LLVM_CODEGEN_MACHINESCHEDULER_H |
76 | | #define LLVM_CODEGEN_MACHINESCHEDULER_H |
77 | | |
78 | | #include "llvm/ADT/ArrayRef.h" |
79 | | #include "llvm/ADT/BitVector.h" |
80 | | #include "llvm/ADT/STLExtras.h" |
81 | | #include "llvm/ADT/SmallVector.h" |
82 | | #include "llvm/ADT/StringRef.h" |
83 | | #include "llvm/ADT/Twine.h" |
84 | | #include "llvm/Analysis/AliasAnalysis.h" |
85 | | #include "llvm/CodeGen/MachineBasicBlock.h" |
86 | | #include "llvm/CodeGen/MachinePassRegistry.h" |
87 | | #include "llvm/CodeGen/RegisterPressure.h" |
88 | | #include "llvm/CodeGen/ScheduleDAG.h" |
89 | | #include "llvm/CodeGen/ScheduleDAGInstrs.h" |
90 | | #include "llvm/CodeGen/ScheduleDAGMutation.h" |
91 | | #include "llvm/CodeGen/TargetSchedule.h" |
92 | | #include "llvm/Support/CommandLine.h" |
93 | | #include "llvm/Support/ErrorHandling.h" |
94 | | #include <algorithm> |
95 | | #include <cassert> |
96 | | #include <memory> |
97 | | #include <string> |
98 | | #include <vector> |
99 | | |
100 | | namespace llvm { |
101 | | |
102 | | extern cl::opt<bool> ForceTopDown; |
103 | | extern cl::opt<bool> ForceBottomUp; |
104 | | |
105 | | class LiveIntervals; |
106 | | class MachineDominatorTree; |
107 | | class MachineFunction; |
108 | | class MachineInstr; |
109 | | class MachineLoopInfo; |
110 | | class RegisterClassInfo; |
111 | | class SchedDFSResult; |
112 | | class ScheduleHazardRecognizer; |
113 | | class TargetInstrInfo; |
114 | | class TargetPassConfig; |
115 | | class TargetRegisterInfo; |
116 | | |
117 | | /// MachineSchedContext provides enough context from the MachineScheduler pass |
118 | | /// for the target to instantiate a scheduler. |
119 | | struct MachineSchedContext { |
120 | | MachineFunction *MF = nullptr; |
121 | | const MachineLoopInfo *MLI = nullptr; |
122 | | const MachineDominatorTree *MDT = nullptr; |
123 | | const TargetPassConfig *PassConfig = nullptr; |
124 | | AliasAnalysis *AA = nullptr; |
125 | | LiveIntervals *LIS = nullptr; |
126 | | |
127 | | RegisterClassInfo *RegClassInfo; |
128 | | |
129 | | MachineSchedContext(); |
130 | | virtual ~MachineSchedContext(); |
131 | | }; |
132 | | |
133 | | /// MachineSchedRegistry provides a selection of available machine instruction |
134 | | /// schedulers. |
135 | | class MachineSchedRegistry : public MachinePassRegistryNode { |
136 | | public: |
137 | | using ScheduleDAGCtor = ScheduleDAGInstrs *(*)(MachineSchedContext *); |
138 | | |
139 | | // RegisterPassParser requires a (misnamed) FunctionPassCtor type. |
140 | | using FunctionPassCtor = ScheduleDAGCtor; |
141 | | |
142 | | static MachinePassRegistry Registry; |
143 | | |
144 | | MachineSchedRegistry(const char *N, const char *D, ScheduleDAGCtor C) |
145 | 913k | : MachinePassRegistryNode(N, D, (MachinePassCtor)C) { |
146 | 913k | Registry.Add(this); |
147 | 913k | } |
148 | | |
149 | 913k | ~MachineSchedRegistry() { Registry.Remove(this); } |
150 | | |
151 | | // Accessors. |
152 | | // |
153 | 544k | MachineSchedRegistry *getNext() const { |
154 | 544k | return (MachineSchedRegistry *)MachinePassRegistryNode::getNext(); |
155 | 544k | } |
156 | | |
157 | 92.3k | static MachineSchedRegistry *getList() { |
158 | 92.3k | return (MachineSchedRegistry *)Registry.getList(); |
159 | 92.3k | } |
160 | | |
161 | 184k | static void setListener(MachinePassRegistryListener *L) { |
162 | 184k | Registry.setListener(L); |
163 | 184k | } |
164 | | }; |
165 | | |
166 | | class ScheduleDAGMI; |
167 | | |
/// Define a generic scheduling policy for targets that don't provide their own
/// MachineSchedStrategy. This can be overridden for each scheduling region
/// before building the DAG.
struct MachineSchedPolicy {
  /// Allow the scheduler to disable register pressure tracking.
  bool ShouldTrackPressure = false;
  /// Track LaneMasks to allow reordering of independent subregister writes
  /// of the same vreg. \sa MachineSchedStrategy::shouldTrackLaneMasks()
  bool ShouldTrackLaneMasks = false;

  /// Allow the scheduler to force top-down or bottom-up scheduling. If
  /// neither is true, the scheduler runs in both directions and converges.
  bool OnlyTopDown = false;
  bool OnlyBottomUp = false;

  /// Disable the heuristic that tries to fetch nodes from long dependency
  /// chains first.
  bool DisableLatencyHeuristic = false;

  MachineSchedPolicy() = default;
};
189 | | |
190 | | /// MachineSchedStrategy - Interface to the scheduling algorithm used by |
191 | | /// ScheduleDAGMI. |
192 | | /// |
193 | | /// Initialization sequence: |
194 | | /// initPolicy -> shouldTrackPressure -> initialize(DAG) -> registerRoots |
195 | | class MachineSchedStrategy { |
196 | | virtual void anchor(); |
197 | | |
198 | | public: |
199 | 563k | virtual ~MachineSchedStrategy() = default; |
200 | | |
201 | | /// Optionally override the per-region scheduling policy. |
202 | | virtual void initPolicy(MachineBasicBlock::iterator Begin, |
203 | | MachineBasicBlock::iterator End, |
204 | 4.97k | unsigned NumRegionInstrs) {} |
205 | | |
206 | 0 | virtual void dumpPolicy() const {} |
207 | | |
208 | | /// Check if pressure tracking is needed before building the DAG and |
209 | | /// initializing this strategy. Called after initPolicy. |
210 | 4.95k | virtual bool shouldTrackPressure() const { return true; } |
211 | | |
212 | | /// Returns true if lanemasks should be tracked. LaneMask tracking is |
213 | | /// necessary to reorder independent subregister defs for the same vreg. |
214 | | /// This has to be enabled in combination with shouldTrackPressure(). |
215 | 4.95k | virtual bool shouldTrackLaneMasks() const { return false; } |
216 | | |
217 | | // If this method returns true, handling of the scheduling regions |
218 | | // themselves (in case of a scheduling boundary in MBB) will be done |
219 | | // beginning with the topmost region of MBB. |
220 | 4.17M | virtual bool doMBBSchedRegionsTopDown() const { return false; } |
221 | | |
222 | | /// Initialize the strategy after building the DAG for a new region. |
223 | | virtual void initialize(ScheduleDAGMI *DAG) = 0; |
224 | | |
225 | | /// Tell the strategy that MBB is about to be processed. |
226 | 4.19M | virtual void enterMBB(MachineBasicBlock *MBB) {}; |
227 | | |
228 | | /// Tell the strategy that current MBB is done. |
229 | 4.19M | virtual void leaveMBB() {}; |
230 | | |
231 | | /// Notify this strategy that all roots have been released (including those |
232 | | /// that depend on EntrySU or ExitSU). |
233 | 5.33k | virtual void registerRoots() {} |
234 | | |
235 | | /// Pick the next node to schedule, or return NULL. Set IsTopNode to true to |
236 | | /// schedule the node at the top of the unscheduled region. Otherwise it will |
237 | | /// be scheduled at the bottom. |
238 | | virtual SUnit *pickNode(bool &IsTopNode) = 0; |
239 | | |
240 | | /// \brief Scheduler callback to notify that a new subtree is scheduled. |
241 | 0 | virtual void scheduleTree(unsigned SubtreeID) {} |
242 | | |
243 | | /// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled an |
244 | | /// instruction and updated scheduled/remaining flags in the DAG nodes. |
245 | | virtual void schedNode(SUnit *SU, bool IsTopNode) = 0; |
246 | | |
247 | | /// When all predecessor dependencies have been resolved, free this node for |
248 | | /// top-down scheduling. |
249 | | virtual void releaseTopNode(SUnit *SU) = 0; |
250 | | |
251 | | /// When all successor dependencies have been resolved, free this node for |
252 | | /// bottom-up scheduling. |
253 | | virtual void releaseBottomNode(SUnit *SU) = 0; |
254 | | }; |
255 | | |
/// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply
/// schedules machine instructions according to the given MachineSchedStrategy
/// without much extra book-keeping. This is the common functionality between
/// PreRA and PostRA MachineScheduler.
class ScheduleDAGMI : public ScheduleDAGInstrs {
protected:
  AliasAnalysis *AA;
  LiveIntervals *LIS;
  std::unique_ptr<MachineSchedStrategy> SchedImpl;

  /// Topo - A topological ordering for SUnits which permits fast IsReachable
  /// and similar queries.
  ScheduleDAGTopologicalSort Topo;

  /// Ordered list of DAG postprocessing steps.
  std::vector<std::unique_ptr<ScheduleDAGMutation>> Mutations;

  /// The top of the unscheduled zone.
  MachineBasicBlock::iterator CurrentTop;

  /// The bottom of the unscheduled zone.
  MachineBasicBlock::iterator CurrentBottom;

  /// Record the next node in a scheduled cluster.
  const SUnit *NextClusterPred = nullptr;
  const SUnit *NextClusterSucc = nullptr;

#ifndef NDEBUG
  /// The number of instructions scheduled so far. Used to cut off the
  /// scheduler at the point determined by misched-cutoff.
  unsigned NumInstrsScheduled = 0;
#endif

public:
  ScheduleDAGMI(MachineSchedContext *C, std::unique_ptr<MachineSchedStrategy> S,
                bool RemoveKillFlags)
    : ScheduleDAGInstrs(*C->MF, C->MLI, RemoveKillFlags), AA(C->AA),
      LIS(C->LIS), SchedImpl(std::move(S)), Topo(SUnits, &ExitSU) {}

  // Out-of-line destructor provides a vtable anchor for this class.
  ~ScheduleDAGMI() override;

  /// If this method returns true, handling of the scheduling regions
  /// themselves (in case of a scheduling boundary in MBB) will be done
  /// beginning with the topmost region of MBB.
  /// Delegates to the installed MachineSchedStrategy.
  bool doMBBSchedRegionsTopDown() const override {
    return SchedImpl->doMBBSchedRegionsTopDown();
  }

  /// Returns the LiveIntervals instance for use in DAG mutators and such.
  /// May be null in contexts without liveness (e.g. post-RA).
  LiveIntervals *getLIS() const { return LIS; }

  /// Return true if this DAG supports VReg liveness and RegPressure.
  /// Overridden to return true by ScheduleDAGMILive.
  virtual bool hasVRegLiveness() const { return false; }

  /// Add a postprocessing step to the DAG builder.
  /// Mutations are applied in the order that they are added after normal DAG
  /// building and before MachineSchedStrategy initialization.
  ///
  /// ScheduleDAGMI takes ownership of the Mutation object. Null mutations are
  /// silently ignored.
  void addMutation(std::unique_ptr<ScheduleDAGMutation> Mutation) {
    if (Mutation)
      Mutations.push_back(std::move(Mutation));
  }

  /// True if an edge can be added from PredSU to SuccSU without creating
  /// a cycle.
  bool canAddEdge(SUnit *SuccSU, SUnit *PredSU);

  /// Add a DAG edge to the given SU with the given predecessor
  /// dependence data.
  ///
  /// \returns true if the edge may be added without creating a cycle OR if an
  /// equivalent edge already existed (false indicates failure).
  bool addEdge(SUnit *SuccSU, const SDep &PredDep);

  /// Iterators bounding the currently unscheduled zone of the region.
  MachineBasicBlock::iterator top() const { return CurrentTop; }
  MachineBasicBlock::iterator bottom() const { return CurrentBottom; }

  /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
  /// region. This covers all instructions in a block, while schedule() may only
  /// cover a subset.
  void enterRegion(MachineBasicBlock *bb,
                   MachineBasicBlock::iterator begin,
                   MachineBasicBlock::iterator end,
                   unsigned regioninstrs) override;

  /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
  /// reorderable instructions.
  void schedule() override;

  void startBlock(MachineBasicBlock *bb) override;
  void finishBlock() override;

  /// Change the position of an instruction within the basic block and update
  /// live ranges and region boundary iterators.
  void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos);

  /// Accessors for the cluster bookkeeping maintained during scheduling.
  const SUnit *getNextClusterPred() const { return NextClusterPred; }

  const SUnit *getNextClusterSucc() const { return NextClusterSucc; }

  void viewGraph(const Twine &Name, const Twine &Title) override;
  void viewGraph() override;

protected:
  // Top-Level entry points for the schedule() driver...

  /// Apply each ScheduleDAGMutation step in order. This allows different
  /// instances of ScheduleDAGMI to perform custom DAG postprocessing.
  void postprocessDAG();

  /// Release ExitSU predecessors and setup scheduler queues.
  void initQueues(ArrayRef<SUnit*> TopRoots, ArrayRef<SUnit*> BotRoots);

  /// Update scheduler DAG and queues after scheduling an instruction.
  void updateQueues(SUnit *SU, bool IsTopNode);

  /// Reinsert debug_values recorded in ScheduleDAGInstrs::DbgValues.
  void placeDebugValues();

  /// Dump the scheduled Sequence.
  void dumpSchedule() const;

  // Lesser helpers...
  bool checkSchedLimit();

  void findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                             SmallVectorImpl<SUnit*> &BotRoots);

  void releaseSucc(SUnit *SU, SDep *SuccEdge);
  void releaseSuccessors(SUnit *SU);
  void releasePred(SUnit *SU, SDep *PredEdge);
  void releasePredecessors(SUnit *SU);
};
391 | | |
/// ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules
/// machine instructions while updating LiveIntervals and tracking regpressure.
class ScheduleDAGMILive : public ScheduleDAGMI {
protected:
  RegisterClassInfo *RegClassInfo;

  /// Information about DAG subtrees. If DFSResult is NULL, then SchedulerTrees
  /// will be empty.
  SchedDFSResult *DFSResult = nullptr;
  BitVector ScheduledTrees;

  MachineBasicBlock::iterator LiveRegionEnd;

  /// Maps vregs to the SUnits of their uses in the current scheduling region.
  VReg2SUnitMultiMap VRegUses;

  /// Map each SU to its summary of pressure changes. This array is updated for
  /// liveness during bottom-up scheduling. Top-down scheduling may proceed but
  /// has no effect on the pressure diffs.
  PressureDiffs SUPressureDiffs;

  /// Register pressure in this region computed by initRegPressure.
  bool ShouldTrackPressure = false;
  bool ShouldTrackLaneMasks = false;
  IntervalPressure RegPressure;
  RegPressureTracker RPTracker;

  /// List of pressure sets that exceed the target's pressure limit before
  /// scheduling, listed in increasing set ID order. Each pressure set is paired
  /// with its max pressure in the currently scheduled regions.
  std::vector<PressureChange> RegionCriticalPSets;

  /// The top of the unscheduled zone.
  IntervalPressure TopPressure;
  RegPressureTracker TopRPTracker;

  /// The bottom of the unscheduled zone.
  IntervalPressure BotPressure;
  RegPressureTracker BotRPTracker;

  /// True if disconnected subregister components are already renamed.
  /// The renaming is only done on demand if lane masks are tracked.
  bool DisconnectedComponentsRenamed = false;

public:
  ScheduleDAGMILive(MachineSchedContext *C,
                    std::unique_ptr<MachineSchedStrategy> S)
      : ScheduleDAGMI(C, std::move(S), /*RemoveKillFlags=*/false),
        RegClassInfo(C->RegClassInfo), RPTracker(RegPressure),
        TopRPTracker(TopPressure), BotRPTracker(BotPressure) {}

  ~ScheduleDAGMILive() override;

  /// Return true if this DAG supports VReg liveness and RegPressure.
  bool hasVRegLiveness() const override { return true; }

  /// Return true if register pressure tracking is enabled.
  bool isTrackingPressure() const { return ShouldTrackPressure; }

  /// Get current register pressure for the top scheduled instructions.
  const IntervalPressure &getTopPressure() const { return TopPressure; }
  const RegPressureTracker &getTopRPTracker() const { return TopRPTracker; }

  /// Get current register pressure for the bottom scheduled instructions.
  const IntervalPressure &getBotPressure() const { return BotPressure; }
  const RegPressureTracker &getBotRPTracker() const { return BotRPTracker; }

  /// Get register pressure for the entire scheduling region before scheduling.
  const IntervalPressure &getRegPressure() const { return RegPressure; }

  const std::vector<PressureChange> &getRegionCriticalPSets() const {
    return RegionCriticalPSets;
  }

  /// Get the summary of pressure changes for the given node. Indexed by
  /// SUnit::NodeNum.
  PressureDiff &getPressureDiff(const SUnit *SU) {
    return SUPressureDiffs[SU->NodeNum];
  }

  /// Compute a DFSResult after DAG building is complete, and before any
  /// queue comparisons.
  void computeDFSResult();

  /// Return a non-null DFS result if the scheduling strategy initialized it.
  const SchedDFSResult *getDFSResult() const { return DFSResult; }

  BitVector &getScheduledTrees() { return ScheduledTrees; }

  /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
  /// region. This covers all instructions in a block, while schedule() may only
  /// cover a subset.
  void enterRegion(MachineBasicBlock *bb,
                   MachineBasicBlock::iterator begin,
                   MachineBasicBlock::iterator end,
                   unsigned regioninstrs) override;

  /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
  /// reorderable instructions.
  void schedule() override;

  /// Compute the cyclic critical path through the DAG.
  unsigned computeCyclicCriticalPath();

protected:
  // Top-Level entry points for the schedule() driver...

  /// Call ScheduleDAGInstrs::buildSchedGraph with register pressure tracking
  /// enabled. This sets up three trackers. RPTracker will cover the entire DAG
  /// region, TopTracker and BottomTracker will be initialized to the top and
  /// bottom of the DAG region without covering any unscheduled instruction.
  void buildDAGWithRegPressure();

  /// Release ExitSU predecessors and setup scheduler queues. Re-position
  /// the Top RP tracker in case the region beginning has changed.
  void initQueues(ArrayRef<SUnit*> TopRoots, ArrayRef<SUnit*> BotRoots);

  /// Move an instruction and update register pressure.
  void scheduleMI(SUnit *SU, bool IsTopNode);

  // Lesser helpers...

  void initRegPressure();

  void updatePressureDiffs(ArrayRef<RegisterMaskPair> LiveUses);

  void updateScheduledPressure(const SUnit *SU,
                               const std::vector<unsigned> &NewMaxPressure);

  void collectVRegUses(SUnit &SU);
};
521 | | |
522 | | //===----------------------------------------------------------------------===// |
523 | | /// |
524 | | /// Helpers for implementing custom MachineSchedStrategy classes. These take |
525 | | /// care of the book-keeping associated with list scheduling heuristics. |
526 | | /// |
527 | | //===----------------------------------------------------------------------===// |
528 | | |
529 | | /// ReadyQueue encapsulates vector of "ready" SUnits with basic convenience |
530 | | /// methods for pushing and removing nodes. ReadyQueue's are uniquely identified |
531 | | /// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues the SUnit is in. |
532 | | /// |
533 | | /// This is a convenience class that may be used by implementations of |
534 | | /// MachineSchedStrategy. |
535 | | class ReadyQueue { |
536 | | unsigned ID; |
537 | | std::string Name; |
538 | | std::vector<SUnit*> Queue; |
539 | | |
540 | | public: |
541 | 2.21M | ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {} |
542 | | |
543 | 511M | unsigned getID() const { return ID; } |
544 | | |
545 | 0 | StringRef getName() const { return Name; } |
546 | | |
547 | | // SU is in this queue if it's NodeQueueID is a superset of this ID. |
548 | 27.1M | bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); } |
549 | | |
550 | 26.8M | bool empty() const { return Queue.empty(); } |
551 | | |
552 | 18.5M | void clear() { Queue.clear(); } |
553 | | |
554 | 54.5M | unsigned size() const { return Queue.size(); } |
555 | | |
556 | | using iterator = std::vector<SUnit*>::iterator; |
557 | | |
558 | 38.6M | iterator begin() { return Queue.begin(); } |
559 | | |
560 | 80.8M | iterator end() { return Queue.end(); } |
561 | | |
562 | 35.2M | ArrayRef<SUnit*> elements() { return Queue; } |
563 | | |
564 | 27.1M | iterator find(SUnit *SU) { return llvm::find(Queue, SU); } |
565 | | |
566 | 28.7M | void push(SUnit *SU) { |
567 | 28.7M | Queue.push_back(SU); |
568 | 28.7M | SU->NodeQueueId |= ID; |
569 | 28.7M | } |
570 | | |
571 | 28.7M | iterator remove(iterator I) { |
572 | 28.7M | (*I)->NodeQueueId &= ~ID; |
573 | 28.7M | *I = Queue.back(); |
574 | 28.7M | unsigned idx = I - Queue.begin(); |
575 | 28.7M | Queue.pop_back(); |
576 | 28.7M | return Queue.begin() + idx; |
577 | 28.7M | } |
578 | | |
579 | | void dump() const; |
580 | | }; |
581 | | |
582 | | /// Summarize the unscheduled region. |
583 | | struct SchedRemainder { |
584 | | // Critical path through the DAG in expected latency. |
585 | | unsigned CriticalPath; |
586 | | unsigned CyclicCritPath; |
587 | | |
588 | | // Scaled count of micro-ops left to schedule. |
589 | | unsigned RemIssueCount; |
590 | | |
591 | | bool IsAcyclicLatencyLimited; |
592 | | |
593 | | // Unscheduled resources |
594 | | SmallVector<unsigned, 16> RemainingCounts; |
595 | | |
596 | 557k | SchedRemainder() { reset(); } |
597 | | |
598 | 4.63M | void reset() { |
599 | 4.63M | CriticalPath = 0; |
600 | 4.63M | CyclicCritPath = 0; |
601 | 4.63M | RemIssueCount = 0; |
602 | 4.63M | IsAcyclicLatencyLimited = false; |
603 | 4.63M | RemainingCounts.clear(); |
604 | 4.63M | } |
605 | | |
606 | | void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel); |
607 | | }; |
608 | | |
609 | | /// Each Scheduling boundary is associated with ready queues. It tracks the |
610 | | /// current cycle in the direction of movement, and maintains the state |
611 | | /// of "hazards" and other interlocks at the current cycle. |
612 | | class SchedBoundary { |
613 | | public: |
614 | | /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both) |
615 | | enum { |
616 | | TopQID = 1, |
617 | | BotQID = 2, |
618 | | LogMaxQID = 2 |
619 | | }; |
620 | | |
621 | | ScheduleDAGMI *DAG = nullptr; |
622 | | const TargetSchedModel *SchedModel = nullptr; |
623 | | SchedRemainder *Rem = nullptr; |
624 | | |
625 | | ReadyQueue Available; |
626 | | ReadyQueue Pending; |
627 | | |
628 | | ScheduleHazardRecognizer *HazardRec = nullptr; |
629 | | |
630 | | private: |
631 | | /// True if the pending Q should be checked/updated before scheduling another |
632 | | /// instruction. |
633 | | bool CheckPending; |
634 | | |
635 | | /// Number of cycles it takes to issue the instructions scheduled in this |
636 | | /// zone. It is defined as: scheduled-micro-ops / issue-width + stalls. |
637 | | /// See getStalls(). |
638 | | unsigned CurrCycle; |
639 | | |
640 | | /// Micro-ops issued in the current cycle |
641 | | unsigned CurrMOps; |
642 | | |
643 | | /// MinReadyCycle - Cycle of the soonest available instruction. |
644 | | unsigned MinReadyCycle; |
645 | | |
646 | | // The expected latency of the critical path in this scheduled zone. |
647 | | unsigned ExpectedLatency; |
648 | | |
649 | | // The latency of dependence chains leading into this zone. |
650 | | // For each node scheduled bottom-up: DLat = max DLat, N.Depth. |
651 | | // For each cycle scheduled: DLat -= 1. |
652 | | unsigned DependentLatency; |
653 | | |
654 | | /// Count the scheduled (issued) micro-ops that can be retired by |
655 | | /// time=CurrCycle assuming the first scheduled instr is retired at time=0. |
656 | | unsigned RetiredMOps; |
657 | | |
658 | | // Count scheduled resources that have been executed. Resources are |
659 | | // considered executed if they become ready in the time that it takes to |
660 | | // saturate any resource including the one in question. Counts are scaled |
661 | | // for direct comparison with other resources. Counts can be compared with |
662 | | // MOps * getMicroOpFactor and Latency * getLatencyFactor. |
663 | | SmallVector<unsigned, 16> ExecutedResCounts; |
664 | | |
665 | | /// Cache the max count for a single resource. |
666 | | unsigned MaxExecutedResCount; |
667 | | |
668 | | // Cache the critical resources ID in this scheduled zone. |
669 | | unsigned ZoneCritResIdx; |
670 | | |
671 | | // Is the scheduled region resource limited vs. latency limited. |
672 | | bool IsResourceLimited; |
673 | | |
674 | | // Record the highest cycle at which each resource has been reserved by a |
675 | | // scheduled instruction. |
676 | | SmallVector<unsigned, 16> ReservedCycles; |
677 | | |
678 | | #ifndef NDEBUG |
679 | | // Remember the greatest possible stall as an upper bound on the number of |
680 | | // times we should retry the pending queue because of a hazard. |
681 | | unsigned MaxObservedStall; |
682 | | #endif |
683 | | |
684 | | public: |
685 | | /// Pending queues extend the ready queues with the same ID and the |
686 | | /// PendingFlag set. |
687 | | SchedBoundary(unsigned ID, const Twine &Name): |
688 | 1.10M | Available(ID, Name+".A"), Pending(ID << LogMaxQID, Name+".P") { |
689 | 1.10M | reset(); |
690 | 1.10M | } |
691 | | |
692 | | ~SchedBoundary(); |
693 | | |
694 | | void reset(); |
695 | | |
696 | | void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, |
697 | | SchedRemainder *rem); |
698 | | |
699 | 511M | bool isTop() const { |
700 | 511M | return Available.getID() == TopQID; |
701 | 511M | } |
702 | | |
703 | | /// Number of cycles to issue the instructions scheduled in this zone. |
704 | 31.8M | unsigned getCurrCycle() const { return CurrCycle; } |
705 | | |
706 | | /// Micro-ops issued in the current cycle |
707 | 16.1M | unsigned getCurrMOps() const { return CurrMOps; } |
708 | | |
709 | | // The latency of dependence chains leading into this zone. |
710 | 17.6M | unsigned getDependentLatency() const { return DependentLatency; } |
711 | | |
712 | | /// Get the number of latency cycles "covered" by the scheduled |
713 | | /// instructions. This is the larger of the critical path within the zone |
714 | | /// and the number of cycles required to issue the instructions. |
715 | 27.6M | unsigned getScheduledLatency() const { |
716 | 27.6M | return std::max(ExpectedLatency, CurrCycle); |
717 | 27.6M | } |
718 | | |
719 | 111M | unsigned getUnscheduledLatency(SUnit *SU) const { |
720 | 111M | return isTop() ? SU->getHeight()59.6M : SU->getDepth()52.3M ; |
721 | 111M | } |
722 | | |
723 | 256M | unsigned getResourceCount(unsigned ResIdx) const { |
724 | 256M | return ExecutedResCounts[ResIdx]; |
725 | 256M | } |
726 | | |
727 | | /// Get the scaled count of scheduled micro-ops and resources, including |
728 | | /// executed resources. |
729 | 27.5M | unsigned getCriticalCount() const { |
730 | 27.5M | if (!ZoneCritResIdx) |
731 | 11.9M | return RetiredMOps * SchedModel->getMicroOpFactor(); |
732 | 15.6M | return getResourceCount(ZoneCritResIdx); |
733 | 27.5M | } |
734 | | |
735 | | /// Get a scaled count for the minimum execution time of the scheduled |
736 | | /// micro-ops that are ready to execute by getExecutedCount. Notice the |
737 | | /// feedback loop. |
738 | 0 | unsigned getExecutedCount() const { |
739 | 0 | return std::max(CurrCycle * SchedModel->getLatencyFactor(), |
740 | 0 | MaxExecutedResCount); |
741 | 0 | } |
742 | | |
743 | 17.7M | unsigned getZoneCritResIdx() const { return ZoneCritResIdx; } |
744 | | |
745 | | // Is the scheduled region resource limited vs. latency limited. |
746 | 7.10M | bool isResourceLimited() const { return IsResourceLimited; } |
747 | | |
  /// Get the difference between the given SUnit's ready time and the current
  /// cycle.
  unsigned getLatencyStallCycles(SUnit *SU);

  /// Next cycle at which \p Cycles units of resource \p PIdx can be consumed
  /// -- presumably derived from ReservedCycles; confirm in the .cpp.
  unsigned getNextResourceCycle(unsigned PIdx, unsigned Cycles);

  /// Whether issuing \p SU now would incur a hazard/issue-limit stall --
  /// see the implementation for the exact checks.
  bool checkHazard(SUnit *SU);

  /// Maximum unscheduled latency among \p ReadySUs -- presumably via
  /// getUnscheduledLatency; confirm in the .cpp.
  unsigned findMaxLatency(ArrayRef<SUnit*> ReadySUs);

  /// Resource count attributed to the opposite zone; the critical resource
  /// index is returned through \p OtherCritIdx.
  unsigned getOtherResourceCount(unsigned &OtherCritIdx);

  /// Record that \p SU becomes ready at \p ReadyCycle -- presumably moving it
  /// into the Available or Pending queue; confirm in the .cpp.
  void releaseNode(SUnit *SU, unsigned ReadyCycle);

  /// Advance the zone's current cycle to \p NextCycle.
  void bumpCycle(unsigned NextCycle);

  /// Record \p Count executed units of resource \p PIdx.
  void incExecutedResources(unsigned PIdx, unsigned Count);

  /// Account for \p Cycles of resource \p PIdx consumed by an instruction
  /// ready at \p ReadyCycle; returns a scaled count -- confirm in the .cpp.
  unsigned countResource(unsigned PIdx, unsigned Cycles, unsigned ReadyCycle);

  /// Update all zone bookkeeping after scheduling \p SU here.
  void bumpNode(SUnit *SU);

  /// Move instructions from the Pending queue to Available once they become
  /// ready -- presumably relative to CurrCycle; confirm in the .cpp.
  void releasePending();

  /// Remove \p SU from whichever queue currently holds it.
  void removeReady(SUnit *SU);

  /// Call this before applying any other heuristics to the Available queue.
  /// Updates the Available/Pending Q's if necessary and returns the single
  /// available instruction, or NULL if there are multiple candidates.
  SUnit *pickOnlyChoice();

  /// Print the zone's scheduling state (debug builds only).
  void dumpScheduledState() const;
};
781 | | |
782 | | /// Base class for GenericScheduler. This class maintains information about |
783 | | /// scheduling candidates based on TargetSchedModel making it easy to implement |
784 | | /// heuristics for either preRA or postRA scheduling. |
785 | | class GenericSchedulerBase : public MachineSchedStrategy { |
786 | | public: |
787 | | /// Represent the type of SchedCandidate found within a single queue. |
788 | | /// pickNodeBidirectional depends on these listed by decreasing priority. |
789 | | enum CandReason : uint8_t { |
790 | | NoCand, Only1, PhysRegCopy, RegExcess, RegCritical, Stall, Cluster, Weak, |
791 | | RegMax, ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce, |
792 | | TopDepthReduce, TopPathReduce, NextDefUse, NodeOrder}; |
793 | | |
794 | | #ifndef NDEBUG |
795 | | static const char *getReasonStr(GenericSchedulerBase::CandReason Reason); |
796 | | #endif |
797 | | |
798 | | /// Policy for scheduling the next instruction in the candidate's zone. |
799 | | struct CandPolicy { |
800 | | bool ReduceLatency = false; |
801 | | unsigned ReduceResIdx = 0; |
802 | | unsigned DemandResIdx = 0; |
803 | | |
804 | 119M | CandPolicy() = default; |
805 | | |
806 | 5.64M | bool operator==(const CandPolicy &RHS) const { |
807 | 5.64M | return ReduceLatency == RHS.ReduceLatency && |
808 | 5.61M | ReduceResIdx == RHS.ReduceResIdx && |
809 | 5.58M | DemandResIdx == RHS.DemandResIdx; |
810 | 5.64M | } |
811 | 5.64M | bool operator!=(const CandPolicy &RHS) const { |
812 | 5.64M | return !(*this == RHS); |
813 | 5.64M | } |
814 | | }; |
815 | | |
816 | | /// Status of an instruction's critical resource consumption. |
817 | | struct SchedResourceDelta { |
818 | | // Count critical resources in the scheduled region required by SU. |
819 | | unsigned CritResources = 0; |
820 | | |
821 | | // Count critical resources from another region consumed by SU. |
822 | | unsigned DemandedResources = 0; |
823 | | |
824 | 217M | SchedResourceDelta() = default; |
825 | | |
826 | 29.4M | bool operator==(const SchedResourceDelta &RHS) const { |
827 | 29.4M | return CritResources == RHS.CritResources |
828 | 28.7M | && DemandedResources == RHS.DemandedResources; |
829 | 29.4M | } |
830 | 0 | bool operator!=(const SchedResourceDelta &RHS) const { |
831 | 0 | return !operator==(RHS); |
832 | 0 | } |
833 | | }; |
834 | | |
835 | | /// Store the state used by GenericScheduler heuristics, required for the |
836 | | /// lifetime of one invocation of pickNode(). |
837 | | struct SchedCandidate { |
838 | | CandPolicy Policy; |
839 | | |
840 | | // The best SUnit candidate. |
841 | | SUnit *SU; |
842 | | |
843 | | // The reason for this candidate. |
844 | | CandReason Reason; |
845 | | |
846 | | // Whether this candidate should be scheduled at top/bottom. |
847 | | bool AtTop; |
848 | | |
849 | | // Register pressure values for the best candidate. |
850 | | RegPressureDelta RPDelta; |
851 | | |
852 | | // Critical resource consumption of the best candidate. |
853 | | SchedResourceDelta ResDelta; |
854 | | |
855 | 1.30M | SchedCandidate() { reset(CandPolicy()); } |
856 | 86.1M | SchedCandidate(const CandPolicy &Policy) { reset(Policy); } |
857 | | |
858 | 100M | void reset(const CandPolicy &NewPolicy) { |
859 | 100M | Policy = NewPolicy; |
860 | 100M | SU = nullptr; |
861 | 100M | Reason = NoCand; |
862 | 100M | AtTop = false; |
863 | 100M | RPDelta = RegPressureDelta(); |
864 | 100M | ResDelta = SchedResourceDelta(); |
865 | 100M | } |
866 | | |
867 | 112M | bool isValid() const { return SU; } |
868 | | |
869 | | // Copy the status of another candidate without changing policy. |
870 | 30.6M | void setBest(SchedCandidate &Best) { |
871 | 30.6M | assert(Best.Reason != NoCand && "uninitialized Sched candidate"); |
872 | 30.6M | SU = Best.SU; |
873 | 30.6M | Reason = Best.Reason; |
874 | 30.6M | AtTop = Best.AtTop; |
875 | 30.6M | RPDelta = Best.RPDelta; |
876 | 30.6M | ResDelta = Best.ResDelta; |
877 | 30.6M | } |
878 | | |
879 | | void initResourceDelta(const ScheduleDAGMI *DAG, |
880 | | const TargetSchedModel *SchedModel); |
881 | | }; |
882 | | |
883 | | protected: |
884 | | const MachineSchedContext *Context; |
885 | | const TargetSchedModel *SchedModel = nullptr; |
886 | | const TargetRegisterInfo *TRI = nullptr; |
887 | | |
888 | | SchedRemainder Rem; |
889 | | |
890 | 557k | GenericSchedulerBase(const MachineSchedContext *C) : Context(C) {} |
891 | | |
892 | | void setPolicy(CandPolicy &Policy, bool IsPostRA, SchedBoundary &CurrZone, |
893 | | SchedBoundary *OtherZone); |
894 | | |
895 | | #ifndef NDEBUG |
896 | | void traceCandidate(const SchedCandidate &Cand); |
897 | | #endif |
898 | | }; |
899 | | |
900 | | /// GenericScheduler shrinks the unscheduled zone using heuristics to balance |
901 | | /// the schedule. |
902 | | class GenericScheduler : public GenericSchedulerBase { |
903 | | public: |
904 | | GenericScheduler(const MachineSchedContext *C): |
905 | | GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ"), |
906 | 549k | Bot(SchedBoundary::BotQID, "BotQ") {} |
907 | | |
908 | | void initPolicy(MachineBasicBlock::iterator Begin, |
909 | | MachineBasicBlock::iterator End, |
910 | | unsigned NumRegionInstrs) override; |
911 | | |
912 | | void dumpPolicy() const override; |
913 | | |
914 | 11.7M | bool shouldTrackPressure() const override { |
915 | 11.7M | return RegionPolicy.ShouldTrackPressure; |
916 | 11.7M | } |
917 | | |
918 | 11.7M | bool shouldTrackLaneMasks() const override { |
919 | 11.7M | return RegionPolicy.ShouldTrackLaneMasks; |
920 | 11.7M | } |
921 | | |
922 | | void initialize(ScheduleDAGMI *dag) override; |
923 | | |
924 | | SUnit *pickNode(bool &IsTopNode) override; |
925 | | |
926 | | void schedNode(SUnit *SU, bool IsTopNode) override; |
927 | | |
928 | 10.9M | void releaseTopNode(SUnit *SU) override { |
929 | 10.9M | if (SU->isScheduled) |
930 | 46.6k | return; |
931 | 10.9M | |
932 | 10.9M | Top.releaseNode(SU, SU->TopReadyCycle); |
933 | 10.9M | TopCand.SU = nullptr; |
934 | 10.9M | } |
935 | | |
936 | 16.4M | void releaseBottomNode(SUnit *SU) override { |
937 | 16.4M | if (SU->isScheduled) |
938 | 309k | return; |
939 | 16.4M | |
940 | 16.1M | Bot.releaseNode(SU, SU->BotReadyCycle); |
941 | 16.1M | BotCand.SU = nullptr; |
942 | 16.1M | } |
943 | | |
944 | | void registerRoots() override; |
945 | | |
946 | | protected: |
947 | | ScheduleDAGMILive *DAG = nullptr; |
948 | | |
949 | | MachineSchedPolicy RegionPolicy; |
950 | | |
951 | | // State of the top and bottom scheduled instruction boundaries. |
952 | | SchedBoundary Top; |
953 | | SchedBoundary Bot; |
954 | | |
955 | | /// Candidate last picked from Top boundary. |
956 | | SchedCandidate TopCand; |
957 | | /// Candidate last picked from Bot boundary. |
958 | | SchedCandidate BotCand; |
959 | | |
960 | | void checkAcyclicLatency(); |
961 | | |
962 | | void initCandidate(SchedCandidate &Cand, SUnit *SU, bool AtTop, |
963 | | const RegPressureTracker &RPTracker, |
964 | | RegPressureTracker &TempTracker); |
965 | | |
966 | | void tryCandidate(SchedCandidate &Cand, |
967 | | SchedCandidate &TryCand, |
968 | | SchedBoundary *Zone); |
969 | | |
970 | | SUnit *pickNodeBidirectional(bool &IsTopNode); |
971 | | |
972 | | void pickNodeFromQueue(SchedBoundary &Zone, |
973 | | const CandPolicy &ZonePolicy, |
974 | | const RegPressureTracker &RPTracker, |
975 | | SchedCandidate &Candidate); |
976 | | |
977 | | void reschedulePhysRegCopies(SUnit *SU, bool isTop); |
978 | | }; |
979 | | |
980 | | /// PostGenericScheduler - Interface to the scheduling algorithm used by |
981 | | /// ScheduleDAGMI. |
982 | | /// |
983 | | /// Callbacks from ScheduleDAGMI: |
984 | | /// initPolicy -> initialize(DAG) -> registerRoots -> pickNode ... |
985 | | class PostGenericScheduler : public GenericSchedulerBase { |
986 | | ScheduleDAGMI *DAG; |
987 | | SchedBoundary Top; |
988 | | SmallVector<SUnit*, 8> BotRoots; |
989 | | |
990 | | public: |
991 | | PostGenericScheduler(const MachineSchedContext *C): |
992 | 8.60k | GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ") {} |
993 | | |
994 | 8.60k | ~PostGenericScheduler() override = default; |
995 | | |
996 | | void initPolicy(MachineBasicBlock::iterator Begin, |
997 | | MachineBasicBlock::iterator End, |
998 | 14.6k | unsigned NumRegionInstrs) override { |
999 | 14.6k | /* no configurable policy */ |
1000 | 14.6k | } |
1001 | | |
1002 | | /// PostRA scheduling does not track pressure. |
1003 | 0 | bool shouldTrackPressure() const override { return false; } |
1004 | | |
1005 | | void initialize(ScheduleDAGMI *Dag) override; |
1006 | | |
1007 | | void registerRoots() override; |
1008 | | |
1009 | | SUnit *pickNode(bool &IsTopNode) override; |
1010 | | |
1011 | 0 | void scheduleTree(unsigned SubtreeID) override { |
1012 | 0 | llvm_unreachable("PostRA scheduler does not support subtree analysis."); |
1013 | 0 | } |
1014 | | |
1015 | | void schedNode(SUnit *SU, bool IsTopNode) override; |
1016 | | |
1017 | 26.5k | void releaseTopNode(SUnit *SU) override { |
1018 | 26.5k | if (SU->isScheduled) |
1019 | 0 | return; |
1020 | 26.5k | Top.releaseNode(SU, SU->TopReadyCycle); |
1021 | 26.5k | } |
1022 | | |
1023 | | // Only called for roots. |
1024 | 8.07k | void releaseBottomNode(SUnit *SU) override { |
1025 | 8.07k | BotRoots.push_back(SU); |
1026 | 8.07k | } |
1027 | | |
1028 | | protected: |
1029 | | void tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand); |
1030 | | |
1031 | | void pickNodeFromQueue(SchedCandidate &Cand); |
1032 | | }; |
1033 | | |
/// Create the standard converging machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
/// Adds default DAG mutations.
ScheduleDAGMILive *createGenericSchedLive(MachineSchedContext *C);

/// Create a generic scheduler with no vreg liveness or DAG mutation passes.
ScheduleDAGMI *createGenericSchedPostRA(MachineSchedContext *C);

/// DAG mutation factory; presumably constrains the DAG so neighboring loads
/// are scheduled adjacently ("clustered") -- confirm in MachineScheduler.cpp.
std::unique_ptr<ScheduleDAGMutation>
createLoadClusterDAGMutation(const TargetInstrInfo *TII,
                             const TargetRegisterInfo *TRI);

/// Store-side counterpart of createLoadClusterDAGMutation.
std::unique_ptr<ScheduleDAGMutation>
createStoreClusterDAGMutation(const TargetInstrInfo *TII,
                              const TargetRegisterInfo *TRI);

/// DAG mutation factory; presumably adds edges constraining physical-register
/// COPY instructions -- confirm in MachineScheduler.cpp.
std::unique_ptr<ScheduleDAGMutation>
createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
                               const TargetRegisterInfo *TRI);
1053 | | |
1054 | | } // end namespace llvm |
1055 | | |
1056 | | #endif // LLVM_CODEGEN_MACHINESCHEDULER_H |