//===- MachineScheduler.h - MachineInstr Scheduling Pass --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides an interface for customizing the standard MachineScheduler
// pass. Note that the entire pass may be replaced as follows:
//
// <Target>TargetMachine::createPassConfig(PassManagerBase &PM) {
//   PM.substitutePass(&MachineSchedulerID, &CustomSchedulerPassID);
//   ...}
//
// The MachineScheduler pass is only responsible for choosing the regions to be
// scheduled. Targets can override the DAG builder and scheduler without
// replacing the pass as follows:
//
// ScheduleDAGInstrs *<Target>PassConfig::
// createMachineScheduler(MachineSchedContext *C) {
//   return new CustomMachineScheduler(C);
// }
//
// The default scheduler, ScheduleDAGMILive, builds the DAG and drives list
// scheduling while updating the instruction stream, register pressure, and live
// intervals. Most targets don't need to override the DAG builder and list
// scheduler, but subtargets that require custom scheduling heuristics may
// plug in an alternate MachineSchedStrategy. The strategy is responsible for
// selecting the highest priority node from the list:
//
// ScheduleDAGInstrs *<Target>PassConfig::
// createMachineScheduler(MachineSchedContext *C) {
//   return new ScheduleDAGMILive(C, std::make_unique<CustomStrategy>(C));
// }
//
// The DAG builder can also be customized by adding DAG mutations that run
// after DAG building and before list scheduling. DAG mutations can adjust
// dependencies based on target-specific knowledge or add weak edges to aid
// heuristics:
//
// ScheduleDAGInstrs *<Target>PassConfig::
// createMachineScheduler(MachineSchedContext *C) {
//   ScheduleDAGMI *DAG = createGenericSchedLive(C);
//   DAG->addMutation(std::make_unique<CustomDAGMutation>(...));
//   return DAG;
// }
//
// A target that supports alternative schedulers can use the
// MachineSchedRegistry to allow command line selection. This can be done by
// implementing the following boilerplate:
//
// static ScheduleDAGInstrs *createCustomMachineSched(MachineSchedContext *C) {
//   return new CustomMachineScheduler(C);
// }
// static MachineSchedRegistry
// SchedCustomRegistry("custom", "Run my target's custom scheduler",
//                     createCustomMachineSched);
//
// Finally, subtargets that don't need to implement custom heuristics but would
// like to configure the GenericScheduler's policy for a given scheduler region,
// including scheduling direction and register pressure tracking policy, can do
// this:
//
// void <SubTarget>Subtarget::
// overrideSchedPolicy(MachineSchedPolicy &Policy,
//                     unsigned NumRegionInstrs) const {
//   Policy.<Flag> = true;
// }
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINESCHEDULER_H
#define LLVM_CODEGEN_MACHINESCHEDULER_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachinePassRegistry.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <functional>
#include <list>
#include <map>
#include <memory>
#include <string>
#include <vector>

namespace llvm {

namespace MISched {
enum Direction {
  Unspecified,
  TopDown,
  BottomUp,
  Bidirectional,
};
} // namespace MISched

extern cl::opt<MISched::Direction> PreRADirection;
extern cl::opt<bool> VerifyScheduling;
#ifndef NDEBUG
extern cl::opt<bool> ViewMISchedDAGs;
extern cl::opt<bool> PrintDAGs;
#else
extern const bool ViewMISchedDAGs;
extern const bool PrintDAGs;
#endif

class AAResults;
class LiveIntervals;
class MachineDominatorTree;
class MachineFunction;
class MachineInstr;
class MachineLoopInfo;
class RegisterClassInfo;
class SchedDFSResult;
class ScheduleHazardRecognizer;
class TargetInstrInfo;
class TargetPassConfig;
class TargetRegisterInfo;

/// MachineSchedContext provides enough context from the MachineScheduler pass
/// for the target to instantiate a scheduler.
struct MachineSchedContext {
  MachineFunction *MF = nullptr;
  const MachineLoopInfo *MLI = nullptr;
  const MachineDominatorTree *MDT = nullptr;
  const TargetPassConfig *PassConfig = nullptr;
  AAResults *AA = nullptr;
  LiveIntervals *LIS = nullptr;

  RegisterClassInfo *RegClassInfo;

  MachineSchedContext();
  MachineSchedContext &operator=(const MachineSchedContext &other) = delete;
  MachineSchedContext(const MachineSchedContext &other) = delete;
  virtual ~MachineSchedContext();
};

/// MachineSchedRegistry provides a selection of available machine instruction
/// schedulers.
class MachineSchedRegistry
    : public MachinePassRegistryNode<
          ScheduleDAGInstrs *(*)(MachineSchedContext *)> {
public:
  using ScheduleDAGCtor = ScheduleDAGInstrs *(*)(MachineSchedContext *);

  // RegisterPassParser requires a (misnamed) FunctionPassCtor type.
  using FunctionPassCtor = ScheduleDAGCtor;

  static MachinePassRegistry<ScheduleDAGCtor> Registry;

  MachineSchedRegistry(const char *N, const char *D, ScheduleDAGCtor C)
      : MachinePassRegistryNode(N, D, C) {
    Registry.Add(this);
  }

  ~MachineSchedRegistry() { Registry.Remove(this); }

  // Accessors.
  //
  MachineSchedRegistry *getNext() const {
    return (MachineSchedRegistry *)MachinePassRegistryNode::getNext();
  }

  static MachineSchedRegistry *getList() {
    return (MachineSchedRegistry *)Registry.getList();
  }

  static void setListener(MachinePassRegistryListener<FunctionPassCtor> *L) {
    Registry.setListener(L);
  }
};

class ScheduleDAGMI;

/// Define a generic scheduling policy for targets that don't provide their own
/// MachineSchedStrategy. This can be overridden for each scheduling region
/// before building the DAG.
struct MachineSchedPolicy {
  // Allow the scheduler to disable register pressure tracking.
  bool ShouldTrackPressure = false;
  /// Track LaneMasks to allow reordering of independent subregister writes
  /// of the same vreg. \sa MachineSchedStrategy::shouldTrackLaneMasks()
  bool ShouldTrackLaneMasks = false;

  // Allow the scheduler to force top-down or bottom-up scheduling. If neither
  // is true, the scheduler runs in both directions and converges.
  bool OnlyTopDown = false;
  bool OnlyBottomUp = false;

  // Disable heuristic that tries to fetch nodes from long dependency chains
  // first.
  bool DisableLatencyHeuristic = false;

  // Compute DFSResult for use in scheduling heuristics.
  bool ComputeDFSResult = false;

  MachineSchedPolicy() = default;
};
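
// For example, a subtarget hook can flip these flags per region. The sketch
// below is illustrative only ("MyTargetSubtarget" and the size threshold are
// hypothetical); it forces bottom-up scheduling for small regions while
// keeping pressure tracking enabled:
//
// void MyTargetSubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
//                                             unsigned NumRegionInstrs) const {
//   Policy.ShouldTrackPressure = true;   // keep RP tracking on
//   if (NumRegionInstrs < 16)            // hypothetical threshold
//     Policy.OnlyBottomUp = true;        // single-direction scheduling
// }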

/// MachineSchedStrategy - Interface to the scheduling algorithm used by
/// ScheduleDAGMI.
///
/// Initialization sequence:
///   initPolicy -> shouldTrackPressure -> initialize(DAG) -> registerRoots
class MachineSchedStrategy {
  virtual void anchor();

public:
  virtual ~MachineSchedStrategy() = default;

  /// Optionally override the per-region scheduling policy.
  virtual void initPolicy(MachineBasicBlock::iterator Begin,
                          MachineBasicBlock::iterator End,
                          unsigned NumRegionInstrs) {}

  virtual MachineSchedPolicy getPolicy() const { return {}; }
  virtual void dumpPolicy() const {}

  /// Check if pressure tracking is needed before building the DAG and
  /// initializing this strategy. Called after initPolicy.
  virtual bool shouldTrackPressure() const { return true; }

  /// Returns true if lanemasks should be tracked. LaneMask tracking is
  /// necessary to reorder independent subregister defs for the same vreg.
  /// This has to be enabled in combination with shouldTrackPressure().
  virtual bool shouldTrackLaneMasks() const { return false; }

  // If this method returns true, handling of the scheduling regions
  // themselves (in case of a scheduling boundary in MBB) will be done
  // beginning with the topmost region of MBB.
  virtual bool doMBBSchedRegionsTopDown() const { return false; }

  /// Initialize the strategy after building the DAG for a new region.
  virtual void initialize(ScheduleDAGMI *DAG) = 0;

  /// Tell the strategy that MBB is about to be processed.
  virtual void enterMBB(MachineBasicBlock *MBB) {}

  /// Tell the strategy that current MBB is done.
  virtual void leaveMBB() {}

  /// Notify this strategy that all roots have been released (including those
  /// that depend on EntrySU or ExitSU).
  virtual void registerRoots() {}

  /// Pick the next node to schedule, or return NULL. Set IsTopNode to true to
  /// schedule the node at the top of the unscheduled region. Otherwise it will
  /// be scheduled at the bottom.
  virtual SUnit *pickNode(bool &IsTopNode) = 0;

  /// Scheduler callback to notify that a new subtree is scheduled.
  virtual void scheduleTree(unsigned SubtreeID) {}

  /// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled an
  /// instruction and updated scheduled/remaining flags in the DAG nodes.
  virtual void schedNode(SUnit *SU, bool IsTopNode) = 0;

  /// When all predecessor dependencies have been resolved, free this node for
  /// top-down scheduling.
  virtual void releaseTopNode(SUnit *SU) = 0;

  /// When all successor dependencies have been resolved, free this node for
  /// bottom-up scheduling.
  virtual void releaseBottomNode(SUnit *SU) = 0;
};
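
// To make the contract concrete, here is a minimal strategy sketch. The name
// "TrivialSchedStrategy" is hypothetical and the heuristic (plain stack order,
// top-down only) is purely illustrative, not a recommended policy:
//
// class TrivialSchedStrategy : public MachineSchedStrategy {
//   std::vector<SUnit *> ReadyQ;
// public:
//   void initialize(ScheduleDAGMI *DAG) override { ReadyQ.clear(); }
//   SUnit *pickNode(bool &IsTopNode) override {
//     if (ReadyQ.empty())
//       return nullptr;
//     IsTopNode = true;            // always extend the top zone
//     SUnit *SU = ReadyQ.back();
//     ReadyQ.pop_back();
//     return SU;
//   }
//   void schedNode(SUnit *SU, bool IsTopNode) override {}
//   void releaseTopNode(SUnit *SU) override { ReadyQ.push_back(SU); }
//   void releaseBottomNode(SUnit *SU) override {}
// };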

/// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply
/// schedules machine instructions according to the given MachineSchedStrategy
/// without much extra book-keeping. This is the common functionality between
/// PreRA and PostRA MachineScheduler.
class ScheduleDAGMI : public ScheduleDAGInstrs {
protected:
  AAResults *AA;
  LiveIntervals *LIS;
  std::unique_ptr<MachineSchedStrategy> SchedImpl;

  /// Ordered list of DAG postprocessing steps.
  std::vector<std::unique_ptr<ScheduleDAGMutation>> Mutations;

  /// The top of the unscheduled zone.
  MachineBasicBlock::iterator CurrentTop;

  /// The bottom of the unscheduled zone.
  MachineBasicBlock::iterator CurrentBottom;

  /// Record the next node in a scheduled cluster.
  const SUnit *NextClusterPred = nullptr;
  const SUnit *NextClusterSucc = nullptr;

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  /// The number of instructions scheduled so far. Used to cut off the
  /// scheduler at the point determined by misched-cutoff.
  unsigned NumInstrsScheduled = 0;
#endif

public:
  ScheduleDAGMI(MachineSchedContext *C, std::unique_ptr<MachineSchedStrategy> S,
                bool RemoveKillFlags)
      : ScheduleDAGInstrs(*C->MF, C->MLI, RemoveKillFlags), AA(C->AA),
        LIS(C->LIS), SchedImpl(std::move(S)) {}

  // Provide a vtable anchor.
  ~ScheduleDAGMI() override;

  /// If this method returns true, handling of the scheduling regions
  /// themselves (in case of a scheduling boundary in MBB) will be done
  /// beginning with the topmost region of MBB.
  bool doMBBSchedRegionsTopDown() const override {
    return SchedImpl->doMBBSchedRegionsTopDown();
  }

  // Returns LiveIntervals instance for use in DAG mutators and such.
  LiveIntervals *getLIS() const { return LIS; }

  /// Return true if this DAG supports VReg liveness and RegPressure.
  virtual bool hasVRegLiveness() const { return false; }

  /// Add a postprocessing step to the DAG builder.
  /// Mutations are applied in the order that they are added after normal DAG
  /// building and before MachineSchedStrategy initialization.
  ///
  /// ScheduleDAGMI takes ownership of the Mutation object.
  void addMutation(std::unique_ptr<ScheduleDAGMutation> Mutation) {
    if (Mutation)
      Mutations.push_back(std::move(Mutation));
  }

  MachineBasicBlock::iterator top() const { return CurrentTop; }
  MachineBasicBlock::iterator bottom() const { return CurrentBottom; }

  /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
  /// region. This covers all instructions in a block, while schedule() may only
  /// cover a subset.
  void enterRegion(MachineBasicBlock *bb,
                   MachineBasicBlock::iterator begin,
                   MachineBasicBlock::iterator end,
                   unsigned regioninstrs) override;

  /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
  /// reorderable instructions.
  void schedule() override;

  void startBlock(MachineBasicBlock *bb) override;
  void finishBlock() override;

  /// Change the position of an instruction within the basic block and update
  /// live ranges and region boundary iterators.
  void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos);

  const SUnit *getNextClusterPred() const { return NextClusterPred; }

  const SUnit *getNextClusterSucc() const { return NextClusterSucc; }

  void viewGraph(const Twine &Name, const Twine &Title) override;
  void viewGraph() override;

protected:
  // Top-Level entry points for the schedule() driver...

  /// Apply each ScheduleDAGMutation step in order. This allows different
  /// instances of ScheduleDAGMI to perform custom DAG postprocessing.
  void postProcessDAG();

  /// Release ExitSU predecessors and setup scheduler queues.
  void initQueues(ArrayRef<SUnit*> TopRoots, ArrayRef<SUnit*> BotRoots);

  /// Update scheduler DAG and queues after scheduling an instruction.
  void updateQueues(SUnit *SU, bool IsTopNode);

  /// Reinsert debug_values recorded in ScheduleDAGInstrs::DbgValues.
  void placeDebugValues();

  /// Dump the scheduled Sequence.
  void dumpSchedule() const;
  /// Print execution trace of the schedule top-down or bottom-up.
  void dumpScheduleTraceTopDown() const;
  void dumpScheduleTraceBottomUp() const;

  // Lesser helpers...
  bool checkSchedLimit();

  void findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                             SmallVectorImpl<SUnit*> &BotRoots);

  void releaseSucc(SUnit *SU, SDep *SuccEdge);
  void releaseSuccessors(SUnit *SU);
  void releasePred(SUnit *SU, SDep *PredEdge);
  void releasePredecessors(SUnit *SU);
};

/// ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules
/// machine instructions while updating LiveIntervals and tracking regpressure.
class ScheduleDAGMILive : public ScheduleDAGMI {
protected:
  RegisterClassInfo *RegClassInfo;

  /// Information about DAG subtrees. If DFSResult is NULL, then ScheduledTrees
  /// will be empty.
  SchedDFSResult *DFSResult = nullptr;
  BitVector ScheduledTrees;

  MachineBasicBlock::iterator LiveRegionEnd;

  /// Maps vregs to the SUnits of their uses in the current scheduling region.
  VReg2SUnitMultiMap VRegUses;

  // Map each SU to its summary of pressure changes. This array is updated for
  // liveness during bottom-up scheduling. Top-down scheduling may proceed but
  // has no effect on the pressure diffs.
  PressureDiffs SUPressureDiffs;

  /// Register pressure in this region computed by initRegPressure.
  bool ShouldTrackPressure = false;
  bool ShouldTrackLaneMasks = false;
  IntervalPressure RegPressure;
  RegPressureTracker RPTracker;

  /// List of pressure sets that exceed the target's pressure limit before
  /// scheduling, listed in increasing set ID order. Each pressure set is paired
  /// with its max pressure in the currently scheduled regions.
  std::vector<PressureChange> RegionCriticalPSets;

  /// The top of the unscheduled zone.
  IntervalPressure TopPressure;
  RegPressureTracker TopRPTracker;

  /// The bottom of the unscheduled zone.
  IntervalPressure BotPressure;
  RegPressureTracker BotRPTracker;

public:
  ScheduleDAGMILive(MachineSchedContext *C,
                    std::unique_ptr<MachineSchedStrategy> S)
      : ScheduleDAGMI(C, std::move(S), /*RemoveKillFlags=*/false),
        RegClassInfo(C->RegClassInfo), RPTracker(RegPressure),
        TopRPTracker(TopPressure), BotRPTracker(BotPressure) {}

  ~ScheduleDAGMILive() override;

  /// Return true if this DAG supports VReg liveness and RegPressure.
  bool hasVRegLiveness() const override { return true; }

  /// Return true if register pressure tracking is enabled.
  bool isTrackingPressure() const { return ShouldTrackPressure; }

  /// Get current register pressure for the top scheduled instructions.
  const IntervalPressure &getTopPressure() const { return TopPressure; }
  const RegPressureTracker &getTopRPTracker() const { return TopRPTracker; }

  /// Get current register pressure for the bottom scheduled instructions.
  const IntervalPressure &getBotPressure() const { return BotPressure; }
  const RegPressureTracker &getBotRPTracker() const { return BotRPTracker; }

  /// Get register pressure for the entire scheduling region before scheduling.
  const IntervalPressure &getRegPressure() const { return RegPressure; }

  const std::vector<PressureChange> &getRegionCriticalPSets() const {
    return RegionCriticalPSets;
  }

  PressureDiff &getPressureDiff(const SUnit *SU) {
    return SUPressureDiffs[SU->NodeNum];
  }
  const PressureDiff &getPressureDiff(const SUnit *SU) const {
    return SUPressureDiffs[SU->NodeNum];
  }

  /// Compute a DFSResult after DAG building is complete, and before any
  /// queue comparisons.
  void computeDFSResult();

  /// Return a non-null DFS result if the scheduling strategy initialized it.
  const SchedDFSResult *getDFSResult() const { return DFSResult; }

  BitVector &getScheduledTrees() { return ScheduledTrees; }

  /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
  /// region. This covers all instructions in a block, while schedule() may only
  /// cover a subset.
  void enterRegion(MachineBasicBlock *bb,
                   MachineBasicBlock::iterator begin,
                   MachineBasicBlock::iterator end,
                   unsigned regioninstrs) override;

  /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
  /// reorderable instructions.
  void schedule() override;

  /// Compute the cyclic critical path through the DAG.
  unsigned computeCyclicCriticalPath();

  void dump() const override;

protected:
  // Top-Level entry points for the schedule() driver...

  /// Call ScheduleDAGInstrs::buildSchedGraph with register pressure tracking
  /// enabled. This sets up three trackers. RPTracker will cover the entire DAG
  /// region, TopRPTracker and BotRPTracker will be initialized to the top and
  /// bottom of the DAG region without covering any unscheduled instruction.
  void buildDAGWithRegPressure();

  /// Release ExitSU predecessors and setup scheduler queues. Re-position
  /// the Top RP tracker in case the region beginning has changed.
  void initQueues(ArrayRef<SUnit*> TopRoots, ArrayRef<SUnit*> BotRoots);

  /// Move an instruction and update register pressure.
  void scheduleMI(SUnit *SU, bool IsTopNode);

  // Lesser helpers...

  void initRegPressure();

  void updatePressureDiffs(ArrayRef<VRegMaskOrUnit> LiveUses);

  void updateScheduledPressure(const SUnit *SU,
                               const std::vector<unsigned> &NewMaxPressure);

  void collectVRegUses(SUnit &SU);
};

//===----------------------------------------------------------------------===//
///
/// Helpers for implementing custom MachineSchedStrategy classes. These take
/// care of the book-keeping associated with list scheduling heuristics.
///
//===----------------------------------------------------------------------===//

/// ReadyQueue encapsulates a vector of "ready" SUnits with basic convenience
/// methods for pushing and removing nodes. ReadyQueues are uniquely identified
/// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues the SUnit is in.
///
/// This is a convenience class that may be used by implementations of
/// MachineSchedStrategy.
class ReadyQueue {
  unsigned ID;
  std::string Name;
  std::vector<SUnit*> Queue;

public:
  ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {}

  unsigned getID() const { return ID; }

  StringRef getName() const { return Name; }

  // SU is in this queue if its NodeQueueId is a superset of this ID.
  bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); }

  bool empty() const { return Queue.empty(); }

  void clear() { Queue.clear(); }

  unsigned size() const { return Queue.size(); }

  using iterator = std::vector<SUnit*>::iterator;

  iterator begin() { return Queue.begin(); }

  iterator end() { return Queue.end(); }

  ArrayRef<SUnit*> elements() { return Queue; }

  iterator find(SUnit *SU) { return llvm::find(Queue, SU); }

  void push(SUnit *SU) {
    Queue.push_back(SU);
    SU->NodeQueueId |= ID;
  }

  iterator remove(iterator I) {
    (*I)->NodeQueueId &= ~ID;
    *I = Queue.back();
    unsigned idx = I - Queue.begin();
    Queue.pop_back();
    return Queue.begin() + idx;
  }

  void dump() const;
};
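
// Usage sketch (illustrative only): queues carry distinct power-of-two IDs so
// SUnit::NodeQueueId records membership in several queues as a bit mask.
//
//   ReadyQueue TopQ(/*id=*/1, "TopQ");
//   ReadyQueue BotQ(/*id=*/2, "BotQ");
//   TopQ.push(SU);                 // SU->NodeQueueId |= 1
//   BotQ.push(SU);                 // SU->NodeQueueId is now 3 (both queues)
//   ReadyQueue::iterator I = TopQ.find(SU);
//   if (I != TopQ.end())
//     TopQ.remove(I);              // clears only TopQ's bit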

/// Summarize the unscheduled region.
struct SchedRemainder {
  // Critical path through the DAG in expected latency.
  unsigned CriticalPath;
  unsigned CyclicCritPath;

  // Scaled count of micro-ops left to schedule.
  unsigned RemIssueCount;

  bool IsAcyclicLatencyLimited;

  // Unscheduled resources
  SmallVector<unsigned, 16> RemainingCounts;

  SchedRemainder() { reset(); }

  void reset() {
    CriticalPath = 0;
    CyclicCritPath = 0;
    RemIssueCount = 0;
    IsAcyclicLatencyLimited = false;
    RemainingCounts.clear();
  }

  void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);
};

/// ResourceSegments are a collection of intervals closed on the
/// left and opened on the right:
///
///     list{ [a1, b1), [a2, b2), ..., [a_N, b_N) }
///
/// The collection has the following properties:
///
/// 1. The list is ordered: a_i < b_i and b_i < a_(i+1)
///
/// 2. The intervals in the collection do not intersect each other.
///
/// A \ref ResourceSegments instance represents the cycle
/// reservation history of an individual resource instance.
class ResourceSegments {
public:
  /// Represents an interval of discrete integer values closed on
  /// the left and open on the right: [a, b).
  typedef std::pair<int64_t, int64_t> IntervalTy;

  /// Adds an interval [a, b) to the collection of the instance.
  ///
  /// When adding [a, b) to the collection, the operation merges the
  /// adjacent intervals. For example
  ///
  ///       0  1  2  3  4  5  6  7  8  9  10
  ///       [-----)  [--)     [--)
  ///     +       [--)
  ///     = [-----------)     [--)
  ///
  /// To be able to debug duplicate resource usage, the function
  /// asserts that no interval is added if it overlaps any of the
  /// intervals in the collection. We can require this because by
  /// definition a \ref ResourceSegments is attached only to an
  /// individual resource instance.
  void add(IntervalTy A, const unsigned CutOff = 10);

public:
  /// Checks whether intervals intersect.
  static bool intersects(IntervalTy A, IntervalTy B);

  /// These functions return the interval used by a resource in bottom-up and
  /// top-down scheduling.
  ///
  /// Consider an instruction that uses resources X0, X1 and X2 as follows:
  ///
  /// X0 X1 X1 X2    +--------+-------------+--------------+
  ///                |Resource|AcquireAtCycle|ReleaseAtCycle|
  ///                +--------+-------------+--------------+
  ///                |   X0   |     0       |       1      |
  ///                +--------+-------------+--------------+
  ///                |   X1   |     1       |       3      |
  ///                +--------+-------------+--------------+
  ///                |   X2   |     3       |       4      |
  ///                +--------+-------------+--------------+
  ///
  /// If we can schedule the instruction at cycle C, we need to
  /// compute the interval of the resource as follows:
  ///
  /// # TOP DOWN SCHEDULING
  ///
  /// Cycle scheduling flows to the _right_, in the same direction
  /// as time.
  ///
  ///       C      1      2      3      4      5  ...
  /// ------|------|------|------|------|------|----->
  ///       X0     X1     X1     X2   ---> direction of time
  /// X0    [C, C+1)
  /// X1           [C+1,      C+3)
  /// X2                         [C+3, C+4)
  ///
  /// Therefore, the formula to compute the interval for a resource
  /// of an instruction that can be scheduled at cycle C in top-down
  /// scheduling is:
  ///
  ///       [C+AcquireAtCycle, C+ReleaseAtCycle)
  ///
  ///
  /// # BOTTOM UP SCHEDULING
  ///
  /// Cycle scheduling flows to the _left_, in the opposite direction
  /// of time.
  ///
  /// In bottom up scheduling, the scheduling happens in opposite
  /// direction to the execution of the cycles of the
  /// instruction. When the instruction is scheduled at cycle `C`,
  /// the resources are allocated in the past relative to `C`:
  ///
  ///       2      1      C     -1     -2     -3     -4     -5  ...
  /// <-----|------|------|------|------|------|------|------|---
  ///                     X0     X1     X1     X2   ---> direction of time
  /// X0           (C+1, C]
  /// X1                  (C,        C-2]
  /// X2                              (C-2, C-3]
  ///
  /// Therefore, the formula to compute the interval for a resource
  /// of an instruction that can be scheduled at cycle C in bottom-up
  /// scheduling is:
  ///
  ///       [C-ReleaseAtCycle+1, C-AcquireAtCycle+1)
  ///
  ///
  /// NOTE: In both cases, the number of cycles booked by a
  /// resource is the value (ReleaseAtCycle - AcquireAtCycle).
  static IntervalTy getResourceIntervalBottom(unsigned C, unsigned AcquireAtCycle,
                                              unsigned ReleaseAtCycle) {
    return std::make_pair<long, long>((long)C - (long)ReleaseAtCycle + 1L,
                                      (long)C - (long)AcquireAtCycle + 1L);
  }
  static IntervalTy getResourceIntervalTop(unsigned C, unsigned AcquireAtCycle,
                                           unsigned ReleaseAtCycle) {
    return std::make_pair<long, long>((long)C + (long)AcquireAtCycle,
                                      (long)C + (long)ReleaseAtCycle);
  }
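
  // A worked example (illustrative): take the X1 row from the table above
  // (AcquireAtCycle = 1, ReleaseAtCycle = 3) and a candidate cycle C = 10:
  //
  //   getResourceIntervalTop(10, 1, 3)    == [11, 13)  // [C+1, C+3)
  //   getResourceIntervalBottom(10, 1, 3) == [8, 10)   // [C-3+1, C-1+1)
  //
  // Either way the resource is booked for ReleaseAtCycle - AcquireAtCycle = 2
  // cycles.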

private:
  /// Finds the first cycle in which a resource can be allocated.
  ///
  /// The function uses the \p IntervalBuilder [*] to build a
  /// resource interval [a, b) out of the input parameters \p
  /// CurrCycle, \p AcquireAtCycle and \p ReleaseAtCycle.
  ///
  /// The function then loops through the intervals in the ResourceSegments
  /// and shifts the interval [a, b) and the ReturnCycle to the
  /// right until there is no intersection between the intervals of
  /// the \ref ResourceSegments instance and the new shifted [a, b). When
  /// this condition is met, the ReturnCycle (which corresponds to the
  /// cycle in which the resource can be allocated) is returned.
  ///
  ///               c = CurrCycle in input
  ///               c   1   2   3   4   5   6   7   8   9   10 ... ---> (time
  ///               flow)
  ///  ResourceSegments...  [---)   [-------)           [-----------)
  ///               c   [1     3)  -> AcquireAtCycle=1, ReleaseAtCycle=3
  ///                 ++c   [1     3)
  ///                     ++c   [1     3)
  ///                         ++c   [1     3)
  ///                             ++c   [1     3)
  ///                                 ++c   [1     3)    ---> returns c
  ///                                 incremented by 5 (c+5)
  ///
  ///
  /// Notice that for bottom-up scheduling the diagram is slightly
  /// different because the current cycle c is always on the right
  /// of the interval [a, b) (see \ref
  /// `getResourceIntervalBottom`). This is because the cycle
  /// increments for bottom-up scheduling move in the direction
  /// opposite to the direction of time:
  ///
  ///     --------> direction of time.
  ///     XXYZZZ    (resource usage)
  ///     --------> direction of top-down execution cycles.
  ///     <-------- direction of bottom-up execution cycles.
  ///
  /// Even though bottom-up scheduling moves against the flow of
  /// time, the algorithm used to find the first free slot in between
  /// intervals is the same as for top-down scheduling.
  ///
  /// [*] See \ref `getResourceIntervalTop` and
  /// \ref `getResourceIntervalBottom` to see how such resource intervals
  /// are built.
  unsigned getFirstAvailableAt(
      unsigned CurrCycle, unsigned AcquireAtCycle, unsigned ReleaseAtCycle,
      std::function<IntervalTy(unsigned, unsigned, unsigned)> IntervalBuilder)
      const;

public:
  /// getFirstAvailableAtFromBottom and getFirstAvailableAtFromTop
  /// should be merged in a single function in which a function that
  /// creates the `NewInterval` is passed as a parameter.
  unsigned getFirstAvailableAtFromBottom(unsigned CurrCycle,
                                         unsigned AcquireAtCycle,
                                         unsigned ReleaseAtCycle) const {
    return getFirstAvailableAt(CurrCycle, AcquireAtCycle, ReleaseAtCycle,
                               getResourceIntervalBottom);
  }
  unsigned getFirstAvailableAtFromTop(unsigned CurrCycle,
                                      unsigned AcquireAtCycle,
                                      unsigned ReleaseAtCycle) const {
    return getFirstAvailableAt(CurrCycle, AcquireAtCycle, ReleaseAtCycle,
                               getResourceIntervalTop);
  }

private:
  std::list<IntervalTy> _Intervals;
  /// Merge all adjacent intervals in the collection. For all pairs
  /// of adjacent intervals, it performs [a, b) + [b, c) -> [a, c).
  ///
  /// Before performing the merge operation, the intervals are
  /// sorted with \ref sort_predicate.
  void sortAndMerge();

public:
  // Constructor for an empty set.
  explicit ResourceSegments() {}
  bool empty() const { return _Intervals.empty(); }
  explicit ResourceSegments(const std::list<IntervalTy> &Intervals)
      : _Intervals(Intervals) {
    sortAndMerge();
  }

  friend bool operator==(const ResourceSegments &c1,
                         const ResourceSegments &c2) {
    return c1._Intervals == c2._Intervals;
  }
  friend llvm::raw_ostream &operator<<(llvm::raw_ostream &os,
                                       const ResourceSegments &Segments) {
    os << "{ ";
    for (auto p : Segments._Intervals)
      os << "[" << p.first << ", " << p.second << "), ";
    os << "}\n";
    return os;
  }
};
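
// A small usage sketch (illustrative): constructing a collection from a list
// of intervals normalizes it via sortAndMerge(), and the stream operator
// prints each [a, b) interval:
//
//   ResourceSegments RS({{4, 7}, {1, 3}});   // stored sorted: [1, 3), [4, 7)
//   llvm::errs() << RS;                      // prints "{ [1, 3), [4, 7), }"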

/// Each Scheduling boundary is associated with ready queues. It tracks the
/// current cycle in the direction of movement, and maintains the state
/// of "hazards" and other interlocks at the current cycle.
class SchedBoundary {
public:
  /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
  enum {
    TopQID = 1,
    BotQID = 2,
    LogMaxQID = 2
  };

  ScheduleDAGMI *DAG = nullptr;
  const TargetSchedModel *SchedModel = nullptr;
  SchedRemainder *Rem = nullptr;

  ReadyQueue Available;
  ReadyQueue Pending;

  ScheduleHazardRecognizer *HazardRec = nullptr;

private:
  /// True if the pending Q should be checked/updated before scheduling another
  /// instruction.
  bool CheckPending;

  /// Number of cycles it takes to issue the instructions scheduled in this
  /// zone. It is defined as: scheduled-micro-ops / issue-width + stalls.
  /// See getStalls().
  unsigned CurrCycle;

  /// Micro-ops issued in the current cycle
  unsigned CurrMOps;

  /// MinReadyCycle - Cycle of the soonest available instruction.
  unsigned MinReadyCycle;

  // The expected latency of the critical path in this scheduled zone.
  unsigned ExpectedLatency;

  // The latency of dependence chains leading into this zone.
  // For each node scheduled bottom-up: DLat = max DLat, N.Depth.
  // For each cycle scheduled: DLat -= 1.
  unsigned DependentLatency;

  /// Count the scheduled (issued) micro-ops that can be retired by
  /// time=CurrCycle assuming the first scheduled instr is retired at time=0.
  unsigned RetiredMOps;

  // Count scheduled resources that have been executed. Resources are
  // considered executed if they become ready in the time that it takes to
  // saturate any resource including the one in question. Counts are scaled
  // for direct comparison with other resources. Counts can be compared with
  // MOps * getMicroOpFactor and Latency * getLatencyFactor.
  SmallVector<unsigned, 16> ExecutedResCounts;

  /// Cache the max count for a single resource.
  unsigned MaxExecutedResCount;

  // Cache the critical resources ID in this scheduled zone.
  unsigned ZoneCritResIdx;

  // Is the scheduled region resource limited vs. latency limited.
  bool IsResourceLimited;

  /// Record how resources have been allocated across the cycles of
  /// the execution.
  std::map<unsigned, ResourceSegments> ReservedResourceSegments;
  std::vector<unsigned> ReservedCycles;
  /// For each PIdx, stores first index into ReservedResourceSegments that
  /// corresponds to it.
  ///
  /// For example, consider the following 3 resources (ResourceCount =
  /// 3):
  ///
  ///   +------------+--------+
  ///   |ResourceName|NumUnits|
  ///   +------------+--------+
  ///   |     X      |    2   |
  ///   +------------+--------+
  ///   |     Y      |    3   |
  ///   +------------+--------+
  ///   |     Z      |    1   |
  ///   +------------+--------+
  ///
  /// In this case, the total number of resource instances is 6. The
  /// vector \ref ReservedResourceSegments will have a slot for each instance.
  /// The vector \ref ReservedCyclesIndex will track at what index the first
  /// instance of the resource is found in the vector of \ref
  /// ReservedResourceSegments:
  ///
  ///                              Indexes of instances in
  ///                              ReservedResourceSegments
  ///
  ///                              0   1   2   3   4  5
  /// ReservedCyclesIndex[0] = 0; [X0, X1,
  /// ReservedCyclesIndex[1] = 2;          Y0, Y1, Y2
  /// ReservedCyclesIndex[2] = 5;                     Z
  SmallVector<unsigned, 16> ReservedCyclesIndex;

  // For each PIdx, stores the resource group IDs of its subunits
  SmallVector<APInt, 16> ResourceGroupSubUnitMasks;

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  // Remember the greatest possible stall as an upper bound on the number of
  // times we should retry the pending queue because of a hazard.
  unsigned MaxObservedStall;
#endif

public:
  /// Pending queues extend the ready queues with the same ID and the
  /// PendingFlag set.
  SchedBoundary(unsigned ID, const Twine &Name):
    Available(ID, Name+".A"), Pending(ID << LogMaxQID, Name+".P") {
    reset();
  }
  SchedBoundary &operator=(const SchedBoundary &other) = delete;
  SchedBoundary(const SchedBoundary &other) = delete;
  ~SchedBoundary();

  void reset();

  void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel,
            SchedRemainder *rem);

  bool isTop() const {
    return Available.getID() == TopQID;
  }

  /// Number of cycles to issue the instructions scheduled in this zone.
  unsigned getCurrCycle() const { return CurrCycle; }

  /// Micro-ops issued in the current cycle
  unsigned getCurrMOps() const { return CurrMOps; }

  // The latency of dependence chains leading into this zone.
  unsigned getDependentLatency() const { return DependentLatency; }

  /// Get the number of latency cycles "covered" by the scheduled
  /// instructions. This is the larger of the critical path within the zone
  /// and the number of cycles required to issue the instructions.
  unsigned getScheduledLatency() const {
    return std::max(ExpectedLatency, CurrCycle);
  }

  unsigned getUnscheduledLatency(SUnit *SU) const {
    return isTop() ? SU->getHeight() : SU->getDepth();
  }

  unsigned getResourceCount(unsigned ResIdx) const {
    return ExecutedResCounts[ResIdx];
  }

  /// Get the scaled count of scheduled micro-ops and resources, including
  /// executed resources.
  unsigned getCriticalCount() const {
    if (!ZoneCritResIdx)
      return RetiredMOps * SchedModel->getMicroOpFactor();
    return getResourceCount(ZoneCritResIdx);
  }

  /// Get a scaled count for the minimum execution time of the scheduled
  /// micro-ops that are ready to execute by getExecutedCount. Notice the
  /// feedback loop.
  unsigned getExecutedCount() const {
    return std::max(CurrCycle * SchedModel->getLatencyFactor(),
                    MaxExecutedResCount);
  }

  unsigned getZoneCritResIdx() const { return ZoneCritResIdx; }

  // Is the scheduled region resource limited vs. latency limited.
  bool isResourceLimited() const { return IsResourceLimited; }

  /// Get the difference between the given SUnit's ready time and the current
  /// cycle.
  unsigned getLatencyStallCycles(SUnit *SU);

  unsigned getNextResourceCycleByInstance(unsigned InstanceIndex,
                                          unsigned ReleaseAtCycle,
                                          unsigned AcquireAtCycle);

  std::pair<unsigned, unsigned> getNextResourceCycle(const MCSchedClassDesc *SC,
                                                     unsigned PIdx,
                                                     unsigned ReleaseAtCycle,
                                                     unsigned AcquireAtCycle);

  bool isUnbufferedGroup(unsigned PIdx) const {
    return SchedModel->getProcResource(PIdx)->SubUnitsIdxBegin &&
           !SchedModel->getProcResource(PIdx)->BufferSize;
  }

  bool checkHazard(SUnit *SU);

  unsigned findMaxLatency(ArrayRef<SUnit*> ReadySUs);

  unsigned getOtherResourceCount(unsigned &OtherCritIdx);

  /// Release SU to make it ready. If SU has no hazard, remove it from the
  /// pending queue (if it is in it) and push it into the available queue.
  /// Otherwise, push SU into the pending queue.
  ///
  /// @param SU The unit to be released.
  /// @param ReadyCycle The cycle at which the unit becomes ready.
  /// @param InPQueue Whether SU is already in the pending queue.
  /// @param Idx Position offset in the pending queue (if in it).
  void releaseNode(SUnit *SU, unsigned ReadyCycle, bool InPQueue,
                   unsigned Idx = 0);

  void bumpCycle(unsigned NextCycle);

  void incExecutedResources(unsigned PIdx, unsigned Count);

  unsigned countResource(const MCSchedClassDesc *SC, unsigned PIdx,
                         unsigned Cycles, unsigned ReadyCycle,
                         unsigned StartAtCycle);

  void bumpNode(SUnit *SU);

  void releasePending();

  void removeReady(SUnit *SU);

  /// Call this before applying any other heuristics to the Available queue.
  /// Updates the Available/Pending Q's if necessary and returns the single
  /// available instruction, or NULL if there are multiple candidates.
  SUnit *pickOnlyChoice();

  /// Dump the state of the information that tracks resource usage.
  void dumpReservedCycles() const;
  void dumpScheduledState() const;
};
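
// Sketch of how a strategy typically wires a boundary (see GenericScheduler
// below for the real pattern; the exact init ordering here is an assumption,
// and "Rem" is a SchedRemainder owned by the strategy):
//
//   SchedBoundary Top(SchedBoundary::TopQID, "TopQ");
//   ...
//   Rem.init(DAG, DAG->getSchedModel());
//   Top.init(DAG, DAG->getSchedModel(), &Rem);
//   ...
//   if (SUnit *SU = Top.pickOnlyChoice()) {
//     // Single available candidate: no heuristic comparison needed.
//   }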

/// Base class for GenericScheduler. This class maintains information about
/// scheduling candidates based on the TargetSchedModel, making it easy to
/// implement heuristics for either preRA or postRA scheduling.
class GenericSchedulerBase : public MachineSchedStrategy {
public:
  /// Represent the type of SchedCandidate found within a single queue.
  /// pickNodeBidirectional depends on these listed by decreasing priority.
  enum CandReason : uint8_t {
    NoCand, Only1, PhysReg, RegExcess, RegCritical, Stall, Cluster, Weak,
    RegMax, ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce,
    TopDepthReduce, TopPathReduce, NextDefUse, NodeOrder};

#ifndef NDEBUG
  static const char *getReasonStr(GenericSchedulerBase::CandReason Reason);
#endif

  /// Policy for scheduling the next instruction in the candidate's zone.
  struct CandPolicy {
    bool ReduceLatency = false;
    unsigned ReduceResIdx = 0;
    unsigned DemandResIdx = 0;

    CandPolicy() = default;

    bool operator==(const CandPolicy &RHS) const {
      return ReduceLatency == RHS.ReduceLatency &&
             ReduceResIdx == RHS.ReduceResIdx &&
             DemandResIdx == RHS.DemandResIdx;
    }
    bool operator!=(const CandPolicy &RHS) const {
      return !(*this == RHS);
    }
  };

  /// Status of an instruction's critical resource consumption.
  struct SchedResourceDelta {
    // Count critical resources in the scheduled region required by SU.
    unsigned CritResources = 0;

    // Count critical resources from another region consumed by SU.
    unsigned DemandedResources = 0;

    SchedResourceDelta() = default;

    bool operator==(const SchedResourceDelta &RHS) const {
      return CritResources == RHS.CritResources
        && DemandedResources == RHS.DemandedResources;
    }
    bool operator!=(const SchedResourceDelta &RHS) const {
      return !operator==(RHS);
    }
  };

  /// Store the state used by GenericScheduler heuristics, required for the
  /// lifetime of one invocation of pickNode().
  struct SchedCandidate {
    CandPolicy Policy;

    // The best SUnit candidate.
    SUnit *SU;

    // The reason for this candidate.
    CandReason Reason;

    // Whether this candidate should be scheduled at top/bottom.
    bool AtTop;

    // Register pressure values for the best candidate.
    RegPressureDelta RPDelta;

    // Critical resource consumption of the best candidate.
    SchedResourceDelta ResDelta;

    SchedCandidate() { reset(CandPolicy()); }
    SchedCandidate(const CandPolicy &Policy) { reset(Policy); }

    void reset(const CandPolicy &NewPolicy) {
      Policy = NewPolicy;
      SU = nullptr;
      Reason = NoCand;
      AtTop = false;
      RPDelta = RegPressureDelta();
      ResDelta = SchedResourceDelta();
    }

    bool isValid() const { return SU; }

    // Copy the status of another candidate without changing policy.
    void setBest(SchedCandidate &Best) {
      assert(Best.Reason != NoCand && "uninitialized Sched candidate");
      SU = Best.SU;
      Reason = Best.Reason;
      AtTop = Best.AtTop;
      RPDelta = Best.RPDelta;
      ResDelta = Best.ResDelta;
    }

    void initResourceDelta(const ScheduleDAGMI *DAG,
                           const TargetSchedModel *SchedModel);
  };

protected:
  const MachineSchedContext *Context;
  const TargetSchedModel *SchedModel = nullptr;
  const TargetRegisterInfo *TRI = nullptr;

  MachineSchedPolicy RegionPolicy;

  SchedRemainder Rem;

  GenericSchedulerBase(const MachineSchedContext *C) : Context(C) {}

  void setPolicy(CandPolicy &Policy, bool IsPostRA, SchedBoundary &CurrZone,
                 SchedBoundary *OtherZone);

  MachineSchedPolicy getPolicy() const override { return RegionPolicy; }

#ifndef NDEBUG
  void traceCandidate(const SchedCandidate &Cand);
#endif

private:
  bool shouldReduceLatency(const CandPolicy &Policy, SchedBoundary &CurrZone,
                           bool ComputeRemLatency, unsigned &RemLatency) const;
};

// Utility functions used by heuristics in tryCandidate().
bool tryLess(int TryVal, int CandVal,
             GenericSchedulerBase::SchedCandidate &TryCand,
             GenericSchedulerBase::SchedCandidate &Cand,
             GenericSchedulerBase::CandReason Reason);
bool tryGreater(int TryVal, int CandVal,
                GenericSchedulerBase::SchedCandidate &TryCand,
                GenericSchedulerBase::SchedCandidate &Cand,
                GenericSchedulerBase::CandReason Reason);
bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
                GenericSchedulerBase::SchedCandidate &Cand,
                SchedBoundary &Zone);
bool tryPressure(const PressureChange &TryP,
                 const PressureChange &CandP,
                 GenericSchedulerBase::SchedCandidate &TryCand,
                 GenericSchedulerBase::SchedCandidate &Cand,
                 GenericSchedulerBase::CandReason Reason,
                 const TargetRegisterInfo *TRI,
                 const MachineFunction &MF);
unsigned getWeakLeft(const SUnit *SU, bool isTop);
int biasPhysReg(const SUnit *SU, bool isTop);
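
// Illustrative sketch of how these helpers compose inside a tryCandidate()
// override ("MyScheduler" is a hypothetical GenericScheduler subclass). Each
// helper records a CandReason and returns true once a decision is reached:
//
//   bool MyScheduler::tryCandidate(SchedCandidate &Cand,
//                                  SchedCandidate &TryCand,
//                                  SchedBoundary *Zone) const {
//     // Prefer the node with fewer unresolved weak edges...
//     if (tryLess(getWeakLeft(TryCand.SU, TryCand.AtTop),
//                 getWeakLeft(Cand.SU, Cand.AtTop), TryCand, Cand, Weak))
//       return TryCand.Reason != NoCand;
//     // ...then fall through to original instruction order.
//     if (TryCand.SU->NodeNum < Cand.SU->NodeNum) {
//       TryCand.Reason = NodeOrder;
//       return true;
//     }
//     return false;
//   }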

/// GenericScheduler shrinks the unscheduled zone using heuristics to balance
/// the schedule.
class GenericScheduler : public GenericSchedulerBase {
public:
  GenericScheduler(const MachineSchedContext *C):
    GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ"),
    Bot(SchedBoundary::BotQID, "BotQ") {}

  void initPolicy(MachineBasicBlock::iterator Begin,
                  MachineBasicBlock::iterator End,
                  unsigned NumRegionInstrs) override;

  void dumpPolicy() const override;

  bool shouldTrackPressure() const override {
    return RegionPolicy.ShouldTrackPressure;
  }

  bool shouldTrackLaneMasks() const override {
    return RegionPolicy.ShouldTrackLaneMasks;
  }

  void initialize(ScheduleDAGMI *dag) override;

  SUnit *pickNode(bool &IsTopNode) override;

  void schedNode(SUnit *SU, bool IsTopNode) override;

  void releaseTopNode(SUnit *SU) override {
    if (SU->isScheduled)
      return;

    Top.releaseNode(SU, SU->TopReadyCycle, false);
    TopCand.SU = nullptr;
  }

  void releaseBottomNode(SUnit *SU) override {
    if (SU->isScheduled)
      return;

    Bot.releaseNode(SU, SU->BotReadyCycle, false);
    BotCand.SU = nullptr;
  }

  void registerRoots() override;

protected:
  ScheduleDAGMILive *DAG = nullptr;

  // State of the top and bottom scheduled instruction boundaries.
  SchedBoundary Top;
  SchedBoundary Bot;

  /// Candidate last picked from Top boundary.
  SchedCandidate TopCand;
  /// Candidate last picked from Bot boundary.
  SchedCandidate BotCand;

  void checkAcyclicLatency();

  void initCandidate(SchedCandidate &Cand, SUnit *SU, bool AtTop,
                     const RegPressureTracker &RPTracker,
                     RegPressureTracker &TempTracker);

  virtual bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand,
                            SchedBoundary *Zone) const;

  SUnit *pickNodeBidirectional(bool &IsTopNode);

  void pickNodeFromQueue(SchedBoundary &Zone,
                         const CandPolicy &ZonePolicy,
                         const RegPressureTracker &RPTracker,
                         SchedCandidate &Candidate);

  void reschedulePhysReg(SUnit *SU, bool isTop);
};

/// PostGenericScheduler - Interface to the scheduling algorithm used by
/// ScheduleDAGMI.
///
/// Callbacks from ScheduleDAGMI:
///   initPolicy -> initialize(DAG) -> registerRoots -> pickNode ...
class PostGenericScheduler : public GenericSchedulerBase {
protected:
  ScheduleDAGMI *DAG = nullptr;
  SchedBoundary Top;
  SchedBoundary Bot;

  /// Candidate last picked from Top boundary.
  SchedCandidate TopCand;
  /// Candidate last picked from Bot boundary.
  SchedCandidate BotCand;

public:
  PostGenericScheduler(const MachineSchedContext *C)
      : GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ"),
        Bot(SchedBoundary::BotQID, "BotQ") {}

  ~PostGenericScheduler() override = default;

  void initPolicy(MachineBasicBlock::iterator Begin,
                  MachineBasicBlock::iterator End,
                  unsigned NumRegionInstrs) override;

  /// PostRA scheduling does not track pressure.
  bool shouldTrackPressure() const override { return false; }

  void initialize(ScheduleDAGMI *Dag) override;

  void registerRoots() override;

  SUnit *pickNode(bool &IsTopNode) override;

  SUnit *pickNodeBidirectional(bool &IsTopNode);

  void scheduleTree(unsigned SubtreeID) override {
    llvm_unreachable("PostRA scheduler does not support subtree analysis.");
  }

  void schedNode(SUnit *SU, bool IsTopNode) override;

  void releaseTopNode(SUnit *SU) override {
    if (SU->isScheduled)
      return;
    Top.releaseNode(SU, SU->TopReadyCycle, false);
    TopCand.SU = nullptr;
  }

  void releaseBottomNode(SUnit *SU) override {
    if (SU->isScheduled)
      return;
    Bot.releaseNode(SU, SU->BotReadyCycle, false);
    BotCand.SU = nullptr;
  }

protected:
  virtual bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand);

  void pickNodeFromQueue(SchedBoundary &Zone, SchedCandidate &Cand);
};

/// Create the standard converging machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
/// Adds default DAG mutations.
ScheduleDAGMILive *createGenericSchedLive(MachineSchedContext *C);

/// Create a generic scheduler with no vreg liveness or DAG mutation passes.
ScheduleDAGMI *createGenericSchedPostRA(MachineSchedContext *C);

/// If ReorderWhileClustering is set to true, no attempt will be made to
/// reduce reordering due to load clustering.
std::unique_ptr<ScheduleDAGMutation>
createLoadClusterDAGMutation(const TargetInstrInfo *TII,
                             const TargetRegisterInfo *TRI,
                             bool ReorderWhileClustering = false);

/// If ReorderWhileClustering is set to true, no attempt will be made to
/// reduce reordering due to store clustering.
std::unique_ptr<ScheduleDAGMutation>
createStoreClusterDAGMutation(const TargetInstrInfo *TII,
                              const TargetRegisterInfo *TRI,
                              bool ReorderWhileClustering = false);

std::unique_ptr<ScheduleDAGMutation>
createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
                               const TargetRegisterInfo *TRI);
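
// Composition sketch (illustrative): a hypothetical target hook that starts
// from the generic live scheduler and layers an extra clustering mutation on
// top ("MyTargetPassConfig" is an assumption; the subtarget accessors are
// standard CodeGen APIs):
//
//   ScheduleDAGInstrs *
//   MyTargetPassConfig::createMachineScheduler(MachineSchedContext *C) {
//     ScheduleDAGMILive *DAG = createGenericSchedLive(C);
//     const TargetSubtargetInfo &ST = C->MF->getSubtarget();
//     DAG->addMutation(createLoadClusterDAGMutation(ST.getInstrInfo(),
//                                                   ST.getRegisterInfo()));
//     return DAG;
//   }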

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINESCHEDULER_H