//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

#define DEBUG_TYPE "misched"

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
                       cl::desc("Print critical path length to stdout"));
}
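
// For reference, these flags go through the usual cl::opt machinery and are
// typically exercised via llc, e.g. (illustrative invocations):
//   llc -misched-topdown  ...   force pure top-down list scheduling
//   llc -misched-bottomup ...   force pure bottom-up list scheduling
//   llc -misched-dcpl     ...   print the critical path length per region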

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

/// In some situations a few uninteresting nodes depend on nearly all other
/// nodes in the graph; provide a cutoff to hide them.
static cl::opt<unsigned> ViewMISchedCutoff("view-misched-cutoff", cl::Hidden,
  cl::desc("Hide nodes with more predecessors/successors than cutoff"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));

static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
  cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
  cl::desc("Only schedule this MBB#"));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

/// Avoid quadratic complexity in unusually large basic blocks by limiting the
/// size of the ready lists.
static cl::opt<unsigned> ReadyListLimit("misched-limit", cl::Hidden,
  cl::desc("Limit ready list to N instructions"), cl::init(256));

static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
  cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(true));

static cl::opt<bool> EnableMemOpCluster("misched-cluster", cl::Hidden,
                                        cl::desc("Enable memop clustering."),
                                        cl::init(true));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}
void ScheduleDAGMutation::anchor() {}

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
    MF(nullptr), MLI(nullptr), MDT(nullptr), PassConfig(nullptr), AA(nullptr),
    LIS(nullptr) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// Base class for a machine scheduler that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}

  void print(raw_ostream &O, const Module* = nullptr) const override;

protected:
  void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags);
};

/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createMachineScheduler();
};

/// PostMachineScheduler runs shortly before code emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
  PostMachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createPostMachineScheduler();
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "machine-scheduler",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "machine-scheduler",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineSchedulerBase(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

char PostMachineScheduler::ID = 0;

char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;

INITIALIZE_PASS(PostMachineScheduler, "postmisched",
                "PostRA Machine Instruction Scheduler", false, false)

PostMachineScheduler::PostMachineScheduler()
: MachineSchedulerBase(ID) {
  initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return nullptr;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);
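
// A target or out-of-tree scheduler hooks into the registry with a factory
// function. A minimal sketch (hypothetical names, following the pattern of
// DefaultSchedRegistry above):
//
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, make_unique<MySchedStrategy>());
//   }
//   static MachineSchedRegistry MySchedRegistry(
//       "my-sched", "Run MySchedStrategy.", createMySched);
//
// The new scheduler is then selectable with -misched=my-sched.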

static cl::opt<bool> EnableMachineSched(
    "enable-misched",
    cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePostRAMachineSched(
    "enable-post-misched",
    cl::desc("Enable the post-ra machine instruction scheduling pass."),
    cl::init(true), cl::Hidden);

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C);
static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C);

/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return priorNonDebug(MachineBasicBlock::const_iterator(I), Beg)
      .getNonConstIterator();
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  return nextIfDebug(MachineBasicBlock::const_iterator(I), End)
      .getNonConstIterator();
}
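
// These helpers let the schedulers treat DBG_VALUEs as transparent. For
// example (illustrative), given the sequence [DBG_VALUE, %add, DBG_VALUE,
// %mul], nextIfDebug(begin, end) lands on %add and priorNonDebug(end, begin)
// lands on %mul, so debug metadata never perturbs scheduling decisions.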

/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor != useDefaultMachineSched)
    return Ctor(this);

  // Get the default scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedLive(this);
}

/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}
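
// In other words, a target opts in via the subtarget's enablePostRAScheduler()
// hook and may supply its own DAG by overriding
// TargetPassConfig::createPostMachineScheduler(); if that returns null, the
// generic post-RA scheduler below is used.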

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(*mf.getFunction()))
    return false;

  if (EnableMachineSched.getNumOccurrences()) {
    if (!EnableMachineSched)
      return false;
  } else if (!mf.getSubtarget().enableMachineScheduler())
    return false;

  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler, false);

  DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(*mf.getFunction()))
    return false;

  if (EnablePostRAMachineSched.getNumOccurrences()) {
    if (!EnablePostRAMachineSched)
      return false;
  } else if (!mf.getSubtarget().enablePostRAScheduler()) {
    DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
    return false;
  }
  DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  PassConfig = &getAnalysis<TargetPassConfig>();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  scheduleRegions(*Scheduler, true);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}

/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need the isCall check to
/// enforce the boundary, but there would be no benefit to postRA scheduling
/// across calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
                            MachineBasicBlock *MBB,
                            MachineFunction *MF,
                            const TargetInstrInfo *TII) {
  return MI->isCall() || TII->isSchedulingBoundary(*MI, MBB, *MF);
}
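
// For example (illustrative), a call in the middle of a block splits it into
// two scheduling regions: instructions are never reordered across the call,
// and the call itself is not part of either region's DAG.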

/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler,
                                           bool FixKillFlags) {
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(&*MBB);

#ifndef NDEBUG
    if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
      continue;
    if (SchedOnlyBlock.getNumOccurrences()
        && (int)SchedOnlyBlock != MBB->getNumber())
      continue;
#endif

    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    //
    // MBB::size() uses instr_iterator to count. Here we need a bundle to count
    // as a single instruction.
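    //
    // For example (illustrative): a block containing I0 I1 CALL I2 I3 is
    // carved bottom-up into the region [I2,I3] ending at MBB->end(), then
    // the region [I0,I1] ending at the call.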
    for (MachineBasicBlock::iterator RegionEnd = MBB->end();
        RegionEnd != MBB->begin(); RegionEnd = Scheduler.begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end() ||
          isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII)) {
        --RegionEnd;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      unsigned NumRegionInstrs = 0;
      MachineBasicBlock::iterator I = RegionEnd;
      for (; I != MBB->begin(); --I) {
        MachineInstr &MI = *std::prev(I);
        if (isSchedBoundary(&MI, &*MBB, MF, TII))
          break;
        if (!MI.isDebugValue())
          ++NumRegionInstrs;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == std::prev(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n  From: " << *I << "    To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
      if (DumpCriticalPathLength) {
        errs() << MF->getName();
        errs() << ":BB# " << MBB->getNumber();
        errs() << " " << MBB->getName() << " \n";
      }

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler.begin();
    }
    Scheduler.finishBlock();
    // FIXME: Ideally, no further passes should rely on kill flags. However,
    // thumb2 size reduction is currently an exception, so the PostMIScheduler
    // needs to do this.
    if (FixKillFlags)
      Scheduler.fixupKills(&*MBB);
  }
  Scheduler.finalizeSchedule();
}

void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

LLVM_DUMP_METHOD
void ReadyQueue::dump() {
  dbgs() << "Queue " << Name << ": ";
  for (unsigned i = 0, e = Queue.size(); i < e; ++i)
    dbgs() << Queue[i]->NodeNum << " ";
  dbgs() << "\n";
}

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
// virtual registers.
//===----------------------------------------------------------------------===//

// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() {
}

bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}
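
// The typical client is a DAG mutation such as the memop clustering below,
// which calls, e.g., DAG->addEdge(SUb, SDep(SUa, SDep::Cluster)) and relies
// on the return value to learn whether the weak edge was safe to add.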

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
    SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();

  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
    PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();

  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  SchedImpl->initPolicy(begin, end, regioninstrs);
}

/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
  MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals
  if (LIS)
    LIS->handleMove(*MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}
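
// In debug builds this implements -misched-cutoff=N (see MISchedCutoff above):
// scheduling stops after N instructions, which makes it possible to bisect a
// misched problem down to an individual scheduling decision.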

/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
  DEBUG(SchedImpl->dumpPolicy());

  // Build the DAG.
  buildSchedGraph(AA);

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(
    if (EntrySU.getInstr() != nullptr)
      EntrySU.dumpAll(this);
    for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
      SUnits[su].dumpAll(this);
    if (ExitSU.getInstr() != nullptr)
      ExitSU.dumpAll(this);
  );
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    } else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
    }
    // Notify the scheduling strategy before updating the DAG.
    // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
    // runs, it can then use the accurate ReadyCycle time to determine whether
    // newly released nodes can move to the readyQ.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
    Mutations[i]->apply(this);
  }
}

void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                      SmallVectorImpl<SUnit*> &BotRoots) {
  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    SUnit *SU = &(*I);
    assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU->biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!I->NumPredsLeft)
      TopRoots.push_back(SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!I->NumSuccsLeft)
      BotRoots.push_back(SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = nullptr;
  NextClusterPred = nullptr;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
    SchedImpl->releaseTopNode(*I);
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == std::prev(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}

void ScheduleDAGMILive::collectVRegUses(SUnit &SU) {
  const MachineInstr &MI = *SU.getInstr();
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (!MO.readsReg())
      continue;
    if (TrackLaneMasks && !MO.isUse())
      continue;

    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;

    // Ignore re-defs.
    if (TrackLaneMasks) {
      bool FoundDef = false;
      for (const MachineOperand &MO2 : MI.operands()) {
        if (MO2.isReg() && MO2.isDef() && MO2.getReg() == Reg && !MO2.isDead()) {
          FoundDef = true;
          break;
        }
      }
      if (FoundDef)
        continue;
    }

    // Record this local VReg use.
    VReg2SUnitMultiMap::iterator UI = VRegUses.find(Reg);
    for (; UI != VRegUses.end(); ++UI) {
      if (UI->SU == &SU)
        break;
    }
    if (UI == VRegUses.end())
      VRegUses.insert(VReg2SUnit(Reg, 0, &SU));
  }
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);

  SUPressureDiffs.clear();

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
  ShouldTrackLaneMasks = SchedImpl->shouldTrackLaneMasks();

  assert((!ShouldTrackLaneMasks || ShouldTrackPressure) &&
         "ShouldTrackLaneMasks requires ShouldTrackPressure");
}

// Setup the register pressure trackers for the top scheduled and bottom
// scheduled regions.
void ScheduleDAGMILive::initRegPressure() {
  VRegUses.clear();
  VRegUses.setUniverse(MRI.getNumVirtRegs());
  for (SUnit &SU : SUnits)
    collectVRegUses(SU);

  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin,
                    ShouldTrackLaneMasks, false);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                    ShouldTrackLaneMasks, false);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    DEBUG(dbgs() << "Live Thru: ";
          dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // For each live out vreg reduce the pressure change associated with other
  // uses of the same vreg below the live-out reaching def.
  updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd) {
    SmallVector<RegisterMaskPair, 8> LiveUses;
    BotRPTracker.recede(&LiveUses);
    updatePressureDiffs(LiveUses);
  }

  DEBUG(
    dbgs() << "Top Pressure:\n";
    dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
    dbgs() << "Bottom Pressure:\n";
    dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
  );

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      DEBUG(dbgs() << TRI->getRegPressureSetName(i)
            << " Limit " << Limit
            << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureChange(i));
    }
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].getPSet()) << " ";
        dbgs() << "\n");
}
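
// Worked example (illustrative): if a pressure set has Limit 8 but the
// region's MaxSetPressure for it is 12, the set is recorded in
// RegionCriticalPSets, and the strategy is subsequently biased against
// candidates that raise that set's pressure any further.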

void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
                        const std::vector<unsigned> &NewMaxPressure) {
  const PressureDiff &PDiff = getPressureDiff(SU);
  unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
  for (PressureDiff::const_iterator I = PDiff.begin(), E = PDiff.end();
       I != E; ++I) {
    if (!I->isValid())
      break;
    unsigned ID = I->getPSet();
    while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
      ++CritIdx;
    if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
      if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
          && NewMaxPressure[ID] <= INT16_MAX)
        RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
    }
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
    if (NewMaxPressure[ID] >= Limit - 2) {
      DEBUG(dbgs() << "  " << TRI->getRegPressureSetName(ID) << ": "
            << NewMaxPressure[ID]
            << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ") << Limit
            << "(+ " << BotRPTracker.getLiveThru()[ID] << " livethru)\n");
    }
  }
}

/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(
    ArrayRef<RegisterMaskPair> LiveUses) {
  for (const RegisterMaskPair &P : LiveUses) {
    unsigned Reg = P.RegUnit;
    // FIXME: Currently assuming single-use physregs.
    if (!TRI->isVirtualRegister(Reg))
      continue;

    if (ShouldTrackLaneMasks) {
      // If the register has just become live then other uses won't change
      // this fact anymore => decrement pressure.
      // If the register has just become dead then other uses make it come
      // back to life => increment pressure.
      bool Decrement = P.LaneMask != 0;

      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit &SU = *V2SU.SU;
        if (SU.isScheduled || &SU == &ExitSU)
          continue;

        PressureDiff &PDiff = getPressureDiff(&SU);
        PDiff.addPressureChange(Reg, Decrement, &MRI);
        DEBUG(
          dbgs() << "  UpdateRegP: SU(" << SU.NodeNum << ") "
                 << PrintReg(Reg, TRI) << ':' << PrintLaneMask(P.LaneMask)
                 << ' ' << *SU.getInstr();
          dbgs() << "              to ";
          PDiff.dump(*TRI);
        );
      }
    } else {
      assert(P.LaneMask != 0);
      DEBUG(dbgs() << "  LiveReg: " << PrintVRegOrUnit(Reg, TRI) << "\n");
      // This may be called before CurrentBottom has been initialized. However,
      // BotRPTracker must have a valid position. We want the value live into the
      // instruction or live out of the block, so ask for the previous
      // instruction's live-out.
      const LiveInterval &LI = LIS->getInterval(Reg);
      VNInfo *VNI;
      MachineBasicBlock::const_iterator I =
        nextIfDebug(BotRPTracker.getPos(), BB->end());
      if (I == BB->end())
        VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
      else {
        LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*I));
        VNI = LRQ.valueIn();
      }
      // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
      assert(VNI && "No live value at use.");
      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit *SU = V2SU.SU;
        // If this use comes before the reaching def, it cannot be a last use,
        // so decrease its pressure change.
        if (!SU->isScheduled && SU != &ExitSU) {
          LiveQueryResult LRQ =
              LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
          if (LRQ.valueIn() == VNI) {
            PressureDiff &PDiff = getPressureDiff(SU);
            PDiff.addPressureChange(Reg, true, &MRI);
            DEBUG(
              dbgs() << "  UpdateRegP: SU(" << SU->NodeNum << ") "
                     << *SU->getInstr();
              dbgs() << "              to ";
              PDiff.dump(*TRI);
            );
          }
        }
      }
    }
  }
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMILive then it will want to override this virtual method in order
/// to update any specialized state.
void ScheduleDAGMILive::schedule() {
  DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
  DEBUG(SchedImpl->dumpPolicy());
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(
    if (EntrySU.getInstr() != nullptr)
      EntrySU.dumpAll(this);
    for (const SUnit &SU : SUnits) {
      SU.dumpAll(this);
      if (ShouldTrackPressure) {
        dbgs() << "  Pressure Diff      : ";
        getPressureDiff(&SU).dump(*TRI);
      }
      dbgs() << '\n';
    }
    if (ExitSU.getInstr() != nullptr)
      ExitSU.dumpAll(this);
  );
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    if (DFSResult) {
      unsigned SubtreeID = DFSResult->getSubtreeID(SU);
      if (!ScheduledTrees.test(SubtreeID)) {
        ScheduledTrees.set(SubtreeID);
        DFSResult->scheduleTree(SubtreeID);
        SchedImpl->scheduleTree(SubtreeID);
      }
    }

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Build the DAG and setup three register pressure trackers.
void ScheduleDAGMILive::buildDAGWithRegPressure() {
  if (!ShouldTrackPressure) {
    RPTracker.reset();
    RegionCriticalPSets.clear();
    buildSchedGraph(AA);
    return;
  }

  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 ShouldTrackLaneMasks, /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker, &SUPressureDiffs, LIS, ShouldTrackLaneMasks);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

void ScheduleDAGMILive::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}
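
// The DFS result partitions the DAG into subtrees of roughly MinSubtreeSize
// (8) or more nodes. ScheduledTrees, updated in schedule() above, then lets a
// strategy notice when scheduling first enters a subtree and prefer finishing
// it before starting another.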

/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single block loops. To handle loops that
/// span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes. For example, consider
/// the following instruction sequence where each instruction has unit latency
/// and defines an eponymous virtual register:
///
/// a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
///
/// This could be relevant to PostRA scheduling, but is currently implemented
/// assuming LiveIntervals.
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
  // This only applies to single block loops.
  if (!BB->isSuccessor(BB))
    return 0;

  unsigned MaxCyclicLatency = 0;
  // Visit each live out vreg def to find def/use pairs that cross iterations.
  for (const RegisterMaskPair &P : RPTracker.getPressure().LiveOutRegs) {
    unsigned Reg = P.RegUnit;
    if (!TRI->isVirtualRegister(Reg))
      continue;
    const LiveInterval &LI = LIS->getInterval(Reg);
    const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    if (!DefVNI)
      continue;

    MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
    const SUnit *DefSU = getSUnit(DefMI);
    if (!DefSU)
      continue;

    unsigned LiveOutHeight = DefSU->getHeight();
    unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
    // Visit all local users of the vreg def.
    for (const VReg2SUnit &V2SU
         : make_range(VRegUses.find(Reg), VRegUses.end())) {
      SUnit *SU = V2SU.SU;
      if (SU == &ExitSU)
        continue;

      // Only consider uses of the phi.
      LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
      if (!LRQ.valueIn()->isPHIDef())
        continue;

      // Assume that a path spanning two iterations is a cycle, which could
      // overestimate in strange cases. This allows cyclic latency to be
      // estimated as the minimum slack of the vreg's depth or height.
      unsigned CyclicLatency = 0;
      if (LiveOutDepth > SU->getDepth())
        CyclicLatency = LiveOutDepth - SU->getDepth();

      unsigned LiveInHeight = SU->getHeight() + DefSU->Latency;
      if (LiveInHeight > LiveOutHeight) {
        if (LiveInHeight - LiveOutHeight < CyclicLatency)
          CyclicLatency = LiveInHeight - LiveOutHeight;
      } else
        CyclicLatency = 0;

      DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
            << SU->NodeNum << ") = " << CyclicLatency << "c\n");
      if (CyclicLatency > MaxCyclicLatency)
        MaxCyclicLatency = CyclicLatency;
    }
  }
  DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
  return MaxCyclicLatency;
}

/// Release ExitSU predecessors and setup scheduler queues. Re-position
/// the Top RP tracker in case the region beginning has changed.
void ScheduleDAGMILive::initQueues(ArrayRef<SUnit*> TopRoots,
                                   ArrayRef<SUnit*> BotRoots) {
  ScheduleDAGMI::initQueues(TopRoots, BotRoots);
  if (ShouldTrackPressure) {
    assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
    TopRPTracker.setPos(CurrentTop);
  }
}

/// Move an instruction and update register pressure.
void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    if (ShouldTrackPressure) {
      // Update top scheduled pressure.
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      TopRPTracker.advance(RegOpers);
      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
      DEBUG(
        dbgs() << "Top Pressure:\n";
        dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
      );

      updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
    }
  } else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
    }
    if (ShouldTrackPressure) {
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      BotRPTracker.recedeSkipDebugValues();
      SmallVector<RegisterMaskPair, 8> LiveUses;
      BotRPTracker.recede(RegOpers, &LiveUses);
      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
      DEBUG(
        dbgs() << "Bottom Pressure:\n";
        dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
      );

      updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
      updatePressureDiffs(LiveUses);
    }
  }
}
1401 
1402 //===----------------------------------------------------------------------===//
1403 // BaseMemOpClusterMutation - DAG post-processing to cluster loads or stores.
1404 //===----------------------------------------------------------------------===//
1405 
1406 namespace {
1407 /// \brief Post-process the DAG to create cluster edges between neighboring
1408 /// loads or between neighboring stores.
1409 class BaseMemOpClusterMutation : public ScheduleDAGMutation {
1410   struct MemOpInfo {
1411     SUnit *SU;
1412     unsigned BaseReg;
1413     int64_t Offset;
1414     MemOpInfo(SUnit *su, unsigned reg, int64_t ofs)
1415         : SU(su), BaseReg(reg), Offset(ofs) {}
1416 
1417     bool operator<(const MemOpInfo &RHS) const {
1418       return std::tie(BaseReg, Offset, SU->NodeNum) <
1419              std::tie(RHS.BaseReg, RHS.Offset, RHS.SU->NodeNum);
1420     }
1421   };
1422 
1423   const TargetInstrInfo *TII;
1424   const TargetRegisterInfo *TRI;
1425   bool IsLoad;
1426 
1427 public:
1428   BaseMemOpClusterMutation(const TargetInstrInfo *tii,
1429                            const TargetRegisterInfo *tri, bool IsLoad)
1430       : TII(tii), TRI(tri), IsLoad(IsLoad) {}
1431 
1432   void apply(ScheduleDAGInstrs *DAGInstrs) override;
1433 
1434 protected:
1435   void clusterNeighboringMemOps(ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG);
1436 };
1437 
1438 class StoreClusterMutation : public BaseMemOpClusterMutation {
1439 public:
1440   StoreClusterMutation(const TargetInstrInfo *tii,
1441                        const TargetRegisterInfo *tri)
1442       : BaseMemOpClusterMutation(tii, tri, false) {}
1443 };
1444 
1445 class LoadClusterMutation : public BaseMemOpClusterMutation {
1446 public:
1447   LoadClusterMutation(const TargetInstrInfo *tii, const TargetRegisterInfo *tri)
1448       : BaseMemOpClusterMutation(tii, tri, true) {}
1449 };
1450 } // anonymous
1451 
1452 namespace llvm {
1453 
1454 std::unique_ptr<ScheduleDAGMutation>
1455 createLoadClusterDAGMutation(const TargetInstrInfo *TII,
1456                              const TargetRegisterInfo *TRI) {
1457   return make_unique<LoadClusterMutation>(TII, TRI);
1458 }
1459 
1460 std::unique_ptr<ScheduleDAGMutation>
1461 createStoreClusterDAGMutation(const TargetInstrInfo *TII,
1462                               const TargetRegisterInfo *TRI) {
1463   return make_unique<StoreClusterMutation>(TII, TRI);
1464 }
1465 
1466 } // namespace llvm
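
// Usage sketch (illustrative, not part of this pass): a target typically
// attaches these mutations from its createMachineScheduler hook once it has
// constructed its scheduler. Assuming a ScheduleDAGMILive *DAG is being
// configured:
//
//   DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
//   DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));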
1467 
1468 void BaseMemOpClusterMutation::clusterNeighboringMemOps(
1469     ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG) {
1470   SmallVector<MemOpInfo, 32> MemOpRecords;
1471   for (unsigned Idx = 0, End = MemOps.size(); Idx != End; ++Idx) {
1472     SUnit *SU = MemOps[Idx];
1473     unsigned BaseReg;
1474     int64_t Offset;
1475     if (TII->getMemOpBaseRegImmOfs(*SU->getInstr(), BaseReg, Offset, TRI))
1476       MemOpRecords.push_back(MemOpInfo(SU, BaseReg, Offset));
1477   }
1478   if (MemOpRecords.size() < 2)
1479     return;
1480 
1481   std::sort(MemOpRecords.begin(), MemOpRecords.end());
1482   unsigned ClusterLength = 1;
1483   for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
1484     if (MemOpRecords[Idx].BaseReg != MemOpRecords[Idx+1].BaseReg) {
1485       ClusterLength = 1;
1486       continue;
1487     }
1488 
1489     SUnit *SUa = MemOpRecords[Idx].SU;
1490     SUnit *SUb = MemOpRecords[Idx+1].SU;
1491     if (TII->shouldClusterMemOps(*SUa->getInstr(), *SUb->getInstr(),
1492                                  ClusterLength) &&
1493         DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
1494       DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
1495             << SUb->NodeNum << ")\n");
1496       // Copy successor edges from SUa to SUb. Interleaving computation
1497       // dependent on SUa can prevent load combining due to register reuse.
1498       // Predecessor edges do not need to be copied from SUb to SUa since nearby
1499       // loads should have effectively the same inputs.
1500       for (SUnit::const_succ_iterator
1501              SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
1502         if (SI->getSUnit() == SUb)
1503           continue;
1504         DEBUG(dbgs() << "  Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
1505         DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
1506       }
1507       ++ClusterLength;
1508     } else
1509       ClusterLength = 1;
1510   }
1511 }
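
// Illustration of the walk above (hypothetical operands): suppose a chain
// holds four loads with (BaseReg, Offset) records (x1,0), (x1,8), (x1,16),
// (x2,0). After the sort, shouldClusterMemOps is queried for the adjacent
// pairs (x1,0)-(x1,8) and (x1,8)-(x1,16), growing ClusterLength to 3; the
// base-register change to x2 resets ClusterLength to 1. Each accepted pair
// gets a cluster edge SUa -> SUb, so the three x1 loads tend to issue back
// to back.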
1512 
1513 /// \brief Callback from DAG postProcessing to create cluster edges for loads or stores.
1514 void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAGInstrs) {
1515 
1516   ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
1517 
1518   // Map DAG NodeNum to store chain ID.
1519   DenseMap<unsigned, unsigned> StoreChainIDs;
1520   // Map each store chain to a set of dependent MemOps.
1521   SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
1522   for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
1523     SUnit *SU = &DAG->SUnits[Idx];
1524     if (SU->skip)
1525       continue;
1526     if ((IsLoad && !SU->getInstr()->mayLoad()) ||
1527         (!IsLoad && !SU->getInstr()->mayStore()))
1528       continue;
1529 
1530     unsigned ChainPredID = DAG->SUnits.size();
1531     for (SUnit::const_pred_iterator
1532            PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
1533       if (PI->isCtrl()) {
1534         ChainPredID = PI->getSUnit()->NodeNum;
1535         break;
1536       }
1537     }
1538     // Check if this chain-like pred has been seen
1539     // before. ChainPredID==MaxNodeID at the top of the schedule.
1540     unsigned NumChains = StoreChainDependents.size();
1541     std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
1542       StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
1543     if (Result.second)
1544       StoreChainDependents.resize(NumChains + 1);
1545     StoreChainDependents[Result.first->second].push_back(SU);
1546   }
1547 
1548   // Iterate over the store chains.
1549   for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
1550     clusterNeighboringMemOps(StoreChainDependents[Idx], DAG);
1551 }
1552 
1553 //===----------------------------------------------------------------------===//
1554 // MacroFusion - DAG post-processing to encourage fusion of macro ops.
1555 //===----------------------------------------------------------------------===//
1556 
1557 namespace {
1558 /// \brief Post-process the DAG to create cluster edges between instructions
1559 /// that may be fused by the processor into a single operation.
1560 class MacroFusion : public ScheduleDAGMutation {
1561   const TargetInstrInfo &TII;
1562 public:
1563   MacroFusion(const TargetInstrInfo &TII)
1564     : TII(TII) {}
1565 
1566   void apply(ScheduleDAGInstrs *DAGInstrs) override;
1567 };
1568 } // anonymous
1569 
1570 namespace llvm {
1571 
1572 std::unique_ptr<ScheduleDAGMutation>
1573 createMacroFusionDAGMutation(const TargetInstrInfo *TII) {
1574   return make_unique<MacroFusion>(*TII);
1575 }
1576 
1577 } // namespace llvm
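
// As with the memop clustering mutations, a target opts in from its
// scheduler construction hook (sketch, assuming a ScheduleDAGMI *DAG):
//
//   DAG->addMutation(createMacroFusionDAGMutation(DAG->TII));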
1578 
1579 /// \brief Callback from DAG postProcessing to create cluster edges to encourage
1580 /// fused operations.
1581 void MacroFusion::apply(ScheduleDAGInstrs *DAGInstrs) {
1582   ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
1583 
1584   // For now, assume targets can only fuse with the branch.
1585   SUnit &ExitSU = DAG->ExitSU;
1586   MachineInstr *Branch = ExitSU.getInstr();
1587   if (!Branch)
1588     return;
1589 
1590   for (SDep &PredDep : ExitSU.Preds) {
1591     if (PredDep.isWeak())
1592       continue;
1593     SUnit &SU = *PredDep.getSUnit();
1594     MachineInstr &Pred = *SU.getInstr();
1595     if (!TII.shouldScheduleAdjacent(Pred, *Branch))
1596       continue;
1597 
1598     // Create a single weak edge from SU to ExitSU. The only effect is to cause
1599     // bottom-up scheduling to heavily prioritize the clustered SU.  There is no
1600     // need to copy predecessor edges from ExitSU to SU, since top-down
1601     // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
1602     // of SU, we could create an artificial edge from the deepest root, but it
1603     // hasn't been needed yet.
1604     bool Success = DAG->addEdge(&ExitSU, SDep(&SU, SDep::Cluster));
1605     (void)Success;
1606     assert(Success && "No DAG nodes should be reachable from ExitSU");
1607 
1608     // Adjust latency of data deps between the nodes.
1609     for (SDep &PredDep : ExitSU.Preds) {
1610       if (PredDep.getSUnit() == &SU)
1611         PredDep.setLatency(0);
1612     }
1613     for (SDep &SuccDep : SU.Succs) {
1614       if (SuccDep.getSUnit() == &ExitSU)
1615         SuccDep.setLatency(0);
1616     }
1617 
1618     DEBUG(dbgs() << "Macro Fuse SU(" << SU.NodeNum << ")\n");
1619     break;
1620   }
1621 }
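
// Illustration: on a target whose shouldScheduleAdjacent accepts a compare
// feeding a conditional branch, the loop above adds a cluster edge from the
// compare's SU to ExitSU and zeroes the latency between them, so bottom-up
// scheduling keeps the pair adjacent and the hardware can fuse them into a
// single macro-op.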
1622 
1623 //===----------------------------------------------------------------------===//
1624 // CopyConstrain - DAG post-processing to encourage copy elimination.
1625 //===----------------------------------------------------------------------===//
1626 
1627 namespace {
1628 /// \brief Post-process the DAG to create weak edges from all uses of a copy to
1629 /// the one use that defines the copy's source vreg, most likely an induction
1630 /// variable increment.
1631 class CopyConstrain : public ScheduleDAGMutation {
1632   // Transient state.
1633   SlotIndex RegionBeginIdx;
1634   // RegionEndIdx is the slot index of the last non-debug instruction in the
1635   // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
1636   SlotIndex RegionEndIdx;
1637 public:
1638   CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}
1639 
1640   void apply(ScheduleDAGInstrs *DAGInstrs) override;
1641 
1642 protected:
1643   void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
1644 };
1645 } // anonymous
1646 
1647 namespace llvm {
1648 
1649 std::unique_ptr<ScheduleDAGMutation>
1650 createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
1651                              const TargetRegisterInfo *TRI) {
1652   return make_unique<CopyConstrain>(TII, TRI);
1653 }
1654 
1655 } // namespace llvm
1656 
1657 /// constrainLocalCopy handles two possibilities:
1658 /// 1) Local src:
1659 /// I0:     = dst
1660 /// I1: src = ...
1661 /// I2:     = dst
1662 /// I3: dst = src (copy)
1663 /// (create pred->succ edges I0->I1, I2->I1)
1664 ///
1665 /// 2) Local copy:
1666 /// I0: dst = src (copy)
1667 /// I1:     = dst
1668 /// I2: src = ...
1669 /// I3:     = dst
1670 /// (create pred->succ edges I1->I2, I3->I2)
1671 ///
1672 /// Although the MachineScheduler is currently constrained to single blocks,
1673 /// this algorithm should handle extended blocks. An EBB is a set of
1674 /// contiguously numbered blocks such that the previous block in the EBB is
1675 /// always the single predecessor.
1676 void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
1677   LiveIntervals *LIS = DAG->getLIS();
1678   MachineInstr *Copy = CopySU->getInstr();
1679 
1680   // Check for pure vreg copies.
1681   const MachineOperand &SrcOp = Copy->getOperand(1);
1682   unsigned SrcReg = SrcOp.getReg();
1683   if (!TargetRegisterInfo::isVirtualRegister(SrcReg) || !SrcOp.readsReg())
1684     return;
1685 
1686   const MachineOperand &DstOp = Copy->getOperand(0);
1687   unsigned DstReg = DstOp.getReg();
1688   if (!TargetRegisterInfo::isVirtualRegister(DstReg) || DstOp.isDead())
1689     return;
1690 
1691   // Check if either the dest or source is local. If it's live across a back
1692   // edge, it's not local. Note that if both vregs are live across the back
1693   // edge, we cannot successfully constrain the copy without cyclic scheduling.
1694   // If both the copy's source and dest are local live intervals, then we
1695   // should treat the dest as the global for the purpose of adding
1696   // constraints. This adds edges from source's other uses to the copy.
1697   unsigned LocalReg = SrcReg;
1698   unsigned GlobalReg = DstReg;
1699   LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
1700   if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
1701     LocalReg = DstReg;
1702     GlobalReg = SrcReg;
1703     LocalLI = &LIS->getInterval(LocalReg);
1704     if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
1705       return;
1706   }
1707   LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);
1708 
1709   // Find the global segment after the start of the local LI.
1710   LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
1711   // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
1712   // local live range. We could create edges from other global uses to the local
1713   // start, but the coalescer should have already eliminated these cases, so
1714   // don't bother dealing with it.
1715   if (GlobalSegment == GlobalLI->end())
1716     return;
1717 
1718   // If GlobalSegment is killed at the LocalLI->start, the call to find()
1719   // returned the next global segment. But if GlobalSegment overlaps with
1720   // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
1721   // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
1722   if (GlobalSegment->contains(LocalLI->beginIndex()))
1723     ++GlobalSegment;
1724 
1725   if (GlobalSegment == GlobalLI->end())
1726     return;
1727 
1728   // Check if GlobalLI contains a hole in the vicinity of LocalLI.
1729   if (GlobalSegment != GlobalLI->begin()) {
1730     // Two address defs have no hole.
1731     if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
1732                                GlobalSegment->start)) {
1733       return;
1734     }
1735     // If the prior global segment may be defined by the same two-address
1736     // instruction that also defines LocalLI, then we cannot make a hole here.
1737     if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
1738                                LocalLI->beginIndex())) {
1739       return;
1740     }
1741     // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
1742     // it would be a disconnected component in the live range.
1743     assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
1744            "Disconnected LRG within the scheduling region.");
1745   }
1746   MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
1747   if (!GlobalDef)
1748     return;
1749 
1750   SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
1751   if (!GlobalSU)
1752     return;
1753 
1754   // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
1755   // constraining the uses of the last local def to precede GlobalDef.
1756   SmallVector<SUnit*,8> LocalUses;
1757   const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
1758   MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
1759   SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
1760   for (SUnit::const_succ_iterator
1761          I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
1762        I != E; ++I) {
1763     if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
1764       continue;
1765     if (I->getSUnit() == GlobalSU)
1766       continue;
1767     if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
1768       return;
1769     LocalUses.push_back(I->getSUnit());
1770   }
1771   // Open the top of the GlobalLI hole by constraining any earlier global uses
1772   // to precede the start of LocalLI.
1773   SmallVector<SUnit*,8> GlobalUses;
1774   MachineInstr *FirstLocalDef =
1775     LIS->getInstructionFromIndex(LocalLI->beginIndex());
1776   SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
1777   for (SUnit::const_pred_iterator
1778          I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
1779     if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
1780       continue;
1781     if (I->getSUnit() == FirstLocalSU)
1782       continue;
1783     if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
1784       return;
1785     GlobalUses.push_back(I->getSUnit());
1786   }
1787   DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
1788   // Add the weak edges.
1789   for (SmallVectorImpl<SUnit*>::const_iterator
1790          I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
1791     DEBUG(dbgs() << "  Local use SU(" << (*I)->NodeNum << ") -> SU("
1792           << GlobalSU->NodeNum << ")\n");
1793     DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
1794   }
1795   for (SmallVectorImpl<SUnit*>::const_iterator
1796          I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
1797     DEBUG(dbgs() << "  Global use SU(" << (*I)->NodeNum << ") -> SU("
1798           << FirstLocalSU->NodeNum << ")\n");
1799     DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
1800   }
1801 }
1802 
1803 /// \brief Callback from DAG postProcessing to create weak edges to encourage
1804 /// copy elimination.
1805 void CopyConstrain::apply(ScheduleDAGInstrs *DAGInstrs) {
1806   ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
1807   assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");
1808 
1809   MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
1810   if (FirstPos == DAG->end())
1811     return;
1812   RegionBeginIdx = DAG->getLIS()->getInstructionIndex(*FirstPos);
1813   RegionEndIdx = DAG->getLIS()->getInstructionIndex(
1814       *priorNonDebug(DAG->end(), DAG->begin()));
1815 
1816   for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
1817     SUnit *SU = &DAG->SUnits[Idx];
1818     if (SU->skip)
1819       continue;
1820     if (!SU->getInstr()->isCopy())
1821       continue;
1822 
1823     constrainLocalCopy(SU, static_cast<ScheduleDAGMILive*>(DAG));
1824   }
1825 }
1826 
1827 //===----------------------------------------------------------------------===//
1828 // MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
1829 // and possibly other custom schedulers.
1830 //===----------------------------------------------------------------------===//
1831 
1832 static const unsigned InvalidCycle = ~0U;
1833 
1834 SchedBoundary::~SchedBoundary() { delete HazardRec; }
1835 
1836 void SchedBoundary::reset() {
1837   // A new HazardRec is created for each DAG and owned by SchedBoundary.
1838   // Destroying and reconstructing it is very expensive though. So keep
1839   // invalid, placeholder HazardRecs.
1840   if (HazardRec && HazardRec->isEnabled()) {
1841     delete HazardRec;
1842     HazardRec = nullptr;
1843   }
1844   Available.clear();
1845   Pending.clear();
1846   CheckPending = false;
1847   CurrCycle = 0;
1848   CurrMOps = 0;
1849   MinReadyCycle = UINT_MAX;
1850   ExpectedLatency = 0;
1851   DependentLatency = 0;
1852   RetiredMOps = 0;
1853   MaxExecutedResCount = 0;
1854   ZoneCritResIdx = 0;
1855   IsResourceLimited = false;
1856   ReservedCycles.clear();
1857 #ifndef NDEBUG
1858   // Track the maximum number of stall cycles that could arise either from the
1859   // latency of a DAG edge or the number of cycles that a processor resource is
1860   // reserved (SchedBoundary::ReservedCycles).
1861   MaxObservedStall = 0;
1862 #endif
1863   // Reserve a zero-count for invalid CritResIdx.
1864   ExecutedResCounts.resize(1);
1865   assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
1866 }
1867 
1868 void SchedRemainder::
1869 init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
1870   reset();
1871   if (!SchedModel->hasInstrSchedModel())
1872     return;
1873   RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
1874   for (std::vector<SUnit>::iterator
1875          I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
1876     const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
1877     RemIssueCount += SchedModel->getNumMicroOps(I->getInstr(), SC)
1878       * SchedModel->getMicroOpFactor();
1879     for (TargetSchedModel::ProcResIter
1880            PI = SchedModel->getWriteProcResBegin(SC),
1881            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1882       unsigned PIdx = PI->ProcResourceIdx;
1883       unsigned Factor = SchedModel->getResourceFactor(PIdx);
1884       RemainingCounts[PIdx] += (Factor * PI->Cycles);
1885     }
1886   }
1887 }
1888 
1889 void SchedBoundary::
1890 init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
1891   reset();
1892   DAG = dag;
1893   SchedModel = smodel;
1894   Rem = rem;
1895   if (SchedModel->hasInstrSchedModel()) {
1896     ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds());
1897     ReservedCycles.resize(SchedModel->getNumProcResourceKinds(), InvalidCycle);
1898   }
1899 }
1900 
1901 /// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
1902 /// these "soft stalls" differently than the hard stall cycles based on CPU
1903 /// resources and computed by checkHazard(). A fully in-order model
1904 /// (MicroOpBufferSize==0) will not make use of this since instructions are not
1905 /// available for scheduling until they are ready. However, a weaker in-order
1906 /// model may use this for heuristics. For example, if a processor has in-order
1907 /// behavior when reading certain resources, this may come into play.
1908 unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
1909   if (!SU->isUnbuffered)
1910     return 0;
1911 
1912   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
1913   if (ReadyCycle > CurrCycle)
1914     return ReadyCycle - CurrCycle;
1915   return 0;
1916 }
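
// For example, if an unbuffered SU becomes ready at cycle 7 while the
// zone's CurrCycle is still 5, the heuristics see 2 soft stall cycles;
// buffered SUs always report 0.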
1917 
1918 /// Compute the next cycle at which the given processor resource can be
1919 /// scheduled.
1920 unsigned SchedBoundary::
1921 getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
1922   unsigned NextUnreserved = ReservedCycles[PIdx];
1923   // If this resource has never been used, always return cycle zero.
1924   if (NextUnreserved == InvalidCycle)
1925     return 0;
1926   // For bottom-up scheduling add the cycles needed for the current operation.
1927   if (!isTop())
1928     NextUnreserved += Cycles;
1929   return NextUnreserved;
1930 }
1931 
1932 /// Does this SU have a hazard within the current instruction group?
1933 ///
1934 /// The scheduler supports two modes of hazard recognition. The first is the
1935 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
1936 /// supports highly complicated in-order reservation tables
1937 /// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
1938 ///
1939 /// The second is a streamlined mechanism that checks for hazards based on
1940 /// simple counters that the scheduler itself maintains. It explicitly checks
1941 /// for instruction dispatch limitations, including the number of micro-ops that
1942 /// can dispatch per cycle.
1943 ///
1944 /// TODO: Also check whether the SU must start a new group.
1945 bool SchedBoundary::checkHazard(SUnit *SU) {
1946   if (HazardRec->isEnabled()
1947       && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
1948     return true;
1949   }
1950   unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
1951   if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
1952     DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
1953           << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
1954     return true;
1955   }
1956   if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
1957     const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1958     for (TargetSchedModel::ProcResIter
1959            PI = SchedModel->getWriteProcResBegin(SC),
1960            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1961       unsigned NRCycle = getNextResourceCycle(PI->ProcResourceIdx, PI->Cycles);
1962       if (NRCycle > CurrCycle) {
1963 #ifndef NDEBUG
1964         MaxObservedStall = std::max(PI->Cycles, MaxObservedStall);
1965 #endif
1966         DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") "
1967               << SchedModel->getResourceName(PI->ProcResourceIdx)
1968               << "=" << NRCycle << "c\n");
1969         return true;
1970       }
1971     }
1972   }
1973   return false;
1974 }
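
// Illustration of the micro-op check: with IssueWidth == 2, an SU costing
// 2 micro-ops is a hazard whenever CurrMOps == 1, even if its operands are
// ready; it must wait for the current group to end at the next cycle.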
1975 
1976 // Find the unscheduled node in ReadySUs with the highest latency.
1977 unsigned SchedBoundary::
1978 findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
1979   SUnit *LateSU = nullptr;
1980   unsigned RemLatency = 0;
1981   for (ArrayRef<SUnit*>::iterator I = ReadySUs.begin(), E = ReadySUs.end();
1982        I != E; ++I) {
1983     unsigned L = getUnscheduledLatency(*I);
1984     if (L > RemLatency) {
1985       RemLatency = L;
1986       LateSU = *I;
1987     }
1988   }
1989   if (LateSU) {
1990     DEBUG(dbgs() << Available.getName() << " RemLatency SU("
1991           << LateSU->NodeNum << ") " << RemLatency << "c\n");
1992   }
1993   return RemLatency;
1994 }
1995 
1996 // Count resources in this zone and the remaining unscheduled
1997 // instruction. Return the max count, scaled. Set OtherCritIdx to the critical
1998 // resource index, or zero if the zone is issue limited.
1999 unsigned SchedBoundary::
2000 getOtherResourceCount(unsigned &OtherCritIdx) {
2001   OtherCritIdx = 0;
2002   if (!SchedModel->hasInstrSchedModel())
2003     return 0;
2004 
2005   unsigned OtherCritCount = Rem->RemIssueCount
2006     + (RetiredMOps * SchedModel->getMicroOpFactor());
2007   DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
2008         << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
2009   for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
2010        PIdx != PEnd; ++PIdx) {
2011     unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
2012     if (OtherCount > OtherCritCount) {
2013       OtherCritCount = OtherCount;
2014       OtherCritIdx = PIdx;
2015     }
2016   }
2017   if (OtherCritIdx) {
2018     DEBUG(dbgs() << "  " << Available.getName() << " + Remain CritRes: "
2019           << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
2020           << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
2021   }
2022   return OtherCritCount;
2023 }
2024 
2025 void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) {
2026   assert(SU->getInstr() && "Scheduled SUnit must have instr");
2027 
2028 #ifndef NDEBUG
2029   // ReadyCycle was bumped up to CurrCycle when this node was
2030   // scheduled, but CurrCycle may have been eagerly advanced immediately after
2031   // scheduling, so may now be greater than ReadyCycle.
2032   if (ReadyCycle > CurrCycle)
2033     MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
2034 #endif
2035 
2036   if (ReadyCycle < MinReadyCycle)
2037     MinReadyCycle = ReadyCycle;
2038 
2039   // Check for interlocks first. For the purpose of other heuristics, an
2040   // instruction that cannot issue appears as if it's not in the ReadyQueue.
2041   bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
2042   if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU) ||
2043       Available.size() >= ReadyListLimit)
2044     Pending.push(SU);
2045   else
2046     Available.push(SU);
2047 }
2048 
2049 /// Move the boundary of scheduled code by one cycle.
2050 void SchedBoundary::bumpCycle(unsigned NextCycle) {
2051   if (SchedModel->getMicroOpBufferSize() == 0) {
2052     assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
2053     if (MinReadyCycle > NextCycle)
2054       NextCycle = MinReadyCycle;
2055   }
2056   // Update the current micro-ops, which will issue in the next cycle.
2057   unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
2058   CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;
2059 
2060   // Decrement DependentLatency based on the next cycle.
2061   if ((NextCycle - CurrCycle) > DependentLatency)
2062     DependentLatency = 0;
2063   else
2064     DependentLatency -= (NextCycle - CurrCycle);
2065 
2066   if (!HazardRec->isEnabled()) {
2067     // Bypass HazardRec virtual calls.
2068     CurrCycle = NextCycle;
2069   } else {
2070     // Bypass getHazardType calls in case of long latency.
2071     for (; CurrCycle != NextCycle; ++CurrCycle) {
2072       if (isTop())
2073         HazardRec->AdvanceCycle();
2074       else
2075         HazardRec->RecedeCycle();
2076     }
2077   }
2078   CheckPending = true;
2079   unsigned LFactor = SchedModel->getLatencyFactor();
2080   IsResourceLimited =
2081     (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
2082     > (int)LFactor;
2083 
2084   DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n');
2085 }
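
// For example, with IssueWidth == 4, bumping from cycle 10 to cycle 12
// frees DecMOps == 8 micro-op slots (so any CurrMOps <= 8 resets to 0)
// and shrinks DependentLatency by the same 2 cycles.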
2086 
2087 void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
2088   ExecutedResCounts[PIdx] += Count;
2089   if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
2090     MaxExecutedResCount = ExecutedResCounts[PIdx];
2091 }
2092 
2093 /// Add the given processor resource to this scheduled zone.
2094 ///
2095 /// \param Cycles indicates the number of consecutive (non-pipelined) cycles
2096 /// during which this resource is consumed.
2097 ///
2098 /// \return the next cycle at which the instruction may execute without
2099 /// oversubscribing resources.
2100 unsigned SchedBoundary::
2101 countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
2102   unsigned Factor = SchedModel->getResourceFactor(PIdx);
2103   unsigned Count = Factor * Cycles;
2104   DEBUG(dbgs() << "  " << SchedModel->getResourceName(PIdx)
2105         << " +" << Cycles << "x" << Factor << "u\n");
2106 
2107   // Update Executed resources counts.
2108   incExecutedResources(PIdx, Count);
2109   assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
2110   Rem->RemainingCounts[PIdx] -= Count;
2111 
2112   // Check if this resource exceeds the current critical resource. If so, it
2113   // becomes the critical resource.
2114   if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
2115     ZoneCritResIdx = PIdx;
2116     DEBUG(dbgs() << "  *** Critical resource "
2117           << SchedModel->getResourceName(PIdx) << ": "
2118           << getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
2119   }
2120   // For reserved resources, record the highest cycle using the resource.
2121   unsigned NextAvailable = getNextResourceCycle(PIdx, Cycles);
2122   if (NextAvailable > CurrCycle) {
2123     DEBUG(dbgs() << "  Resource conflict: "
2124           << SchedModel->getProcResource(PIdx)->Name << " reserved until @"
2125           << NextAvailable << "\n");
2126   }
2127   return NextAvailable;
2128 }
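
// A note on the scaling convention assumed here: a resource with N units
// gets a factor of roughly LatencyFactor / N, so Count = Factor * Cycles
// expresses consumption of differently provisioned resources in common
// units, and getResourceCount(PIdx) divided by the latency factor can be
// compared directly against cycles.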
2129 
2130 /// Move the boundary of scheduled code by one SUnit.
2131 void SchedBoundary::bumpNode(SUnit *SU) {
2132   // Update the reservation table.
2133   if (HazardRec->isEnabled()) {
2134     if (!isTop() && SU->isCall) {
2135       // Calls are scheduled with their preceding instructions. For bottom-up
2136       // scheduling, clear the pipeline state before emitting.
2137       HazardRec->Reset();
2138     }
2139     HazardRec->EmitInstruction(SU);
2140   }
2141   // checkHazard should prevent scheduling multiple instructions per cycle that
2142   // exceed the issue width.
2143   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2144   unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
2145   assert(
2146       (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
2147       "Cannot schedule this instruction's MicroOps in the current cycle.");
2148 
2149   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
2150   DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");
2151 
2152   unsigned NextCycle = CurrCycle;
2153   switch (SchedModel->getMicroOpBufferSize()) {
2154   case 0:
2155     assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
2156     break;
2157   case 1:
2158     if (ReadyCycle > NextCycle) {
2159       NextCycle = ReadyCycle;
2160       DEBUG(dbgs() << "  *** Stall until: " << ReadyCycle << "\n");
2161     }
2162     break;
2163   default:
2164     // We don't currently model the OOO reorder buffer, so consider all
2165     // scheduled MOps to be "retired". We do loosely model in-order resource
2166     // latency. If this instruction uses an in-order resource, account for any
2167     // likely stall cycles.
2168     if (SU->isUnbuffered && ReadyCycle > NextCycle)
2169       NextCycle = ReadyCycle;
2170     break;
2171   }
2172   RetiredMOps += IncMOps;
2173 
2174   // Update resource counts and critical resource.
2175   if (SchedModel->hasInstrSchedModel()) {
2176     unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
2177     assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
2178     Rem->RemIssueCount -= DecRemIssue;
2179     if (ZoneCritResIdx) {
2180       // Scale scheduled micro-ops for comparing with the critical resource.
2181       unsigned ScaledMOps =
2182         RetiredMOps * SchedModel->getMicroOpFactor();
2183 
2184       // If scaled micro-ops are now more than the previous critical resource by
2185       // a full cycle, then micro-ops issue becomes critical.
2186       if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
2187           >= (int)SchedModel->getLatencyFactor()) {
2188         ZoneCritResIdx = 0;
2189         DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
2190               << ScaledMOps / SchedModel->getLatencyFactor() << "c\n");
2191       }
2192     }
2193     for (TargetSchedModel::ProcResIter
2194            PI = SchedModel->getWriteProcResBegin(SC),
2195            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2196       unsigned RCycle =
2197         countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
2198       if (RCycle > NextCycle)
2199         NextCycle = RCycle;
2200     }
2201     if (SU->hasReservedResource) {
2202       // For reserved resources, record the highest cycle using the resource.
2203       // For top-down scheduling, this is the cycle in which we schedule this
2204       // instruction plus the number of cycles the operation reserves the
2205       // resource. For bottom-up, it is simply the instruction's cycle.
2206       for (TargetSchedModel::ProcResIter
2207              PI = SchedModel->getWriteProcResBegin(SC),
2208              PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2209         unsigned PIdx = PI->ProcResourceIdx;
2210         if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
2211           if (isTop()) {
2212             ReservedCycles[PIdx] =
2213               std::max(getNextResourceCycle(PIdx, 0), NextCycle + PI->Cycles);
2214           }
2215           else
2216             ReservedCycles[PIdx] = NextCycle;
2217         }
2218       }
2219     }
2220   }
2221   // Update ExpectedLatency and DependentLatency.
2222   unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
2223   unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
2224   if (SU->getDepth() > TopLatency) {
2225     TopLatency = SU->getDepth();
2226     DEBUG(dbgs() << "  " << Available.getName()
2227           << " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n");
2228   }
2229   if (SU->getHeight() > BotLatency) {
2230     BotLatency = SU->getHeight();
2231     DEBUG(dbgs() << "  " << Available.getName()
2232           << " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n");
2233   }
2234   // If we stall for any reason, bump the cycle.
2235   if (NextCycle > CurrCycle) {
2236     bumpCycle(NextCycle);
2237   } else {
2238     // After updating ZoneCritResIdx and ExpectedLatency, check if we're
2239     // resource limited. If a stall occurred, bumpCycle does this.
2240     unsigned LFactor = SchedModel->getLatencyFactor();
2241     IsResourceLimited =
2242       (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
2243       > (int)LFactor;
2244   }
2245   // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
2246   // resets CurrMOps. Loop to handle instructions with more MOps than can issue in
2247   // one cycle.  Since we commonly reach the max MOps here, opportunistically
2248   // bump the cycle to avoid uselessly checking everything in the readyQ.
2249   CurrMOps += IncMOps;
2250   while (CurrMOps >= SchedModel->getIssueWidth()) {
2251     DEBUG(dbgs() << "  *** Max MOps " << CurrMOps
2252           << " at cycle " << CurrCycle << '\n');
2253     bumpCycle(++NextCycle);
2254   }
2255   DEBUG(dumpScheduledState());
2256 }
2257 
2258 /// Release pending ready nodes into the available queue. This makes them
2259 /// visible to heuristics.
2260 void SchedBoundary::releasePending() {
2261   // If the available queue is empty, it is safe to reset MinReadyCycle.
2262   if (Available.empty())
2263     MinReadyCycle = UINT_MAX;
2264 
2265   // Check to see if any of the pending instructions are ready to issue.  If
2266   // so, add them to the available queue.
2267   bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
2268   for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
2269     SUnit *SU = *(Pending.begin()+i);
2270     unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
2271 
2272     if (ReadyCycle < MinReadyCycle)
2273       MinReadyCycle = ReadyCycle;
2274 
2275     if (!IsBuffered && ReadyCycle > CurrCycle)
2276       continue;
2277 
2278     if (checkHazard(SU))
2279       continue;
2280 
2281     if (Available.size() >= ReadyListLimit)
2282       break;
2283 
2284     Available.push(SU);
2285     Pending.remove(Pending.begin()+i);
2286     --i; --e;
2287   }
2288   CheckPending = false;
2289 }
2290 
2291 /// Remove SU from the ready set for this boundary.
2292 void SchedBoundary::removeReady(SUnit *SU) {
2293   if (Available.isInQueue(SU))
2294     Available.remove(Available.find(SU));
2295   else {
2296     assert(Pending.isInQueue(SU) && "bad ready count");
2297     Pending.remove(Pending.find(SU));
2298   }
2299 }
2300 
2301 /// If this queue only has one ready candidate, return it. As a side effect,
2302 /// defer any nodes that now hit a hazard, and advance the cycle until at least
2303 /// one node is ready. If multiple instructions are ready, return NULL.
2304 SUnit *SchedBoundary::pickOnlyChoice() {
2305   if (CheckPending)
2306     releasePending();
2307 
2308   if (CurrMOps > 0) {
2309     // Defer any ready instrs that now have a hazard.
2310     for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
2311       if (checkHazard(*I)) {
2312         Pending.push(*I);
2313         I = Available.remove(I);
2314         continue;
2315       }
2316       ++I;
2317     }
2318   }
2319   for (unsigned i = 0; Available.empty(); ++i) {
2320 //  FIXME: Re-enable assert once PR20057 is resolved.
2321 //    assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
2322 //           "permanent hazard");
2323     (void)i;
2324     bumpCycle(CurrCycle + 1);
2325     releasePending();
2326   }
2327 
2328   DEBUG(Pending.dump());
2329   DEBUG(Available.dump());
2330 
2331   if (Available.size() == 1)
2332     return *Available.begin();
2333   return nullptr;
2334 }
2335 
2336 #ifndef NDEBUG
2337 // This is useful information to dump after bumpNode.
2338 // Note that the Queue contents are more useful before pickNodeFromQueue.
2339 void SchedBoundary::dumpScheduledState() {
2340   unsigned ResFactor;
2341   unsigned ResCount;
2342   if (ZoneCritResIdx) {
2343     ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
2344     ResCount = getResourceCount(ZoneCritResIdx);
2345   } else {
2346     ResFactor = SchedModel->getMicroOpFactor();
2347     ResCount = RetiredMOps * SchedModel->getMicroOpFactor();
2348   }
2349   unsigned LFactor = SchedModel->getLatencyFactor();
2350   dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
2351          << "  Retired: " << RetiredMOps;
2352   dbgs() << "\n  Executed: " << getExecutedCount() / LFactor << "c";
2353   dbgs() << "\n  Critical: " << ResCount / LFactor << "c, "
2354          << ResCount / ResFactor << " "
2355          << SchedModel->getResourceName(ZoneCritResIdx)
2356          << "\n  ExpectedLatency: " << ExpectedLatency << "c\n"
2357          << (IsResourceLimited ? "  - Resource" : "  - Latency")
2358          << " limited.\n";
2359 }
2360 #endif
2361 
2362 //===----------------------------------------------------------------------===//
2363 // GenericScheduler - Generic implementation of MachineSchedStrategy.
2364 //===----------------------------------------------------------------------===//
2365 
2366 void GenericSchedulerBase::SchedCandidate::
2367 initResourceDelta(const ScheduleDAGMI *DAG,
2368                   const TargetSchedModel *SchedModel) {
2369   if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
2370     return;
2371 
2372   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2373   for (TargetSchedModel::ProcResIter
2374          PI = SchedModel->getWriteProcResBegin(SC),
2375          PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2376     if (PI->ProcResourceIdx == Policy.ReduceResIdx)
2377       ResDelta.CritResources += PI->Cycles;
2378     if (PI->ProcResourceIdx == Policy.DemandResIdx)
2379       ResDelta.DemandedResources += PI->Cycles;
2380   }
2381 }
2382 
2383 /// Set the CandPolicy for a scheduling zone, given the current resources and
2384 /// latencies inside and outside the zone.
2385 void GenericSchedulerBase::setPolicy(CandPolicy &Policy, bool IsPostRA,
2386                                      SchedBoundary &CurrZone,
2387                                      SchedBoundary *OtherZone) {
2388   // Apply preemptive heuristics based on the total latency and resources
2389   // inside and outside this zone. Potential stalls should be considered before
2390   // following this policy.
2391 
2392   // Compute remaining latency. We need this both to determine whether the
2393   // overall schedule has become latency-limited and whether the instructions
2394   // outside this zone are resource or latency limited.
2395   //
2396   // The "dependent" latency is updated incrementally during scheduling as the
2397   // max height/depth of scheduled nodes minus the cycles since it was
2398   // scheduled:
2399   //   DLat = max(N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
2400   //
2401   // The "independent" latency is the max ready queue depth:
2402   //   ILat = max N.depth for N in Available|Pending
2403   //
2404   // RemainingLatency is the greater of independent and dependent latency.
2405   unsigned RemLatency = CurrZone.getDependentLatency();
2406   RemLatency = std::max(RemLatency,
2407                         CurrZone.findMaxLatency(CurrZone.Available.elements()));
2408   RemLatency = std::max(RemLatency,
2409                         CurrZone.findMaxLatency(CurrZone.Pending.elements()));
2410 
2411   // Compute the critical resource outside the zone.
2412   unsigned OtherCritIdx = 0;
2413   unsigned OtherCount =
2414     OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;
2415 
2416   bool OtherResLimited = false;
2417   if (SchedModel->hasInstrSchedModel()) {
2418     unsigned LFactor = SchedModel->getLatencyFactor();
2419     OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
2420   }
2421   // Schedule aggressively for latency in PostRA mode. We don't check for
2422   // acyclic latency during PostRA, and highly out-of-order processors will
2423   // skip PostRA scheduling.
2424   if (!OtherResLimited) {
2425     if (IsPostRA || (RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath)) {
2426       Policy.ReduceLatency |= true;
2427       DEBUG(dbgs() << "  " << CurrZone.Available.getName()
2428             << " RemainingLatency " << RemLatency << " + "
2429             << CurrZone.getCurrCycle() << "c > CritPath "
2430             << Rem.CriticalPath << "\n");
2431     }
2432   }
2433   // If the same resource is limiting inside and outside the zone, do nothing.
2434   if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
2435     return;
2436 
2437   DEBUG(
2438     if (CurrZone.isResourceLimited()) {
2439       dbgs() << "  " << CurrZone.Available.getName() << " ResourceLimited: "
2440              << SchedModel->getResourceName(CurrZone.getZoneCritResIdx())
2441              << "\n";
2442     }
2443     if (OtherResLimited)
2444       dbgs() << "  RemainingLimit: "
2445              << SchedModel->getResourceName(OtherCritIdx) << "\n";
2446     if (!CurrZone.isResourceLimited() && !OtherResLimited)
2447       dbgs() << "  Latency limited both directions.\n");
2448 
2449   if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
2450     Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();
2451 
2452   if (OtherResLimited)
2453     Policy.DemandResIdx = OtherCritIdx;
2454 }
2455 
2456 #ifndef NDEBUG
2457 const char *GenericSchedulerBase::getReasonStr(
2458   GenericSchedulerBase::CandReason Reason) {
2459   switch (Reason) {
2460   case NoCand:         return "NOCAND    ";
2461   case Only1:          return "ONLY1     ";
2462   case PhysRegCopy:    return "PREG-COPY ";
2463   case RegExcess:      return "REG-EXCESS";
2464   case RegCritical:    return "REG-CRIT  ";
2465   case Stall:          return "STALL     ";
2466   case Cluster:        return "CLUSTER   ";
2467   case Weak:           return "WEAK      ";
2468   case RegMax:         return "REG-MAX   ";
2469   case ResourceReduce: return "RES-REDUCE";
2470   case ResourceDemand: return "RES-DEMAND";
2471   case TopDepthReduce: return "TOP-DEPTH ";
2472   case TopPathReduce:  return "TOP-PATH  ";
2473   case BotHeightReduce:return "BOT-HEIGHT";
2474   case BotPathReduce:  return "BOT-PATH  ";
2475   case NextDefUse:     return "DEF-USE   ";
2476   case NodeOrder:      return "ORDER     ";
2477   }
2478   llvm_unreachable("Unknown reason!");
2479 }
2480 
2481 void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
2482   PressureChange P;
2483   unsigned ResIdx = 0;
2484   unsigned Latency = 0;
2485   switch (Cand.Reason) {
2486   default:
2487     break;
2488   case RegExcess:
2489     P = Cand.RPDelta.Excess;
2490     break;
2491   case RegCritical:
2492     P = Cand.RPDelta.CriticalMax;
2493     break;
2494   case RegMax:
2495     P = Cand.RPDelta.CurrentMax;
2496     break;
2497   case ResourceReduce:
2498     ResIdx = Cand.Policy.ReduceResIdx;
2499     break;
2500   case ResourceDemand:
2501     ResIdx = Cand.Policy.DemandResIdx;
2502     break;
2503   case TopDepthReduce:
2504     Latency = Cand.SU->getDepth();
2505     break;
2506   case TopPathReduce:
2507     Latency = Cand.SU->getHeight();
2508     break;
2509   case BotHeightReduce:
2510     Latency = Cand.SU->getHeight();
2511     break;
2512   case BotPathReduce:
2513     Latency = Cand.SU->getDepth();
2514     break;
2515   }
2516   dbgs() << "  Cand SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
2517   if (P.isValid())
2518     dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
2519            << ":" << P.getUnitInc() << " ";
2520   else
2521     dbgs() << "      ";
2522   if (ResIdx)
2523     dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
2524   else
2525     dbgs() << "         ";
2526   if (Latency)
2527     dbgs() << " " << Latency << " cycles ";
2528   else
2529     dbgs() << "          ";
2530   dbgs() << '\n';
2531 }
2532 #endif
2533 
2534 /// Return true if this heuristic determines order.
2535 static bool tryLess(int TryVal, int CandVal,
2536                     GenericSchedulerBase::SchedCandidate &TryCand,
2537                     GenericSchedulerBase::SchedCandidate &Cand,
2538                     GenericSchedulerBase::CandReason Reason) {
2539   if (TryVal < CandVal) {
2540     TryCand.Reason = Reason;
2541     return true;
2542   }
2543   if (TryVal > CandVal) {
2544     if (Cand.Reason > Reason)
2545       Cand.Reason = Reason;
2546     return true;
2547   }
2548   return false;
2549 }
2550 
2551 static bool tryGreater(int TryVal, int CandVal,
2552                        GenericSchedulerBase::SchedCandidate &TryCand,
2553                        GenericSchedulerBase::SchedCandidate &Cand,
2554                        GenericSchedulerBase::CandReason Reason) {
2555   if (TryVal > CandVal) {
2556     TryCand.Reason = Reason;
2557     return true;
2558   }
2559   if (TryVal < CandVal) {
2560     if (Cand.Reason > Reason)
2561       Cand.Reason = Reason;
2562     return true;
2563   }
2564   return false;
2565 }
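
// Example of the tie-breaking contract: tryGreater(1, 0, TryCand, Cand,
// Cluster) marks TryCand as winning for reason Cluster; tryGreater(0, 1,
// ...) instead records Cluster on Cand if that is a stronger (smaller)
// reason. Both calls return true because the heuristic decided the order;
// equal values return false and fall through to the next heuristic.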
2566 
2567 static bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
2568                        GenericSchedulerBase::SchedCandidate &Cand,
2569                        SchedBoundary &Zone) {
2570   if (Zone.isTop()) {
2571     if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
2572       if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2573                   TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
2574         return true;
2575     }
2576     if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2577                    TryCand, Cand, GenericSchedulerBase::TopPathReduce))
2578       return true;
2579   } else {
2580     if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
2581       if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2582                   TryCand, Cand, GenericSchedulerBase::BotHeightReduce))
2583         return true;
2584     }
2585     if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2586                    TryCand, Cand, GenericSchedulerBase::BotPathReduce))
2587       return true;
2588   }
2589   return false;
2590 }
2591 
2592 static void tracePick(GenericSchedulerBase::CandReason Reason, bool IsTop) {
2593   DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
2594         << GenericSchedulerBase::getReasonStr(Reason) << '\n');
2595 }
2596 
2597 static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand) {
2598   tracePick(Cand.Reason, Cand.AtTop);
2599 }
2600 
2601 void GenericScheduler::initialize(ScheduleDAGMI *dag) {
2602   assert(dag->hasVRegLiveness() &&
2603          "(PreRA)GenericScheduler needs vreg liveness");
2604   DAG = static_cast<ScheduleDAGMILive*>(dag);
2605   SchedModel = DAG->getSchedModel();
2606   TRI = DAG->TRI;
2607 
2608   Rem.init(DAG, SchedModel);
2609   Top.init(DAG, SchedModel, &Rem);
2610   Bot.init(DAG, SchedModel, &Rem);
2611 
2612   // Initialize resource counts.
2613 
2614   // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
2615   // are disabled, then these HazardRecs will be disabled.
2616   const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
2617   if (!Top.HazardRec) {
2618     Top.HazardRec =
2619         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
2620             Itin, DAG);
2621   }
2622   if (!Bot.HazardRec) {
2623     Bot.HazardRec =
2624         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
2625             Itin, DAG);
2626   }
2627   TopCand.SU = nullptr;
2628   BotCand.SU = nullptr;
2629 }
2630 
2631 /// Initialize the per-region scheduling policy.
2632 void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
2633                                   MachineBasicBlock::iterator End,
2634                                   unsigned NumRegionInstrs) {
2635   const MachineFunction &MF = *Begin->getParent()->getParent();
2636   const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();
2637 
2638   // Avoid setting up the register pressure tracker for small regions to save
2639   // compile time. As a rough heuristic, only track pressure when the number of
2640   // schedulable instructions exceeds half the integer register file.
2641   RegionPolicy.ShouldTrackPressure = true;
2642   for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
2643     MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
2644     if (TLI->isTypeLegal(LegalIntVT)) {
2645       unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
2646         TLI->getRegClassFor(LegalIntVT));
2647       RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
2648     }
2649   }
2650 
2651   // For generic targets, we default to bottom-up, because it's simpler, and more
2652   // compile-time optimizations have been implemented in that direction.
2653   RegionPolicy.OnlyBottomUp = true;
2654 
2655   // Allow the subtarget to override default policy.
2656   MF.getSubtarget().overrideSchedPolicy(RegionPolicy, NumRegionInstrs);
2657 
2658   // After subtarget overrides, apply command line options.
2659   if (!EnableRegPressure)
2660     RegionPolicy.ShouldTrackPressure = false;
2661 
2662   // The -misched-topdown/bottomup flags can force or unforce the scheduling direction.
2663   // e.g. -misched-bottomup=false allows scheduling in both directions.
2664   assert((!ForceTopDown || !ForceBottomUp) &&
2665          "-misched-topdown incompatible with -misched-bottomup");
2666   if (ForceBottomUp.getNumOccurrences() > 0) {
2667     RegionPolicy.OnlyBottomUp = ForceBottomUp;
2668     if (RegionPolicy.OnlyBottomUp)
2669       RegionPolicy.OnlyTopDown = false;
2670   }
2671   if (ForceTopDown.getNumOccurrences() > 0) {
2672     RegionPolicy.OnlyTopDown = ForceTopDown;
2673     if (RegionPolicy.OnlyTopDown)
2674       RegionPolicy.OnlyBottomUp = false;
2675   }
2676 }
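
// For example (hypothetical target): if the widest legal integer class has
// 16 allocatable registers, the loop above enables pressure tracking only
// for regions of more than 8 schedulable instructions, unless the subtarget
// or the -misched-regpressure flag overrides it.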
2677 
2678 void GenericScheduler::dumpPolicy() {
2679   dbgs() << "GenericScheduler RegionPolicy: "
2680          << " ShouldTrackPressure=" << RegionPolicy.ShouldTrackPressure
2681          << " OnlyTopDown=" << RegionPolicy.OnlyTopDown
2682          << " OnlyBottomUp=" << RegionPolicy.OnlyBottomUp
2683          << "\n";
2684 }
2685 
2686 /// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
2687 /// critical path by more cycles than it takes to drain the instruction buffer.
2688 /// We estimate an upper bound on in-flight instructions as:
2689 ///
2690 /// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
2691 /// InFlightIterations = AcyclicPath / CyclesPerIteration
2692 /// InFlightResources = InFlightIterations * LoopResources
2693 ///
2694 /// TODO: Check execution resources in addition to IssueCount.
2695 void GenericScheduler::checkAcyclicLatency() {
2696   if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
2697     return;
2698 
2699   // Scaled number of cycles per loop iteration.
2700   unsigned IterCount =
2701     std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
2702              Rem.RemIssueCount);
2703   // Scaled acyclic critical path.
2704   unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
2705   // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
2706   unsigned InFlightCount =
2707     (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
2708   unsigned BufferLimit =
2709     SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();
2710 
2711   Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;
2712 
2713   DEBUG(dbgs() << "IssueCycles="
2714         << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
2715         << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
2716         << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
2717         << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
2718         << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
2719         if (Rem.IsAcyclicLatencyLimited)
2720           dbgs() << "  ACYCLIC LATENCY LIMIT\n");
2721 }
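
// Worked example (hypothetical, assuming MicroOpFactor == LatencyFactor
// == 1): CyclicCritPath = 4c, CriticalPath = 40c, RemIssueCount = 8 and a
// 32-entry micro-op buffer give IterCount = max(4, 8) = 8, AcyclicCount =
// 40, and InFlightCount = ceil(40 * 8 / 8) = 40 > BufferLimit = 32, so the
// region is marked acyclic-latency limited.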
2722 
2723 void GenericScheduler::registerRoots() {
2724   Rem.CriticalPath = DAG->ExitSU.getDepth();
2725 
2726   // Some roots may not feed into ExitSU. Check all of them in case.
2727   for (std::vector<SUnit*>::const_iterator
2728          I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
2729     if ((*I)->getDepth() > Rem.CriticalPath)
2730       Rem.CriticalPath = (*I)->getDepth();
2731   }
2732   DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
2733   if (DumpCriticalPathLength) {
2734     errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n";
2735   }
2736 
2737   if (EnableCyclicPath) {
2738     Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
2739     checkAcyclicLatency();
2740   }
2741 }
2742 
2743 static bool tryPressure(const PressureChange &TryP,
2744                         const PressureChange &CandP,
2745                         GenericSchedulerBase::SchedCandidate &TryCand,
2746                         GenericSchedulerBase::SchedCandidate &Cand,
2747                         GenericSchedulerBase::CandReason Reason,
2748                         const TargetRegisterInfo *TRI,
2749                         const MachineFunction &MF) {
2750   // If one candidate decreases and the other increases, go with it.
2751   // Invalid candidates have UnitInc==0.
2752   if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
2753                  Reason)) {
2754     return true;
2755   }
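       // E.g., a TryCand with UnitInc = -2 (a pressure decrease) wins here over
       // a Cand with UnitInc = +1 (an increase); the values are illustrative.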
2756   // Do not compare the magnitude of pressure changes between top and bottom
2757   // boundary.
2758   if (Cand.AtTop != TryCand.AtTop)
2759     return false;
2760 
2761   // If both candidates affect the same set in the same boundary, go with the
2762   // smallest increase.
2763   unsigned TryPSet = TryP.getPSetOrMax();
2764   unsigned CandPSet = CandP.getPSetOrMax();
2765   if (TryPSet == CandPSet) {
2766     return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
2767                    Reason);
2768   }
2769 
2770   int TryRank = TryP.isValid() ? TRI->getRegPressureSetScore(MF, TryPSet) :
2771                                  std::numeric_limits<int>::max();
2772 
2773   int CandRank = CandP.isValid() ? TRI->getRegPressureSetScore(MF, CandPSet) :
2774                                    std::numeric_limits<int>::max();
2775 
2776   // If the candidates are decreasing pressure, reverse priority.
2777   if (TryP.getUnitInc() < 0)
2778     std::swap(TryRank, CandRank);
2779   return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
2780 }
2781 
2782 static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
2783   return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
2784 }
2785 
2786 /// Minimize physical register live ranges. Regalloc wants them adjacent to
2787 /// their physreg def/use.
2788 ///
2789 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
2790 /// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
2791 /// with the operation that produces or consumes the physreg. We'll do this when
2792 /// regalloc has support for parallel copies.
2793 static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
2794   const MachineInstr *MI = SU->getInstr();
2795   if (!MI->isCopy())
2796     return 0;
2797 
2798   unsigned ScheduledOper = isTop ? 1 : 0;
2799   unsigned UnscheduledOper = isTop ? 0 : 1;
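       // For a COPY, operand 0 is the def and operand 1 is the use. Top-down,
       // the operand whose dependence is already scheduled is the use (its
       // producer lies above); bottom-up it is the def (its consumer lies
       // below).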
2800   // If we have already scheduled the physreg producer/consumer, immediately
2801   // schedule the copy.
2802   if (TargetRegisterInfo::isPhysicalRegister(
2803         MI->getOperand(ScheduledOper).getReg()))
2804     return 1;
2805   // If the physreg is at the boundary, defer it. Otherwise schedule it
2806   // immediately to free the dependent. We can hoist the copy later.
2807   bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
2808   if (TargetRegisterInfo::isPhysicalRegister(
2809         MI->getOperand(UnscheduledOper).getReg()))
2810     return AtBoundary ? -1 : 1;
2811   return 0;
2812 }
2813 
2814 void GenericScheduler::initCandidate(SchedCandidate &Cand, SUnit *SU,
2815                                      bool AtTop,
2816                                      const RegPressureTracker &RPTracker,
2817                                      RegPressureTracker &TempTracker) {
2818   Cand.SU = SU;
2819   Cand.AtTop = AtTop;
2820   if (DAG->isTrackingPressure()) {
2821     if (AtTop) {
2822       TempTracker.getMaxDownwardPressureDelta(
2823         Cand.SU->getInstr(),
2824         Cand.RPDelta,
2825         DAG->getRegionCriticalPSets(),
2826         DAG->getRegPressure().MaxSetPressure);
2827     } else {
2828       if (VerifyScheduling) {
2829         TempTracker.getMaxUpwardPressureDelta(
2830           Cand.SU->getInstr(),
2831           &DAG->getPressureDiff(Cand.SU),
2832           Cand.RPDelta,
2833           DAG->getRegionCriticalPSets(),
2834           DAG->getRegPressure().MaxSetPressure);
2835       } else {
2836         RPTracker.getUpwardPressureDelta(
2837           Cand.SU->getInstr(),
2838           DAG->getPressureDiff(Cand.SU),
2839           Cand.RPDelta,
2840           DAG->getRegionCriticalPSets(),
2841           DAG->getRegPressure().MaxSetPressure);
2842       }
2843     }
2844   }
2845   DEBUG(if (Cand.RPDelta.Excess.isValid())
2846           dbgs() << "  Try  SU(" << Cand.SU->NodeNum << ") "
2847                  << TRI->getRegPressureSetName(Cand.RPDelta.Excess.getPSet())
2848                  << ":" << Cand.RPDelta.Excess.getUnitInc() << "\n");
2849 }
2850 
2851 /// Apply a set of heuristics to a new candidate. Heuristics are currently
2852 /// hierarchical. This may be more efficient than a graduated cost model because
2853 /// we don't need to evaluate all aspects of the model for each node in the
2854 /// queue. But it's really done to make the heuristics easier to debug and
2855 /// statistically analyze.
2856 ///
2857 /// \param Cand provides the policy and current best candidate.
2858 /// \param TryCand refers to the next SUnit candidate; otherwise uninitialized.
2859 /// \param Zone describes the scheduled zone that we are extending, or nullptr
2860 ///            if Cand is from a different zone than TryCand.
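     ///
     /// The checks below run in priority order: physreg copy bias, RegExcess,
     /// RegCritical, acyclic latency and stalls (same-boundary only), Cluster,
     /// Weak, RegMax, resource reduction and demand, remaining latency, and
     /// finally the original node order as the tie breaker.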
2861 void GenericScheduler::tryCandidate(SchedCandidate &Cand,
2862                                     SchedCandidate &TryCand,
2863                                     SchedBoundary *Zone) {
2864   // Initialize the candidate if needed.
2865   if (!Cand.isValid()) {
2866     TryCand.Reason = NodeOrder;
2867     return;
2868   }
2869 
2870   if (tryGreater(biasPhysRegCopy(TryCand.SU, TryCand.AtTop),
2871                  biasPhysRegCopy(Cand.SU, Cand.AtTop),
2872                  TryCand, Cand, PhysRegCopy))
2873     return;
2874 
2875   // Avoid exceeding the target's limit.
2876   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
2877                                                Cand.RPDelta.Excess,
2878                                                TryCand, Cand, RegExcess, TRI,
2879                                                DAG->MF))
2880     return;
2881 
2882   // Avoid increasing the max critical pressure in the scheduled region.
2883   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
2884                                                Cand.RPDelta.CriticalMax,
2885                                                TryCand, Cand, RegCritical, TRI,
2886                                                DAG->MF))
2887     return;
2888 
2889   // We only compare a subset of features when comparing nodes between the
2890   // Top and Bottom boundaries. Some properties are simply incomparable; in
2891   // many other instances we should only override the other boundary if
2892   // something is clearly a good pick on one boundary. Skip heuristics that
2893   // are more "tie-breaking" in nature.
2894   bool SameBoundary = Zone != nullptr;
2895   if (SameBoundary) {
2896     // For loops that are acyclic path limited, aggressively schedule for
2897     // latency. Within a single cycle, whenever CurrMOps > 0, allow normal
2898     // heuristics to take precedence.
2899     if (Rem.IsAcyclicLatencyLimited && !Zone->getCurrMOps() &&
2900         tryLatency(TryCand, Cand, *Zone))
2901       return;
2902 
2903     // Prioritize instructions that read unbuffered resources by stall cycles.
2904     if (tryLess(Zone->getLatencyStallCycles(TryCand.SU),
2905                 Zone->getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
2906       return;
2907   }
2908 
2909   // Keep clustered nodes together to encourage downstream peephole
2910   // optimizations which may reduce resource requirements.
2911   //
2912   // This is a best effort to set things up for a post-RA pass. Optimizations
2913   // like generating loads of multiple registers should ideally be done within
2914   // the scheduler pass by combining the loads during DAG postprocessing.
2915   const SUnit *CandNextClusterSU =
2916     Cand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
2917   const SUnit *TryCandNextClusterSU =
2918     TryCand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
2919   if (tryGreater(TryCand.SU == TryCandNextClusterSU,
2920                  Cand.SU == CandNextClusterSU,
2921                  TryCand, Cand, Cluster))
2922     return;
2923 
2924   if (SameBoundary) {
2925     // Weak edges are for clustering and other constraints.
2926     if (tryLess(getWeakLeft(TryCand.SU, TryCand.AtTop),
2927                 getWeakLeft(Cand.SU, Cand.AtTop),
2928                 TryCand, Cand, Weak))
2929       return;
2930   }
2931 
2932   // Avoid increasing the max pressure of the entire region.
2933   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
2934                                                Cand.RPDelta.CurrentMax,
2935                                                TryCand, Cand, RegMax, TRI,
2936                                                DAG->MF))
2937     return;
2938 
2939   if (SameBoundary) {
2940     // Avoid critical resource consumption and balance the schedule.
2941     TryCand.initResourceDelta(DAG, SchedModel);
2942     if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
2943                 TryCand, Cand, ResourceReduce))
2944       return;
2945     if (tryGreater(TryCand.ResDelta.DemandedResources,
2946                    Cand.ResDelta.DemandedResources,
2947                    TryCand, Cand, ResourceDemand))
2948       return;
2949 
2950     // Avoid serializing long latency dependence chains.
2951     // For acyclic path limited loops, latency was already checked above.
2952     if (!RegionPolicy.DisableLatencyHeuristic && TryCand.Policy.ReduceLatency &&
2953         !Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, *Zone))
2954       return;
2955 
2956     // Fall through to original instruction order.
2957     if ((Zone->isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
2958         || (!Zone->isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
2959       TryCand.Reason = NodeOrder;
2960     }
2961   }
2962 }
2963 
2964 /// Pick the best candidate from the queue.
2965 ///
2966 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
2967 /// DAG building. To adjust for the current scheduling location we need to
2968 /// maintain the number of vreg uses remaining to be top-scheduled.
2969 void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
2970                                          const CandPolicy &ZonePolicy,
2971                                          const RegPressureTracker &RPTracker,
2972                                          SchedCandidate &Cand) {
2973   // getMaxPressureDelta temporarily modifies the tracker.
2974   RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
2975 
2976   ReadyQueue &Q = Zone.Available;
2977   for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
2978 
2979     SchedCandidate TryCand(ZonePolicy);
2980     initCandidate(TryCand, *I, Zone.isTop(), RPTracker, TempTracker);
2981     // Pass SchedBoundary only when comparing nodes from the same boundary.
2982     SchedBoundary *ZoneArg = Cand.AtTop == TryCand.AtTop ? &Zone : nullptr;
2983     tryCandidate(Cand, TryCand, ZoneArg);
2984     if (TryCand.Reason != NoCand) {
2985       // Initialize resource delta if needed in case future heuristics query it.
2986       if (TryCand.ResDelta == SchedResourceDelta())
2987         TryCand.initResourceDelta(DAG, SchedModel);
2988       Cand.setBest(TryCand);
2989       DEBUG(traceCandidate(Cand));
2990     }
2991   }
2992 }
2993 
2994 /// Pick the best candidate node from either the top or bottom queue.
2995 SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
2996   // Schedule as far as possible in the direction of no choice. This is most
2997   // efficient, but also provides the best heuristics for CriticalPSets.
2998   if (SUnit *SU = Bot.pickOnlyChoice()) {
2999     IsTopNode = false;
3000     tracePick(Only1, false);
3001     return SU;
3002   }
3003   if (SUnit *SU = Top.pickOnlyChoice()) {
3004     IsTopNode = true;
3005     tracePick(Only1, true);
3006     return SU;
3007   }
3008   // Set the bottom-up policy based on the state of the current bottom zone and
3009   // the instructions outside the zone, including the top zone.
3010   CandPolicy BotPolicy;
3011   setPolicy(BotPolicy, /*IsPostRA=*/false, Bot, &Top);
3012   // Set the top-down policy based on the state of the current top zone and
3013   // the instructions outside the zone, including the bottom zone.
3014   CandPolicy TopPolicy;
3015   setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot);
3016 
3017   // See if BotCand is still valid (because we previously scheduled from Top).
3018   DEBUG(dbgs() << "Picking from Bot:\n");
3019   if (!BotCand.isValid() || BotCand.SU->isScheduled ||
3020       BotCand.Policy != BotPolicy) {
3021     BotCand.reset(CandPolicy());
3022     pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand);
3023     assert(BotCand.Reason != NoCand && "failed to find the first candidate");
3024   } else {
3025     DEBUG(traceCandidate(BotCand));
3026 #ifndef NDEBUG
3027     if (VerifyScheduling) {
3028       SchedCandidate TCand;
3029       TCand.reset(CandPolicy());
3030       pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), TCand);
3031       assert(TCand.SU == BotCand.SU &&
3032              "Last pick result should correspond to re-picking right now");
3033     }
3034 #endif
3035   }
3036 
3037   // Check if the top Q has a better candidate.
3038   DEBUG(dbgs() << "Picking from Top:\n");
3039   if (!TopCand.isValid() || TopCand.SU->isScheduled ||
3040       TopCand.Policy != TopPolicy) {
3041     TopCand.reset(CandPolicy());
3042     pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand);
3043     assert(TopCand.Reason != NoCand && "failed to find the first candidate");
3044   } else {
3045     DEBUG(traceCandidate(TopCand));
3046 #ifndef NDEBUG
3047     if (VerifyScheduling) {
3048       SchedCandidate TCand;
3049       TCand.reset(CandPolicy());
3050       pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TCand);
3051       assert(TCand.SU == TopCand.SU &&
3052              "Last pick result should correspond to re-picking right now");
3053     }
3054 #endif
3055   }
3056 
3057   // Pick best from BotCand and TopCand.
3058   assert(BotCand.isValid());
3059   assert(TopCand.isValid());
3060   SchedCandidate Cand = BotCand;
3061   TopCand.Reason = NoCand;
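       // tryCandidate sets TryCand.Reason only when TryCand wins a comparison,
       // so a Reason other than NoCand below means TopCand beat the bottom pick.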
3062   tryCandidate(Cand, TopCand, nullptr);
3063   if (TopCand.Reason != NoCand) {
3064     Cand.setBest(TopCand);
3065     DEBUG(traceCandidate(Cand));
3066   }
3067 
3068   IsTopNode = Cand.AtTop;
3069   tracePick(Cand);
3070   return Cand.SU;
3071 }
3072 
3073 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
3074 SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
3075   if (DAG->top() == DAG->bottom()) {
3076     assert(Top.Available.empty() && Top.Pending.empty() &&
3077            Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
3078     return nullptr;
3079   }
3080   SUnit *SU;
3081   do {
3082     if (RegionPolicy.OnlyTopDown) {
3083       SU = Top.pickOnlyChoice();
3084       if (!SU) {
3085         CandPolicy NoPolicy;
3086         TopCand.reset(NoPolicy);
3087         pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand);
3088         assert(TopCand.Reason != NoCand && "failed to find a candidate");
3089         tracePick(TopCand);
3090         SU = TopCand.SU;
3091       }
3092       IsTopNode = true;
3093     } else if (RegionPolicy.OnlyBottomUp) {
3094       SU = Bot.pickOnlyChoice();
3095       if (!SU) {
3096         CandPolicy NoPolicy;
3097         BotCand.reset(NoPolicy);
3098         pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand);
3099         assert(BotCand.Reason != NoCand && "failed to find a candidate");
3100         tracePick(BotCand);
3101         SU = BotCand.SU;
3102       }
3103       IsTopNode = false;
3104     } else {
3105       SU = pickNodeBidirectional(IsTopNode);
3106     }
3107   } while (SU->isScheduled);
3108 
3109   if (SU->isTopReady())
3110     Top.removeReady(SU);
3111   if (SU->isBottomReady())
3112     Bot.removeReady(SU);
3113 
3114   DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
3115   return SU;
3116 }
3117 
3118 void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
3119 
3120   MachineBasicBlock::iterator InsertPos = SU->getInstr();
3121   if (!isTop)
3122     ++InsertPos;
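       // Copies end up immediately above SU when scheduling top-down, or
       // immediately below it when scheduling bottom-up, since the move below
       // inserts before InsertPos.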
3123   SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;
3124 
3125   // Find already scheduled copies with a single physreg dependence and move
3126   // them adjacent to the scheduled instruction.
3127   for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end();
3128        I != E; ++I) {
3129     if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg()))
3130       continue;
3131     SUnit *DepSU = I->getSUnit();
3132     if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
3133       continue;
3134     MachineInstr *Copy = DepSU->getInstr();
3135     if (!Copy->isCopy())
3136       continue;
3137     DEBUG(dbgs() << "  Rescheduling physreg copy ";
3138           I->getSUnit()->dump(DAG));
3139     DAG->moveInstruction(Copy, InsertPos);
3140   }
3141 }
3142 
3143 /// Update the scheduler's state after scheduling a node. This is the same node
3144 /// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
3145 /// update its state based on the current cycle before MachineSchedStrategy
3146 /// does.
3147 ///
3148 /// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
3149 /// them here. See comments in biasPhysRegCopy.
3150 void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3151   if (IsTopNode) {
3152     SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3153     Top.bumpNode(SU);
3154     if (SU->hasPhysRegUses)
3155       reschedulePhysRegCopies(SU, true);
3156   } else {
3157     SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
3158     Bot.bumpNode(SU);
3159     if (SU->hasPhysRegDefs)
3160       reschedulePhysRegCopies(SU, false);
3161   }
3162 }
3163 
3164 /// Create the standard converging machine scheduler. This will be used as the
3165 /// default scheduler if the target does not set a default.
3166 static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C) {
3167   ScheduleDAGMILive *DAG = new ScheduleDAGMILive(C, make_unique<GenericScheduler>(C));
3168   // Register DAG post-processors.
3169   //
3170   // FIXME: extend the mutation API to allow earlier mutations to instantiate
3171   // data and pass it to later mutations. Have a single mutation that gathers
3172   // the interesting nodes in one pass.
3173   DAG->addMutation(createCopyConstrainDAGMutation(DAG->TII, DAG->TRI));
3174   if (EnableMemOpCluster) {
3175     if (DAG->TII->enableClusterLoads())
3176       DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
3177     if (DAG->TII->enableClusterStores())
3178       DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
3179   }
3180   if (EnableMacroFusion)
3181     DAG->addMutation(createMacroFusionDAGMutation(DAG->TII));
3182   return DAG;
3183 }
3184 
3185 static MachineSchedRegistry
3186 GenericSchedRegistry("converge", "Standard converging scheduler.",
3187                      createGenericSchedLive);
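     // Registering here makes the strategy selectable on the command line with
     // -misched=converge, alongside the other registries defined below.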
3188 
3189 //===----------------------------------------------------------------------===//
3190 // PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
3191 //===----------------------------------------------------------------------===//
3192 
3193 void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
3194   DAG = Dag;
3195   SchedModel = DAG->getSchedModel();
3196   TRI = DAG->TRI;
3197 
3198   Rem.init(DAG, SchedModel);
3199   Top.init(DAG, SchedModel, &Rem);
3200   BotRoots.clear();
3201 
3202   // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
3203   // or are disabled, then these HazardRecs will be disabled.
3204   const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
3205   if (!Top.HazardRec) {
3206     Top.HazardRec =
3207         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
3208             Itin, DAG);
3209   }
3210 }
3211 
3213 void PostGenericScheduler::registerRoots() {
3214   Rem.CriticalPath = DAG->ExitSU.getDepth();
3215 
3216   // Some roots may not feed into ExitSU. Check all of them just in case.
3217   for (SmallVectorImpl<SUnit*>::const_iterator
3218          I = BotRoots.begin(), E = BotRoots.end(); I != E; ++I) {
3219     if ((*I)->getDepth() > Rem.CriticalPath)
3220       Rem.CriticalPath = (*I)->getDepth();
3221   }
3222   DEBUG(dbgs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << '\n');
3223   if (DumpCriticalPathLength) {
3224     errs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << " \n";
3225   }
3226 }
3227 
3228 /// Apply a set of heuristics to a new candidate for PostRA scheduling.
3229 ///
3230 /// \param Cand provides the policy and current best candidate.
3231 /// \param TryCand refers to the next SUnit candidate; otherwise uninitialized.
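     ///
     /// The hierarchy is a reduced form of the pre-RA one: stalls, critical
     /// resource reduction, resource demand, latency, then original node order.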
3232 void PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
3233                                         SchedCandidate &TryCand) {
3234 
3235   // Initialize the candidate if needed.
3236   if (!Cand.isValid()) {
3237     TryCand.Reason = NodeOrder;
3238     return;
3239   }
3240 
3241   // Prioritize instructions that read unbuffered resources by stall cycles.
3242   if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
3243               Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
3244     return;
3245 
3246   // Avoid critical resource consumption and balance the schedule.
3247   if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
3248               TryCand, Cand, ResourceReduce))
3249     return;
3250   if (tryGreater(TryCand.ResDelta.DemandedResources,
3251                  Cand.ResDelta.DemandedResources,
3252                  TryCand, Cand, ResourceDemand))
3253     return;
3254 
3255   // Avoid serializing long latency dependence chains.
3256   if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
3257     return;
3258   }
3259 
3260   // Fall through to original instruction order.
3261   if (TryCand.SU->NodeNum < Cand.SU->NodeNum)
3262     TryCand.Reason = NodeOrder;
3263 }
3264 
3265 void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
3266   ReadyQueue &Q = Top.Available;
3267   for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
3268     SchedCandidate TryCand(Cand.Policy);
3269     TryCand.SU = *I;
3270     TryCand.AtTop = true;
3271     TryCand.initResourceDelta(DAG, SchedModel);
3272     tryCandidate(Cand, TryCand);
3273     if (TryCand.Reason != NoCand) {
3274       Cand.setBest(TryCand);
3275       DEBUG(traceCandidate(Cand));
3276     }
3277   }
3278 }
3279 
3280 /// Pick the next node to schedule.
3281 SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
3282   if (DAG->top() == DAG->bottom()) {
3283     assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
3284     return nullptr;
3285   }
3286   SUnit *SU;
3287   do {
3288     SU = Top.pickOnlyChoice();
3289     if (SU) {
3290       tracePick(Only1, true);
3291     } else {
3292       CandPolicy NoPolicy;
3293       SchedCandidate TopCand(NoPolicy);
3294       // Set the top-down policy based on the state of the current top zone and
3295       // the instructions outside the zone, including the bottom zone.
3296       setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
3297       pickNodeFromQueue(TopCand);
3298       assert(TopCand.Reason != NoCand && "failed to find a candidate");
3299       tracePick(TopCand);
3300       SU = TopCand.SU;
3301     }
3302   } while (SU->isScheduled);
3303 
3304   IsTopNode = true;
3305   Top.removeReady(SU);
3306 
3307   DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
3308   return SU;
3309 }
3310 
3311 /// Called after ScheduleDAGMI has scheduled an instruction and updated
3312 /// scheduled/remaining flags in the DAG nodes.
3313 void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3314   SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3315   Top.bumpNode(SU);
3316 }
3317 
3318 /// Create a generic scheduler with no vreg liveness or DAG mutation passes.
3319 static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C) {
3320   return new ScheduleDAGMI(C, make_unique<PostGenericScheduler>(C),
3321                            /*RemoveKillFlags=*/true);
3322 }
3323 
3324 //===----------------------------------------------------------------------===//
3325 // ILP Scheduler. Currently for experimental analysis of heuristics.
3326 //===----------------------------------------------------------------------===//
3327 
3328 namespace {
3329 /// \brief Order nodes by the ILP metric.
3330 struct ILPOrder {
3331   const SchedDFSResult *DFSResult;
3332   const BitVector *ScheduledTrees;
3333   bool MaximizeILP;
3334 
3335   ILPOrder(bool MaxILP)
3336     : DFSResult(nullptr), ScheduledTrees(nullptr), MaximizeILP(MaxILP) {}
3337 
3338   /// \brief Apply a less-than relation on node priority.
3339   ///
3340   /// (Return true if A comes after B in the Q.)
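       ///
       /// E.g., when the subtree-based checks tie, with MaximizeILP an SUnit
       /// whose subtree ILP is 3 compares after one whose ILP is 5, so the
       /// higher-ILP node is popped first (illustrative values).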
3341   bool operator()(const SUnit *A, const SUnit *B) const {
3342     unsigned SchedTreeA = DFSResult->getSubtreeID(A);
3343     unsigned SchedTreeB = DFSResult->getSubtreeID(B);
3344     if (SchedTreeA != SchedTreeB) {
3345       // Unscheduled trees have lower priority.
3346       if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
3347         return ScheduledTrees->test(SchedTreeB);
3348 
3349       // Trees with shallower connections have lower priority.
3350       if (DFSResult->getSubtreeLevel(SchedTreeA)
3351           != DFSResult->getSubtreeLevel(SchedTreeB)) {
3352         return DFSResult->getSubtreeLevel(SchedTreeA)
3353           < DFSResult->getSubtreeLevel(SchedTreeB);
3354       }
3355     }
3356     if (MaximizeILP)
3357       return DFSResult->getILP(A) < DFSResult->getILP(B);
3358     else
3359       return DFSResult->getILP(A) > DFSResult->getILP(B);
3360   }
3361 };
3362 
3363 /// \brief Schedule based on the ILP metric.
3364 class ILPScheduler : public MachineSchedStrategy {
3365   ScheduleDAGMILive *DAG;
3366   ILPOrder Cmp;
3367 
3368   std::vector<SUnit*> ReadyQ;
3369 public:
3370   ILPScheduler(bool MaximizeILP): DAG(nullptr), Cmp(MaximizeILP) {}
3371 
3372   void initialize(ScheduleDAGMI *dag) override {
3373     assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
3374     DAG = static_cast<ScheduleDAGMILive*>(dag);
3375     DAG->computeDFSResult();
3376     Cmp.DFSResult = DAG->getDFSResult();
3377     Cmp.ScheduledTrees = &DAG->getScheduledTrees();
3378     ReadyQ.clear();
3379   }
3380 
3381   void registerRoots() override {
3382     // Restore the heap in ReadyQ with the updated DFS results.
3383     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3384   }
3385 
3386   /// Implement MachineSchedStrategy interface.
3387   /// -----------------------------------------
3388 
3389   /// Callback to select the highest priority node from the ready Q.
3390   SUnit *pickNode(bool &IsTopNode) override {
3391     if (ReadyQ.empty()) return nullptr;
3392     std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3393     SUnit *SU = ReadyQ.back();
3394     ReadyQ.pop_back();
3395     IsTopNode = false;
3396     DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") "
3397           << " ILP: " << DAG->getDFSResult()->getILP(SU)
3398           << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @"
3399           << DAG->getDFSResult()->getSubtreeLevel(
3400             DAG->getDFSResult()->getSubtreeID(SU)) << '\n'
3401           << "Scheduling " << *SU->getInstr());
3402     return SU;
3403   }
3404 
3405   /// \brief Scheduler callback to notify that a new subtree is scheduled.
3406   void scheduleTree(unsigned SubtreeID) override {
3407     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3408   }
3409 
3410   /// Callback after a node is scheduled. Marking newly scheduled trees and
3411   /// resorting the priority Q happen via scheduleTree(); here we only check
3412   /// the scheduling direction.
3412   void schedNode(SUnit *SU, bool IsTopNode) override {
3413     assert(!IsTopNode && "SchedDFSResult needs bottom-up");
3414   }
3415 
3416   void releaseTopNode(SUnit *) override { /*only called for top roots*/ }
3417 
3418   void releaseBottomNode(SUnit *SU) override {
3419     ReadyQ.push_back(SU);
3420     std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3421   }
3422 };
3423 } // namespace
3424 
3425 static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
3426   return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(true));
3427 }
3428 static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
3429   return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(false));
3430 }
3431 static MachineSchedRegistry ILPMaxRegistry(
3432   "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
3433 static MachineSchedRegistry ILPMinRegistry(
3434   "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);
3435 
3436 //===----------------------------------------------------------------------===//
3437 // Machine Instruction Shuffler for Correctness Testing
3438 //===----------------------------------------------------------------------===//
3439 
3440 #ifndef NDEBUG
3441 namespace {
3442 /// Apply a less-than relation on the node order, which corresponds to the
3443 /// instruction order prior to scheduling. IsReverse implements greater-than.
3444 template<bool IsReverse>
3445 struct SUnitOrder {
3446   bool operator()(SUnit *A, SUnit *B) const {
3447     if (IsReverse)
3448       return A->NodeNum > B->NodeNum;
3449     else
3450       return A->NodeNum < B->NodeNum;
3451   }
3452 };
3453 
3454 /// Reorder instructions as much as possible.
3455 class InstructionShuffler : public MachineSchedStrategy {
3456   bool IsAlternating;
3457   bool IsTopDown;
3458 
3459   // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
3460   // gives nodes with a higher number higher priority, causing the latest
3461   // instructions to be scheduled first.
3462   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
3463     TopQ;
3464   // When scheduling bottom-up, use greater-than as the queue priority.
3465   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
3466     BottomQ;
3467 public:
3468   InstructionShuffler(bool alternate, bool topdown)
3469     : IsAlternating(alternate), IsTopDown(topdown) {}
3470 
3471   void initialize(ScheduleDAGMI*) override {
3472     TopQ.clear();
3473     BottomQ.clear();
3474   }
3475 
3476   /// Implement MachineSchedStrategy interface.
3477   /// -----------------------------------------
3478 
3479   SUnit *pickNode(bool &IsTopNode) override {
3480     SUnit *SU;
3481     if (IsTopDown) {
3482       do {
3483         if (TopQ.empty()) return nullptr;
3484         SU = TopQ.top();
3485         TopQ.pop();
3486       } while (SU->isScheduled);
3487       IsTopNode = true;
3488     } else {
3489       do {
3490         if (BottomQ.empty()) return nullptr;
3491         SU = BottomQ.top();
3492         BottomQ.pop();
3493       } while (SU->isScheduled);
3494       IsTopNode = false;
3495     }
3496     if (IsAlternating)
3497       IsTopDown = !IsTopDown;
3498     return SU;
3499   }
3500 
3501   void schedNode(SUnit *SU, bool IsTopNode) override {}
3502 
3503   void releaseTopNode(SUnit *SU) override {
3504     TopQ.push(SU);
3505   }
3506   void releaseBottomNode(SUnit *SU) override {
3507     BottomQ.push(SU);
3508   }
3509 };
3510 } // namespace
3511 
3512 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
3513   bool Alternate = !ForceTopDown && !ForceBottomUp;
3514   bool TopDown = !ForceBottomUp;
3515   assert((TopDown || !ForceTopDown) &&
3516          "-misched-topdown incompatible with -misched-bottomup");
3517   return new ScheduleDAGMILive(C, make_unique<InstructionShuffler>(Alternate, TopDown));
3518 }
3519 static MachineSchedRegistry ShufflerRegistry(
3520   "shuffle", "Shuffle machine instructions alternating directions",
3521   createInstructionShuffler);
3522 #endif // !NDEBUG
3523 
3524 //===----------------------------------------------------------------------===//
3525 // GraphWriter support for ScheduleDAGMILive.
3526 //===----------------------------------------------------------------------===//
3527 
3528 #ifndef NDEBUG
3529 namespace llvm {
3530 
3531 template<> struct GraphTraits<
3532   ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};
3533 
3534 template<>
3535 struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
3536 
3537   DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
3538 
3539   static std::string getGraphName(const ScheduleDAG *G) {
3540     return G->MF.getName();
3541   }
3542 
3543   static bool renderGraphFromBottomUp() {
3544     return true;
3545   }
3546 
3547   static bool isNodeHidden(const SUnit *Node) {
3548     if (ViewMISchedCutoff == 0)
3549       return false;
3550     return (Node->Preds.size() > ViewMISchedCutoff
3551          || Node->Succs.size() > ViewMISchedCutoff);
3552   }
3553 
3554   /// If you want to override the dot attributes printed for a particular
3555   /// edge, override this method.
3556   static std::string getEdgeAttributes(const SUnit *Node,
3557                                        SUnitIterator EI,
3558                                        const ScheduleDAG *Graph) {
3559     if (EI.isArtificialDep())
3560       return "color=cyan,style=dashed";
3561     if (EI.isCtrlDep())
3562       return "color=blue,style=dashed";
3563     return "";
3564   }
3565 
3566   static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
3567     std::string Str;
3568     raw_string_ostream SS(Str);
3569     const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
3570     const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
3571       static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
3572     SS << "SU:" << SU->NodeNum;
3573     if (DFS)
3574       SS << " I:" << DFS->getNumInstrs(SU);
3575     return SS.str();
3576   }
3577   static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
3578     return G->getGraphNodeLabel(SU);
3579   }
3580 
3581   static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
3582     std::string Str("shape=Mrecord");
3583     const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
3584     const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
3585       static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
3586     if (DFS) {
3587       Str += ",style=filled,fillcolor=\"#";
3588       Str += DOT::getColorString(DFS->getSubtreeID(N));
3589       Str += '"';
3590     }
3591     return Str;
3592   }
3593 };
3594 } // namespace llvm
3595 #endif // NDEBUG
3596 
3597 /// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
3598 /// rendered using 'dot'.
3599 ///
3600 void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
3601 #ifndef NDEBUG
3602   ViewGraph(this, Name, false, Title);
3603 #else
3604   errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
3605          << "systems with Graphviz or gv!\n";
3606 #endif  // NDEBUG
3607 }
3608 
3609 /// Out-of-line implementation with no arguments is handy for gdb.
3610 void ScheduleDAGMI::viewGraph() {
3611   viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
3612 }
3613