//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

#define DEBUG_TYPE "misched"

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
                       cl::desc("Print critical path length to stdout"));
}

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

/// In some situations a few uninteresting nodes depend on nearly all other
/// nodes in the graph; provide a cutoff to hide them.
static cl::opt<unsigned> ViewMISchedCutoff("view-misched-cutoff", cl::Hidden,
  cl::desc("Hide nodes with more predecessors/successors than the cutoff"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));

static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
  cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
  cl::desc("Only schedule this MBB#"));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

/// Avoid quadratic complexity in unusually large basic blocks by limiting the
/// size of the ready lists.
static cl::opt<unsigned> ReadyListLimit("misched-limit", cl::Hidden,
  cl::desc("Limit ready list to N instructions"), cl::init(256));

static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
  cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(true));

static cl::opt<bool> EnableMemOpCluster("misched-cluster", cl::Hidden,
                                        cl::desc("Enable memop clustering."),
                                        cl::init(true));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}
void ScheduleDAGMutation::anchor() {}

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
    MF(nullptr), MLI(nullptr), MDT(nullptr), PassConfig(nullptr), AA(nullptr),
    LIS(nullptr) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// Base class for a machine scheduler class that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}

  void print(raw_ostream &O, const Module* = nullptr) const override;

protected:
  void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags);
};

/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createMachineScheduler();
};

/// PostMachineScheduler runs shortly before code emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
  PostMachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createPostMachineScheduler();
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "machine-scheduler",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "machine-scheduler",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineSchedulerBase(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

char PostMachineScheduler::ID = 0;

char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;

INITIALIZE_PASS(PostMachineScheduler, "postmisched",
                "PostRA Machine Instruction Scheduler", false, false)

PostMachineScheduler::PostMachineScheduler()
: MachineSchedulerBase(ID) {
  initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return nullptr;
}
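
// As an illustration only (this block is not part of the pass): a target or
// plugin could hook its own scheduler variant into this registry. A
// hypothetical "my-custom" entry might look like:
//
//   static ScheduleDAGInstrs *createMyCustomSched(MachineSchedContext *C) {
//     return createGenericSchedLive(C); // any ScheduleDAGInstrs factory works
//   }
//   static MachineSchedRegistry
//   MyCustomSchedRegistry("my-custom", "Example scheduler (illustrative).",
//                         createMyCustomSched);
//
// The entry then becomes selectable with -misched=my-custom.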

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);

static cl::opt<bool> EnableMachineSched(
    "enable-misched",
    cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePostRAMachineSched(
    "enable-post-misched",
    cl::desc("Enable the post-ra machine instruction scheduling pass."),
    cl::init(true), cl::Hidden);

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C);
static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C);

/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return const_cast<MachineInstr*>(
    &*priorNonDebug(MachineBasicBlock::const_iterator(I), Beg));
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  // Cast the return value to nonconst MachineInstr, then cast to an
  // instr_iterator, which does not check for null, finally return a
  // bundle_iterator.
  return MachineBasicBlock::instr_iterator(
    const_cast<MachineInstr*>(
      &*nextIfDebug(MachineBasicBlock::const_iterator(I), End)));
}

/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor != useDefaultMachineSched)
    return Ctor(this);

  // Get the default scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedLive(this);
}

/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(*mf.getFunction()))
    return false;

  if (EnableMachineSched.getNumOccurrences()) {
    if (!EnableMachineSched)
      return false;
  } else if (!mf.getSubtarget().enableMachineScheduler())
    return false;

  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler, false);

  DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(*mf.getFunction()))
    return false;

  if (EnablePostRAMachineSched.getNumOccurrences()) {
    if (!EnablePostRAMachineSched)
      return false;
  } else if (!mf.getSubtarget().enablePostRAScheduler()) {
    DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
    return false;
  }
  DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  PassConfig = &getAnalysis<TargetPassConfig>();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  scheduleRegions(*Scheduler, true);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}

/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need isCall to enforce
/// the boundary, but there would be no benefit to postRA scheduling across
/// calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
                            MachineBasicBlock *MBB,
                            MachineFunction *MF,
                            const TargetInstrInfo *TII) {
  return MI->isCall() || TII->isSchedulingBoundary(MI, MBB, *MF);
}

/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler,
                                           bool FixKillFlags) {
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(&*MBB);

#ifndef NDEBUG
    if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
      continue;
    if (SchedOnlyBlock.getNumOccurrences()
        && (int)SchedOnlyBlock != MBB->getNumber())
      continue;
#endif

    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    //
    // MBB::size() uses instr_iterator to count. Here we need a bundle to count
    // as a single instruction.
    for (MachineBasicBlock::iterator RegionEnd = MBB->end();
         RegionEnd != MBB->begin(); RegionEnd = Scheduler.begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end() ||
          isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII)) {
        --RegionEnd;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      unsigned NumRegionInstrs = 0;
      MachineBasicBlock::iterator I = RegionEnd;
      for (; I != MBB->begin(); --I) {
        if (isSchedBoundary(&*std::prev(I), &*MBB, MF, TII))
          break;
        if (!I->isDebugValue())
          ++NumRegionInstrs;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == std::prev(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n  From: " << *I << "    To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
      if (DumpCriticalPathLength) {
        errs() << MF->getName();
        errs() << ":BB#" << MBB->getNumber();
        errs() << " " << MBB->getName() << "\n";
      }

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler.begin();
    }
    Scheduler.finishBlock();
    // FIXME: Ideally, no further passes should rely on kill flags. However,
    // thumb2 size reduction is currently an exception, so the PostMIScheduler
    // needs to do this.
    if (FixKillFlags)
      Scheduler.fixupKills(&*MBB);
  }
  Scheduler.finalizeSchedule();
}

void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

LLVM_DUMP_METHOD
void ReadyQueue::dump() {
  dbgs() << "Queue " << Name << ": ";
  for (unsigned i = 0, e = Queue.size(); i < e; ++i)
    dbgs() << Queue[i]->NodeNum << " ";
  dbgs() << "\n";
}

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
// virtual registers.
//===----------------------------------------------------------------------===//

// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() {
}

bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}
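
// Example use (mirroring the mutations later in this file): a DAG mutation can
// attempt to add an ordering edge and simply fall back when the edge would
// create a cycle:
//
//   if (DAG->addEdge(SuccSU, SDep(PredSU, SDep::Artificial))) {
//     // Edge accepted; SuccSU is now ordered after PredSU.
//   }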

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
    SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();

  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
    PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();

  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  SchedImpl->initPolicy(begin, end, regioninstrs);
}

/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
  MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals
  if (LIS)
    LIS->handleMove(*MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}
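
// The cutoff is a debugging aid: bisecting over -misched-cutoff while
// reproducing a failure isolates the first mis-scheduled instruction. A
// hypothetical invocation:
//
//   llc -misched-cutoff=16 -verify-misched test.ll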

/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
  DEBUG(SchedImpl->dumpPolicy());

  // Build the DAG.
  buildSchedGraph(AA);

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    } else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
    }
    // Notify the scheduling strategy before updating the DAG.
    // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
    // runs, it can then use the accurate ReadyCycle time to determine whether
    // newly released nodes can move to the readyQ.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
    Mutations[i]->apply(this);
  }
}

void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                      SmallVectorImpl<SUnit*> &BotRoots) {
  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    SUnit *SU = &(*I);
    assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU->biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!I->NumPredsLeft)
      TopRoots.push_back(SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!I->NumSuccsLeft)
      BotRoots.push_back(SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = nullptr;
  NextClusterPred = nullptr;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
    SchedImpl->releaseTopNode(*I);
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == std::prev(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);

  SUPressureDiffs.clear();

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
  ShouldTrackLaneMasks = SchedImpl->shouldTrackLaneMasks();

  assert((!ShouldTrackLaneMasks || ShouldTrackPressure) &&
         "ShouldTrackLaneMasks requires ShouldTrackPressure");
}

// Setup the register pressure trackers for the top scheduled and bottom
// scheduled regions.
void ScheduleDAGMILive::initRegPressure() {
  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin,
                    ShouldTrackLaneMasks, false);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                    ShouldTrackLaneMasks, false);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    DEBUG(dbgs() << "Live Thru: ";
          dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // For each live out vreg reduce the pressure change associated with other
  // uses of the same vreg below the live-out reaching def.
  updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd) {
    SmallVector<RegisterMaskPair, 8> LiveUses;
    BotRPTracker.recede(&LiveUses);
    updatePressureDiffs(LiveUses);
  }

  DEBUG(
    dbgs() << "Top Pressure:\n";
    dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
    dbgs() << "Bottom Pressure:\n";
    dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
  );

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      DEBUG(dbgs() << TRI->getRegPressureSetName(i)
            << " Limit " << Limit
            << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureChange(i));
    }
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].getPSet()) << " ";
        dbgs() << "\n");
}

void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
                        const std::vector<unsigned> &NewMaxPressure) {
  const PressureDiff &PDiff = getPressureDiff(SU);
  unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
  for (PressureDiff::const_iterator I = PDiff.begin(), E = PDiff.end();
       I != E; ++I) {
    if (!I->isValid())
      break;
    unsigned ID = I->getPSet();
    while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
      ++CritIdx;
    if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
      if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
          && NewMaxPressure[ID] <= INT16_MAX)
        RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
    }
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
    if (NewMaxPressure[ID] >= Limit - 2) {
      DEBUG(dbgs() << "  " << TRI->getRegPressureSetName(ID) << ": "
            << NewMaxPressure[ID]
            << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ") << Limit
            << "(+ " << BotRPTracker.getLiveThru()[ID] << " livethru)\n");
    }
  }
}

/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(
    ArrayRef<RegisterMaskPair> LiveUses) {
  for (const RegisterMaskPair &P : LiveUses) {
    unsigned Reg = P.RegUnit;
    /// FIXME: Currently assuming single-use physregs.
    if (!TRI->isVirtualRegister(Reg))
      continue;

    if (ShouldTrackLaneMasks) {
      // If the register has just become live then other uses won't change
      // this fact anymore => decrement pressure.
      // If the register has just become dead then other uses make it come
      // back to life => increment pressure.
      bool Decrement = P.LaneMask != 0;

      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit &SU = *V2SU.SU;
        if (SU.isScheduled || &SU == &ExitSU)
          continue;

        PressureDiff &PDiff = getPressureDiff(&SU);
        PDiff.addPressureChange(Reg, Decrement, &MRI);
        DEBUG(
          dbgs() << "  UpdateRegP: SU(" << SU.NodeNum << ") "
                 << PrintReg(Reg, TRI) << ':' << PrintLaneMask(P.LaneMask)
                 << ' ' << *SU.getInstr();
          dbgs() << "              to ";
          PDiff.dump(*TRI);
        );
      }
    } else {
      assert(P.LaneMask != 0);
      DEBUG(dbgs() << "  LiveReg: " << PrintVRegOrUnit(Reg, TRI) << "\n");
      // This may be called before CurrentBottom has been initialized. However,
      // BotRPTracker must have a valid position. We want the value live into
      // the instruction or live out of the block, so ask for the previous
      // instruction's live-out.
      const LiveInterval &LI = LIS->getInterval(Reg);
      VNInfo *VNI;
      MachineBasicBlock::const_iterator I =
        nextIfDebug(BotRPTracker.getPos(), BB->end());
      if (I == BB->end())
        VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
      else {
        LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*I));
        VNI = LRQ.valueIn();
      }
      // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
      assert(VNI && "No live value at use.");
      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit *SU = V2SU.SU;
        // If this use comes before the reaching def, it cannot be a last use,
        // so decrease its pressure change.
        if (!SU->isScheduled && SU != &ExitSU) {
          LiveQueryResult LRQ =
              LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
          if (LRQ.valueIn() == VNI) {
            PressureDiff &PDiff = getPressureDiff(SU);
            PDiff.addPressureChange(Reg, true, &MRI);
            DEBUG(
              dbgs() << "  UpdateRegP: SU(" << SU->NodeNum << ") "
                     << *SU->getInstr();
              dbgs() << "              to ";
              PDiff.dump(*TRI);
            );
          }
        }
      }
    }
  }
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMILive then it will want to override this virtual method in order
/// to update any specialized state.
void ScheduleDAGMILive::schedule() {
  DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
  DEBUG(SchedImpl->dumpPolicy());
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(
    for (const SUnit &SU : SUnits) {
      SU.dumpAll(this);
      if (ShouldTrackPressure) {
        dbgs() << "  Pressure Diff      : ";
        getPressureDiff(&SU).dump(*TRI);
      }
      dbgs() << '\n';
    }
  );
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    if (DFSResult) {
      unsigned SubtreeID = DFSResult->getSubtreeID(SU);
      if (!ScheduledTrees.test(SubtreeID)) {
        ScheduledTrees.set(SubtreeID);
        DFSResult->scheduleTree(SubtreeID);
        SchedImpl->scheduleTree(SubtreeID);
      }
    }

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Build the DAG and setup three register pressure trackers.
void ScheduleDAGMILive::buildDAGWithRegPressure() {
  if (!ShouldTrackPressure) {
    RPTracker.reset();
    RegionCriticalPSets.clear();
    buildSchedGraph(AA);
    return;
  }

  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 ShouldTrackLaneMasks, /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker, &SUPressureDiffs, LIS, ShouldTrackLaneMasks);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

void ScheduleDAGMILive::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single block loops. To handle loops that
/// span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes. For example, consider
/// the following instruction sequence where each instruction has unit latency
/// and defines an eponymous virtual register:
///
/// a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
///
/// This could be relevant to PostRA scheduling, but is currently implemented
/// assuming LiveIntervals.
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
  // This only applies to single block loops.
  if (!BB->isSuccessor(BB))
    return 0;

  unsigned MaxCyclicLatency = 0;
  // Visit each live out vreg def to find def/use pairs that cross iterations.
  for (const RegisterMaskPair &P : RPTracker.getPressure().LiveOutRegs) {
    unsigned Reg = P.RegUnit;
    if (!TRI->isVirtualRegister(Reg))
      continue;
    const LiveInterval &LI = LIS->getInterval(Reg);
    const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    if (!DefVNI)
      continue;

    MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
    const SUnit *DefSU = getSUnit(DefMI);
    if (!DefSU)
      continue;

    unsigned LiveOutHeight = DefSU->getHeight();
    unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
    // Visit all local users of the vreg def.
    for (const VReg2SUnit &V2SU
         : make_range(VRegUses.find(Reg), VRegUses.end())) {
      SUnit *SU = V2SU.SU;
      if (SU == &ExitSU)
        continue;

      // Only consider uses of the phi.
      LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
      if (!LRQ.valueIn()->isPHIDef())
        continue;

      // Assume that a path spanning two iterations is a cycle, which could
      // overestimate in strange cases. This allows cyclic latency to be
      // estimated as the minimum slack of the vreg's depth or height.
      unsigned CyclicLatency = 0;
      if (LiveOutDepth > SU->getDepth())
        CyclicLatency = LiveOutDepth - SU->getDepth();

      unsigned LiveInHeight = SU->getHeight() + DefSU->Latency;
      if (LiveInHeight > LiveOutHeight) {
        if (LiveInHeight - LiveOutHeight < CyclicLatency)
          CyclicLatency = LiveInHeight - LiveOutHeight;
      } else
        CyclicLatency = 0;

      DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
            << SU->NodeNum << ") = " << CyclicLatency << "c\n");
      if (CyclicLatency > MaxCyclicLatency)
        MaxCyclicLatency = CyclicLatency;
    }
  }
  DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
  return MaxCyclicLatency;
}

/// Release ExitSU predecessors and setup scheduler queues. Re-position
/// the Top RP tracker in case the region beginning has changed.
void ScheduleDAGMILive::initQueues(ArrayRef<SUnit*> TopRoots,
                                   ArrayRef<SUnit*> BotRoots) {
  ScheduleDAGMI::initQueues(TopRoots, BotRoots);
  if (ShouldTrackPressure) {
    assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
    TopRPTracker.setPos(CurrentTop);
  }
}

/// Move an instruction and update register pressure.
void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    if (ShouldTrackPressure) {
      // Update top scheduled pressure.
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      TopRPTracker.advance(RegOpers);
      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
      DEBUG(
        dbgs() << "Top Pressure:\n";
        dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
      );

      updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
    }
  } else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
    }
    if (ShouldTrackPressure) {
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      BotRPTracker.recedeSkipDebugValues();
      SmallVector<RegisterMaskPair, 8> LiveUses;
      BotRPTracker.recede(RegOpers, &LiveUses);
      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
      DEBUG(
        dbgs() << "Bottom Pressure:\n";
        dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
      );

      updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
      updatePressureDiffs(LiveUses);
    }
  }
}

//===----------------------------------------------------------------------===//
// BaseMemOpClusterMutation - DAG post-processing to cluster loads or stores.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between neighboring
/// loads or between neighboring stores.
class BaseMemOpClusterMutation : public ScheduleDAGMutation {
  struct MemOpInfo {
    SUnit *SU;
    unsigned BaseReg;
    int64_t Offset;
    MemOpInfo(SUnit *su, unsigned reg, int64_t ofs)
        : SU(su), BaseReg(reg), Offset(ofs) {}

    bool operator<(const MemOpInfo &RHS) const {
      return std::tie(BaseReg, Offset) < std::tie(RHS.BaseReg, RHS.Offset);
    }
  };

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  bool IsLoad;

public:
  BaseMemOpClusterMutation(const TargetInstrInfo *tii,
                           const TargetRegisterInfo *tri, bool IsLoad)
      : TII(tii), TRI(tri), IsLoad(IsLoad) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;

protected:
  void clusterNeighboringMemOps(ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG);
};

class StoreClusterMutation : public BaseMemOpClusterMutation {
public:
  StoreClusterMutation(const TargetInstrInfo *tii,
                       const TargetRegisterInfo *tri)
      : BaseMemOpClusterMutation(tii, tri, false) {}
};

class LoadClusterMutation : public BaseMemOpClusterMutation {
public:
  LoadClusterMutation(const TargetInstrInfo *tii, const TargetRegisterInfo *tri)
      : BaseMemOpClusterMutation(tii, tri, true) {}
};
} // anonymous namespace
1397 
1398 void BaseMemOpClusterMutation::clusterNeighboringMemOps(
1399     ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG) {
1400   SmallVector<MemOpInfo, 32> MemOpRecords;
1401   for (unsigned Idx = 0, End = MemOps.size(); Idx != End; ++Idx) {
1402     SUnit *SU = MemOps[Idx];
1403     unsigned BaseReg;
1404     int64_t Offset;
1405     if (TII->getMemOpBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
1406       MemOpRecords.push_back(MemOpInfo(SU, BaseReg, Offset));
1407   }
1408   if (MemOpRecords.size() < 2)
1409     return;
1410 
1411   std::sort(MemOpRecords.begin(), MemOpRecords.end());
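  // Illustrative example (hypothetical records): the sort groups records
  // such as (r1,+0), (r1,+8), (r1,+16), (r2,+4) by base register and then
  // by offset, so the loop below only needs to compare adjacent records
  // when growing a cluster.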
1412   unsigned ClusterLength = 1;
1413   for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
1414     if (MemOpRecords[Idx].BaseReg != MemOpRecords[Idx+1].BaseReg) {
1415       ClusterLength = 1;
1416       continue;
1417     }
1418 
1419     SUnit *SUa = MemOpRecords[Idx].SU;
1420     SUnit *SUb = MemOpRecords[Idx+1].SU;
1421     if (TII->shouldClusterMemOps(SUa->getInstr(), SUb->getInstr(), ClusterLength)
1422         && DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
1423       DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
1424             << SUb->NodeNum << ")\n");
1425       // Copy successor edges from SUa to SUb. Interleaving computation
1426       // dependent on SUa can prevent load combining due to register reuse.
1427       // Predecessor edges do not need to be copied from SUb to SUa since nearby
1428       // loads should have effectively the same inputs.
1429       for (SUnit::const_succ_iterator
1430              SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
1431         if (SI->getSUnit() == SUb)
1432           continue;
1433         DEBUG(dbgs() << "  Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
1434         DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
1435       }
1436       ++ClusterLength;
1437     } else
1438       ClusterLength = 1;
1439   }
1440 }
1441 
1442 /// \brief Callback from DAG postProcessing to create cluster edges for loads
1442 /// or stores.
1443 void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAGInstrs) {
1445   ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
1446 
1447   // Map DAG NodeNum to store chain ID.
1448   DenseMap<unsigned, unsigned> StoreChainIDs;
1449   // Map each store chain to a set of dependent MemOps.
1450   SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
1451   for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
1452     SUnit *SU = &DAG->SUnits[Idx];
1453     if ((IsLoad && !SU->getInstr()->mayLoad()) ||
1454         (!IsLoad && !SU->getInstr()->mayStore()))
1455       continue;
1456 
1457     unsigned ChainPredID = DAG->SUnits.size();
1458     for (SUnit::const_pred_iterator
1459            PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
1460       if (PI->isCtrl()) {
1461         ChainPredID = PI->getSUnit()->NodeNum;
1462         break;
1463       }
1464     }
1465     // Check if this chain-like pred has been seen
1466     // before. ChainPredID==MaxNodeID at the top of the schedule.
1467     unsigned NumChains = StoreChainDependents.size();
1468     std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
1469       StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
1470     if (Result.second)
1471       StoreChainDependents.resize(NumChains + 1);
1472     StoreChainDependents[Result.first->second].push_back(SU);
1473   }
1474 
1475   // Iterate over the store chains.
1476   for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
1477     clusterNeighboringMemOps(StoreChainDependents[Idx], DAG);
1478 }
1479 
1480 //===----------------------------------------------------------------------===//
1481 // MacroFusion - DAG post-processing to encourage fusion of macro ops.
1482 //===----------------------------------------------------------------------===//
1483 
1484 namespace {
1485 /// \brief Post-process the DAG to create cluster edges between instructions
1486 /// that may be fused by the processor into a single operation.
1487 class MacroFusion : public ScheduleDAGMutation {
1488   const TargetInstrInfo &TII;
1489   const TargetRegisterInfo &TRI;
1490 public:
1491   MacroFusion(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI)
1492     : TII(TII), TRI(TRI) {}
1493 
1494   void apply(ScheduleDAGInstrs *DAGInstrs) override;
1495 };
1496 } // anonymous namespace
1497 
1498 /// Returns true if \p MI reads a register written by \p Other.
1499 static bool HasDataDep(const TargetRegisterInfo &TRI, const MachineInstr &MI,
1500                        const MachineInstr &Other) {
1501   for (const MachineOperand &MO : MI.uses()) {
1502     if (!MO.isReg() || !MO.readsReg())
1503       continue;
1504 
1505     unsigned Reg = MO.getReg();
1506     if (Other.modifiesRegister(Reg, &TRI))
1507       return true;
1508   }
1509   return false;
1510 }
1511 
1512 /// \brief Callback from DAG postProcessing to create cluster edges to encourage
1513 /// fused operations.
1514 void MacroFusion::apply(ScheduleDAGInstrs *DAGInstrs) {
1515   ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
1516 
1517   // For now, assume targets can only fuse with the branch.
1518   SUnit &ExitSU = DAG->ExitSU;
1519   MachineInstr *Branch = ExitSU.getInstr();
1520   if (!Branch)
1521     return;
1522 
1523   for (SUnit &SU : DAG->SUnits) {
1524     // SUnits with successors can't be scheduled in front of the ExitSU.
1525     if (!SU.Succs.empty())
1526       continue;
1527     // We only care if the node writes to a register that the branch reads.
1528     MachineInstr *Pred = SU.getInstr();
1529     if (!HasDataDep(TRI, *Branch, *Pred))
1530       continue;
1531 
1532     if (!TII.shouldScheduleAdjacent(Pred, Branch))
1533       continue;
1534 
1535     // Create a single weak edge from SU to ExitSU. The only effect is to cause
1536     // bottom-up scheduling to heavily prioritize the clustered SU.  There is no
1537     // need to copy predecessor edges from ExitSU to SU, since top-down
1538     // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
1539     // of SU, we could create an artificial edge from the deepest root, but it
1540     // hasn't been needed yet.
1541     bool Success = DAG->addEdge(&ExitSU, SDep(&SU, SDep::Cluster));
1542     (void)Success;
1543     assert(Success && "No DAG nodes should be reachable from ExitSU");
1544 
1545     DEBUG(dbgs() << "Macro Fuse SU(" << SU.NodeNum << ")\n");
1546     break;
1547   }
1548 }
1549 
1550 //===----------------------------------------------------------------------===//
1551 // CopyConstrain - DAG post-processing to encourage copy elimination.
1552 //===----------------------------------------------------------------------===//
1553 
1554 namespace {
1555 /// \brief Post-process the DAG to create weak edges from all uses of a copy to
1556 /// the one use that defines the copy's source vreg, most likely an induction
1557 /// variable increment.
1558 class CopyConstrain : public ScheduleDAGMutation {
1559   // Transient state.
1560   SlotIndex RegionBeginIdx;
1561   // RegionEndIdx is the slot index of the last non-debug instruction in the
1562   // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
1563   SlotIndex RegionEndIdx;
1564 public:
1565   CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}
1566 
1567   void apply(ScheduleDAGInstrs *DAGInstrs) override;
1568 
1569 protected:
1570   void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
1571 };
1572 } // anonymous namespace
1573 
1574 /// constrainLocalCopy handles two possibilities:
1575 /// 1) Local src:
1576 /// I0:     = dst
1577 /// I1: src = ...
1578 /// I2:     = dst
1579 /// I3: dst = src (copy)
1580 /// (create pred->succ edges I0->I1, I2->I1)
1581 ///
1582 /// 2) Local copy:
1583 /// I0: dst = src (copy)
1584 /// I1:     = dst
1585 /// I2: src = ...
1586 /// I3:     = dst
1587 /// (create pred->succ edges I1->I2, I3->I2)
1588 ///
1589 /// Although the MachineScheduler is currently constrained to single blocks,
1590 /// this algorithm should handle extended blocks. An EBB is a set of
1591 /// contiguously numbered blocks such that the previous block in the EBB is
1592 /// always the single predecessor.
1593 void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
1594   LiveIntervals *LIS = DAG->getLIS();
1595   MachineInstr *Copy = CopySU->getInstr();
1596 
1597   // Check for pure vreg copies.
1598   const MachineOperand &SrcOp = Copy->getOperand(1);
1599   unsigned SrcReg = SrcOp.getReg();
1600   if (!TargetRegisterInfo::isVirtualRegister(SrcReg) || !SrcOp.readsReg())
1601     return;
1602 
1603   const MachineOperand &DstOp = Copy->getOperand(0);
1604   unsigned DstReg = DstOp.getReg();
1605   if (!TargetRegisterInfo::isVirtualRegister(DstReg) || DstOp.isDead())
1606     return;
1607 
1608   // Check if either the dest or source is local. If it's live across a back
1609   // edge, it's not local. Note that if both vregs are live across the back
1610   // edge, we cannot successfully constrain the copy without cyclic scheduling.
1611   // If both the copy's source and dest are local live intervals, then we
1612   // should treat the dest as the global for the purpose of adding
1613   // constraints. This adds edges from source's other uses to the copy.
1614   unsigned LocalReg = SrcReg;
1615   unsigned GlobalReg = DstReg;
1616   LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
1617   if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
1618     LocalReg = DstReg;
1619     GlobalReg = SrcReg;
1620     LocalLI = &LIS->getInterval(LocalReg);
1621     if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
1622       return;
1623   }
1624   LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);
1625 
1626   // Find the global segment after the start of the local LI.
1627   LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
1628   // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
1629   // local live range. We could create edges from other global uses to the local
1630   // start, but the coalescer should have already eliminated these cases, so
1631   // don't bother dealing with it.
1632   if (GlobalSegment == GlobalLI->end())
1633     return;
1634 
1635   // If GlobalSegment is killed at LocalLI->start, the call to find()
1636   // returned the next global segment. But if GlobalSegment overlaps with
1637   // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
1638   // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
1639   if (GlobalSegment->contains(LocalLI->beginIndex()))
1640     ++GlobalSegment;
1641 
1642   if (GlobalSegment == GlobalLI->end())
1643     return;
1644 
1645   // Check if GlobalLI contains a hole in the vicinity of LocalLI.
1646   if (GlobalSegment != GlobalLI->begin()) {
1647     // Two address defs have no hole.
1648     if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
1649                                GlobalSegment->start)) {
1650       return;
1651     }
1652     // If the prior global segment may be defined by the same two-address
1653     // instruction that also defines LocalLI, then we can't make a hole here.
1654     if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
1655                                LocalLI->beginIndex())) {
1656       return;
1657     }
1658     // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
1659     // it would be a disconnected component in the live range.
1660     assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
1661            "Disconnected LRG within the scheduling region.");
1662   }
1663   MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
1664   if (!GlobalDef)
1665     return;
1666 
1667   SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
1668   if (!GlobalSU)
1669     return;
1670 
1671   // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
1672   // constraining the uses of the last local def to precede GlobalDef.
1673   SmallVector<SUnit*,8> LocalUses;
1674   const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
1675   MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
1676   SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
1677   for (SUnit::const_succ_iterator
1678          I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
1679        I != E; ++I) {
1680     if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
1681       continue;
1682     if (I->getSUnit() == GlobalSU)
1683       continue;
1684     if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
1685       return;
1686     LocalUses.push_back(I->getSUnit());
1687   }
1688   // Open the top of the GlobalLI hole by constraining any earlier global uses
1689   // to precede the start of LocalLI.
1690   SmallVector<SUnit*,8> GlobalUses;
1691   MachineInstr *FirstLocalDef =
1692     LIS->getInstructionFromIndex(LocalLI->beginIndex());
1693   SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
1694   for (SUnit::const_pred_iterator
1695          I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
1696     if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
1697       continue;
1698     if (I->getSUnit() == FirstLocalSU)
1699       continue;
1700     if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
1701       return;
1702     GlobalUses.push_back(I->getSUnit());
1703   }
1704   DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
1705   // Add the weak edges.
1706   for (SmallVectorImpl<SUnit*>::const_iterator
1707          I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
1708     DEBUG(dbgs() << "  Local use SU(" << (*I)->NodeNum << ") -> SU("
1709           << GlobalSU->NodeNum << ")\n");
1710     DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
1711   }
1712   for (SmallVectorImpl<SUnit*>::const_iterator
1713          I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
1714     DEBUG(dbgs() << "  Global use SU(" << (*I)->NodeNum << ") -> SU("
1715           << FirstLocalSU->NodeNum << ")\n");
1716     DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
1717   }
1718 }
1719 
1720 /// \brief Callback from DAG postProcessing to create weak edges to encourage
1721 /// copy elimination.
1722 void CopyConstrain::apply(ScheduleDAGInstrs *DAGInstrs) {
1723   ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
1724   assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");
1725 
1726   MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
1727   if (FirstPos == DAG->end())
1728     return;
1729   RegionBeginIdx = DAG->getLIS()->getInstructionIndex(*FirstPos);
1730   RegionEndIdx = DAG->getLIS()->getInstructionIndex(
1731       *priorNonDebug(DAG->end(), DAG->begin()));
1732 
1733   for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
1734     SUnit *SU = &DAG->SUnits[Idx];
1735     if (!SU->getInstr()->isCopy())
1736       continue;
1737 
1738     constrainLocalCopy(SU, static_cast<ScheduleDAGMILive*>(DAG));
1739   }
1740 }
1741 
1742 //===----------------------------------------------------------------------===//
1743 // MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
1744 // and possibly other custom schedulers.
1745 //===----------------------------------------------------------------------===//
1746 
1747 static const unsigned InvalidCycle = ~0U;
1748 
1749 SchedBoundary::~SchedBoundary() { delete HazardRec; }
1750 
1751 void SchedBoundary::reset() {
1752   // A new HazardRec is created for each DAG and owned by SchedBoundary.
1753   // Destroying and reconstructing it is very expensive though. So keep
1754   // invalid, placeholder HazardRecs.
1755   if (HazardRec && HazardRec->isEnabled()) {
1756     delete HazardRec;
1757     HazardRec = nullptr;
1758   }
1759   Available.clear();
1760   Pending.clear();
1761   CheckPending = false;
1762   NextSUs.clear();
1763   CurrCycle = 0;
1764   CurrMOps = 0;
1765   MinReadyCycle = UINT_MAX;
1766   ExpectedLatency = 0;
1767   DependentLatency = 0;
1768   RetiredMOps = 0;
1769   MaxExecutedResCount = 0;
1770   ZoneCritResIdx = 0;
1771   IsResourceLimited = false;
1772   ReservedCycles.clear();
1773 #ifndef NDEBUG
1774   // Track the maximum number of stall cycles that could arise either from the
1775   // latency of a DAG edge or the number of cycles that a processor resource is
1776   // reserved (SchedBoundary::ReservedCycles).
1777   MaxObservedStall = 0;
1778 #endif
1779   // Reserve a zero-count for invalid CritResIdx.
1780   ExecutedResCounts.resize(1);
1781   assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
1782 }
1783 
1784 void SchedRemainder::
1785 init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
1786   reset();
1787   if (!SchedModel->hasInstrSchedModel())
1788     return;
1789   RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
1790   for (std::vector<SUnit>::iterator
1791          I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
1792     const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
1793     RemIssueCount += SchedModel->getNumMicroOps(I->getInstr(), SC)
1794       * SchedModel->getMicroOpFactor();
1795     for (TargetSchedModel::ProcResIter
1796            PI = SchedModel->getWriteProcResBegin(SC),
1797            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1798       unsigned PIdx = PI->ProcResourceIdx;
1799       unsigned Factor = SchedModel->getResourceFactor(PIdx);
1800       RemainingCounts[PIdx] += (Factor * PI->Cycles);
1801     }
1802   }
1803 }
1804 
1805 void SchedBoundary::
1806 init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
1807   reset();
1808   DAG = dag;
1809   SchedModel = smodel;
1810   Rem = rem;
1811   if (SchedModel->hasInstrSchedModel()) {
1812     ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds());
1813     ReservedCycles.resize(SchedModel->getNumProcResourceKinds(), InvalidCycle);
1814   }
1815 }
1816 
1817 /// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
1818 /// these "soft stalls" differently than the hard stall cycles based on CPU
1819 /// resources and computed by checkHazard(). A fully in-order model
1820 /// (MicroOpBufferSize==0) will not make use of this since instructions are not
1821 /// available for scheduling until they are ready. However, a weaker in-order
1822 /// model may use this for heuristics. For example, if a processor has in-order
1823 /// behavior when reading certain resources, this may come into play.
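/// Illustrative example (hypothetical numbers): an unbuffered SU with
/// BotReadyCycle=12 while the bottom zone is at CurrCycle=10 reports 2
/// soft-stall cycles below; a buffered SU always reports 0.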
1824 unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
1825   if (!SU->isUnbuffered)
1826     return 0;
1827 
1828   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
1829   if (ReadyCycle > CurrCycle)
1830     return ReadyCycle - CurrCycle;
1831   return 0;
1832 }
1833 
1834 /// Compute the next cycle at which the given processor resource can be
1835 /// scheduled.
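///
/// Illustrative example (hypothetical numbers): if the resource was last
/// reserved through cycle 7, a top-down zone can next use it at cycle 7,
/// while a bottom-up zone scheduling an operation that holds it for Cycles=2
/// gets 7 + 2 = 9. A never-used resource (InvalidCycle) returns cycle 0.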
1836 unsigned SchedBoundary::
1837 getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
1838   unsigned NextUnreserved = ReservedCycles[PIdx];
1839   // If this resource has never been used, always return cycle zero.
1840   if (NextUnreserved == InvalidCycle)
1841     return 0;
1842   // For bottom-up scheduling add the cycles needed for the current operation.
1843   if (!isTop())
1844     NextUnreserved += Cycles;
1845   return NextUnreserved;
1846 }
1847 
1848 /// Does this SU have a hazard within the current instruction group.
1849 ///
1850 /// The scheduler supports two modes of hazard recognition. The first is the
1851 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
1852 /// supports highly complicated in-order reservation tables
1853 /// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
1854 ///
1855 /// The second is a streamlined mechanism that checks for hazards based on
1856 /// simple counters that the scheduler itself maintains. It explicitly checks
1857 /// for instruction dispatch limitations, including the number of micro-ops that
1858 /// can dispatch per cycle.
1859 ///
1860 /// TODO: Also check whether the SU must start a new group.
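///
/// Illustrative example (hypothetical numbers): with IssueWidth=4, if
/// CurrMOps=3 and the candidate decodes to 2 micro-ops, then 3 + 2 > 4 and
/// the SU is reported as a hazard for the current instruction group.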
1861 bool SchedBoundary::checkHazard(SUnit *SU) {
1862   if (HazardRec->isEnabled()
1863       && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
1864     return true;
1865   }
1866   unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
1867   if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
1868     DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
1869           << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
1870     return true;
1871   }
1872   if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
1873     const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1874     for (TargetSchedModel::ProcResIter
1875            PI = SchedModel->getWriteProcResBegin(SC),
1876            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1877       unsigned NRCycle = getNextResourceCycle(PI->ProcResourceIdx, PI->Cycles);
1878       if (NRCycle > CurrCycle) {
1879 #ifndef NDEBUG
1880         MaxObservedStall = std::max(PI->Cycles, MaxObservedStall);
1881 #endif
1882         DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") "
1883               << SchedModel->getResourceName(PI->ProcResourceIdx)
1884               << "=" << NRCycle << "c\n");
1885         return true;
1886       }
1887     }
1888   }
1889   return false;
1890 }
1891 
1892 // Find the unscheduled node in ReadySUs with the highest latency.
1893 unsigned SchedBoundary::
1894 findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
1895   SUnit *LateSU = nullptr;
1896   unsigned RemLatency = 0;
1897   for (ArrayRef<SUnit*>::iterator I = ReadySUs.begin(), E = ReadySUs.end();
1898        I != E; ++I) {
1899     unsigned L = getUnscheduledLatency(*I);
1900     if (L > RemLatency) {
1901       RemLatency = L;
1902       LateSU = *I;
1903     }
1904   }
1905   if (LateSU) {
1906     DEBUG(dbgs() << Available.getName() << " RemLatency SU("
1907           << LateSU->NodeNum << ") " << RemLatency << "c\n");
1908   }
1909   return RemLatency;
1910 }
1911 
1912 // Count resources in this zone and the remaining unscheduled
1913 // instruction. Return the max count, scaled. Set OtherCritIdx to the critical
1914 // resource index, or zero if the zone is issue limited.
1915 unsigned SchedBoundary::
1916 getOtherResourceCount(unsigned &OtherCritIdx) {
1917   OtherCritIdx = 0;
1918   if (!SchedModel->hasInstrSchedModel())
1919     return 0;
1920 
1921   unsigned OtherCritCount = Rem->RemIssueCount
1922     + (RetiredMOps * SchedModel->getMicroOpFactor());
1923   DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
1924         << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
1925   for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
1926        PIdx != PEnd; ++PIdx) {
1927     unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
1928     if (OtherCount > OtherCritCount) {
1929       OtherCritCount = OtherCount;
1930       OtherCritIdx = PIdx;
1931     }
1932   }
1933   if (OtherCritIdx) {
1934     DEBUG(dbgs() << "  " << Available.getName() << " + Remain CritRes: "
1935           << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
1936           << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
1937   }
1938   return OtherCritCount;
1939 }
1940 
1941 void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) {
1942   assert(SU->getInstr() && "Scheduled SUnit must have instr");
1943 
1944 #ifndef NDEBUG
1945   // ReadyCycle has been bumped up to CurrCycle when this node was
1946   // scheduled, but CurrCycle may have been eagerly advanced immediately after
1947   // scheduling, so may now be greater than ReadyCycle.
1948   if (ReadyCycle > CurrCycle)
1949     MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
1950 #endif
1951 
1952   if (ReadyCycle < MinReadyCycle)
1953     MinReadyCycle = ReadyCycle;
1954 
1955   // Check for interlocks first. For the purpose of other heuristics, an
1956   // instruction that cannot issue appears as if it's not in the ReadyQueue.
1957   bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
1958   if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU) ||
1959       Available.size() >= ReadyListLimit)
1960     Pending.push(SU);
1961   else
1962     Available.push(SU);
1963 
1964   // Record this node as an immediate dependent of the scheduled node.
1965   NextSUs.insert(SU);
1966 }
1967 
1968 void SchedBoundary::releaseTopNode(SUnit *SU) {
1969   if (SU->isScheduled)
1970     return;
1971 
1972   releaseNode(SU, SU->TopReadyCycle);
1973 }
1974 
1975 void SchedBoundary::releaseBottomNode(SUnit *SU) {
1976   if (SU->isScheduled)
1977     return;
1978 
1979   releaseNode(SU, SU->BotReadyCycle);
1980 }
1981 
1982 /// Move the boundary of scheduled code by one cycle.
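///
/// Illustrative example (hypothetical numbers): advancing from CurrCycle=5
/// to NextCycle=7 with IssueWidth=4 frees DecMOps = 4 * 2 = 8 micro-op
/// slots, so any CurrMOps value up to 8 resets to 0.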
1983 void SchedBoundary::bumpCycle(unsigned NextCycle) {
1984   if (SchedModel->getMicroOpBufferSize() == 0) {
1985     assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
1986     if (MinReadyCycle > NextCycle)
1987       NextCycle = MinReadyCycle;
1988   }
1989   // Update the current micro-ops, which will issue in the next cycle.
1990   unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
1991   CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;
1992 
1993   // Decrement DependentLatency based on the next cycle.
1994   if ((NextCycle - CurrCycle) > DependentLatency)
1995     DependentLatency = 0;
1996   else
1997     DependentLatency -= (NextCycle - CurrCycle);
1998 
1999   if (!HazardRec->isEnabled()) {
2000     // Bypass HazardRec virtual calls.
2001     CurrCycle = NextCycle;
2002   } else {
2003     // Bypass getHazardType calls in case of long latency.
2004     for (; CurrCycle != NextCycle; ++CurrCycle) {
2005       if (isTop())
2006         HazardRec->AdvanceCycle();
2007       else
2008         HazardRec->RecedeCycle();
2009     }
2010   }
2011   CheckPending = true;
2012   unsigned LFactor = SchedModel->getLatencyFactor();
2013   IsResourceLimited =
2014     (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
2015     > (int)LFactor;
2016 
2017   DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n');
2018 }
2019 
2020 void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
2021   ExecutedResCounts[PIdx] += Count;
2022   if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
2023     MaxExecutedResCount = ExecutedResCounts[PIdx];
2024 }
2025 
2026 /// Add the given processor resource to this scheduled zone.
2027 ///
2028 /// \param Cycles indicates the number of consecutive (non-pipelined) cycles
2029 /// during which this resource is consumed.
2030 ///
2031 /// \return the next cycle at which the instruction may execute without
2032 /// oversubscribing resources.
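///
/// Illustrative example (hypothetical numbers): if this resource's scaling
/// factor is 2, an instruction occupying it for Cycles=3 adds Count =
/// 2 * 3 = 6 scaled units to the executed counts and removes the same
/// amount from the remaining counts.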
2033 unsigned SchedBoundary::
2034 countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
2035   unsigned Factor = SchedModel->getResourceFactor(PIdx);
2036   unsigned Count = Factor * Cycles;
2037   DEBUG(dbgs() << "  " << SchedModel->getResourceName(PIdx)
2038         << " +" << Cycles << "x" << Factor << "u\n");
2039 
2040   // Update Executed resources counts.
2041   incExecutedResources(PIdx, Count);
2042   assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
2043   Rem->RemainingCounts[PIdx] -= Count;
2044 
2045   // Check if this resource exceeds the current critical resource. If so, it
2046   // becomes the critical resource.
2047   if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
2048     ZoneCritResIdx = PIdx;
2049     DEBUG(dbgs() << "  *** Critical resource "
2050           << SchedModel->getResourceName(PIdx) << ": "
2051           << getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
2052   }
2053   // For reserved resources, record the highest cycle using the resource.
2054   unsigned NextAvailable = getNextResourceCycle(PIdx, Cycles);
2055   if (NextAvailable > CurrCycle) {
2056     DEBUG(dbgs() << "  Resource conflict: "
2057           << SchedModel->getProcResource(PIdx)->Name << " reserved until @"
2058           << NextAvailable << "\n");
2059   }
2060   return NextAvailable;
2061 }
2062 
2063 /// Move the boundary of scheduled code by one SUnit.
2064 void SchedBoundary::bumpNode(SUnit *SU) {
2065   // Update the reservation table.
2066   if (HazardRec->isEnabled()) {
2067     if (!isTop() && SU->isCall) {
2068       // Calls are scheduled with their preceding instructions. For bottom-up
2069       // scheduling, clear the pipeline state before emitting.
2070       HazardRec->Reset();
2071     }
2072     HazardRec->EmitInstruction(SU);
2073   }
2074   // checkHazard should prevent scheduling multiple instructions per cycle that
2075   // exceed the issue width.
2076   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2077   unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
2078   assert(
2079       (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
2080       "Cannot schedule this instruction's MicroOps in the current cycle.");
2081 
2082   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
2083   DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");
2084 
2085   unsigned NextCycle = CurrCycle;
2086   switch (SchedModel->getMicroOpBufferSize()) {
2087   case 0:
2088     assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
2089     break;
2090   case 1:
2091     if (ReadyCycle > NextCycle) {
2092       NextCycle = ReadyCycle;
2093       DEBUG(dbgs() << "  *** Stall until: " << ReadyCycle << "\n");
2094     }
2095     break;
2096   default:
2097     // We don't currently model the OOO reorder buffer, so consider all
2098     // scheduled MOps to be "retired". We do loosely model in-order resource
2099     // latency. If this instruction uses an in-order resource, account for any
2100     // likely stall cycles.
2101     if (SU->isUnbuffered && ReadyCycle > NextCycle)
2102       NextCycle = ReadyCycle;
2103     break;
2104   }
2105   RetiredMOps += IncMOps;
2106 
2107   // Update resource counts and critical resource.
2108   if (SchedModel->hasInstrSchedModel()) {
2109     unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
2110     assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
2111     Rem->RemIssueCount -= DecRemIssue;
2112     if (ZoneCritResIdx) {
2113       // Scale scheduled micro-ops for comparing with the critical resource.
2114       unsigned ScaledMOps =
2115         RetiredMOps * SchedModel->getMicroOpFactor();
2116 
2117       // If scaled micro-ops are now more than the previous critical resource by
2118       // a full cycle, then micro-ops issue becomes critical.
2119       if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
2120           >= (int)SchedModel->getLatencyFactor()) {
2121         ZoneCritResIdx = 0;
2122         DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
2123               << ScaledMOps / SchedModel->getLatencyFactor() << "c\n");
2124       }
2125     }
2126     for (TargetSchedModel::ProcResIter
2127            PI = SchedModel->getWriteProcResBegin(SC),
2128            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2129       unsigned RCycle =
2130         countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
2131       if (RCycle > NextCycle)
2132         NextCycle = RCycle;
2133     }
2134     if (SU->hasReservedResource) {
2135       // For reserved resources, record the highest cycle using the resource.
2136       // For top-down scheduling, this is the cycle in which we schedule this
2137       // instruction plus the number of cycles the operation reserves the
2138       // resource. For bottom-up, it is simply the instruction's cycle.
2139       for (TargetSchedModel::ProcResIter
2140              PI = SchedModel->getWriteProcResBegin(SC),
2141              PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2142         unsigned PIdx = PI->ProcResourceIdx;
2143         if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
2144           if (isTop()) {
2145             ReservedCycles[PIdx] =
2146               std::max(getNextResourceCycle(PIdx, 0), NextCycle + PI->Cycles);
2147           } else
2148             ReservedCycles[PIdx] = NextCycle;
2150         }
2151       }
2152     }
2153   }
2154   // Update ExpectedLatency and DependentLatency.
2155   unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
2156   unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
2157   if (SU->getDepth() > TopLatency) {
2158     TopLatency = SU->getDepth();
2159     DEBUG(dbgs() << "  " << Available.getName()
2160           << " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n");
2161   }
2162   if (SU->getHeight() > BotLatency) {
2163     BotLatency = SU->getHeight();
2164     DEBUG(dbgs() << "  " << Available.getName()
2165           << " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n");
2166   }
2167   // If we stall for any reason, bump the cycle.
2168   if (NextCycle > CurrCycle) {
2169     bumpCycle(NextCycle);
2170   } else {
2171     // After updating ZoneCritResIdx and ExpectedLatency, check if we're
2172     // resource limited. If a stall occurred, bumpCycle does this.
2173     unsigned LFactor = SchedModel->getLatencyFactor();
2174     IsResourceLimited =
2175       (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
2176       > (int)LFactor;
2177   }
2178   // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
2179   // resets CurrMOps. Loop to handle instructions with more MOps than issue in
2180   // one cycle.  Since we commonly reach the max MOps here, opportunistically
2181   // bump the cycle to avoid uselessly checking everything in the readyQ.
2182   CurrMOps += IncMOps;
2183   while (CurrMOps >= SchedModel->getIssueWidth()) {
2184     DEBUG(dbgs() << "  *** Max MOps " << CurrMOps
2185           << " at cycle " << CurrCycle << '\n');
2186     bumpCycle(++NextCycle);
2187   }
2188   DEBUG(dumpScheduledState());
2189 }
2190 
2191 /// Release pending ready nodes into the available queue. This makes them
2192 /// visible to heuristics.
2193 void SchedBoundary::releasePending() {
2194   // If the available queue is empty, it is safe to reset MinReadyCycle.
2195   if (Available.empty())
2196     MinReadyCycle = UINT_MAX;
2197 
2198   // Check to see if any of the pending instructions are ready to issue.  If
2199   // so, add them to the available queue.
2200   bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
2201   for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
2202     SUnit *SU = *(Pending.begin()+i);
2203     unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
2204 
2205     if (ReadyCycle < MinReadyCycle)
2206       MinReadyCycle = ReadyCycle;
2207 
2208     if (!IsBuffered && ReadyCycle > CurrCycle)
2209       continue;
2210 
2211     if (checkHazard(SU))
2212       continue;
2213 
2214     if (Available.size() >= ReadyListLimit)
2215       break;
2216 
2217     Available.push(SU);
2218     Pending.remove(Pending.begin()+i);
2219     --i; --e;
2220   }
2221   DEBUG(if (!Pending.empty()) Pending.dump());
2222   CheckPending = false;
2223 }
2224 
2225 /// Remove SU from the ready set for this boundary.
2226 void SchedBoundary::removeReady(SUnit *SU) {
2227   if (Available.isInQueue(SU))
2228     Available.remove(Available.find(SU));
2229   else {
2230     assert(Pending.isInQueue(SU) && "bad ready count");
2231     Pending.remove(Pending.find(SU));
2232   }
2233 }
2234 
2235 /// If this queue only has one ready candidate, return it. As a side effect,
2236 /// defer any nodes that now hit a hazard, and advance the cycle until at least
2237 /// one node is ready. If multiple instructions are ready, return NULL.
2238 SUnit *SchedBoundary::pickOnlyChoice() {
2239   if (CheckPending)
2240     releasePending();
2241 
2242   if (CurrMOps > 0) {
2243     // Defer any ready instrs that now have a hazard.
2244     for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
2245       if (checkHazard(*I)) {
2246         Pending.push(*I);
2247         I = Available.remove(I);
2248         continue;
2249       }
2250       ++I;
2251     }
2252   }
2253   for (unsigned i = 0; Available.empty(); ++i) {
2254 //  FIXME: Re-enable assert once PR20057 is resolved.
2255 //    assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
2256 //           "permanent hazard");
2257     (void)i;
2258     bumpCycle(CurrCycle + 1);
2259     releasePending();
2260   }
2261   if (Available.size() == 1)
2262     return *Available.begin();
2263   return nullptr;
2264 }
2265 
2266 #ifndef NDEBUG
2267 // This is useful information to dump after bumpNode.
2268 // Note that the Queue contents are more useful before pickNodeFromQueue.
2269 void SchedBoundary::dumpScheduledState() {
2270   unsigned ResFactor;
2271   unsigned ResCount;
2272   if (ZoneCritResIdx) {
2273     ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
2274     ResCount = getResourceCount(ZoneCritResIdx);
2275   } else {
2276     ResFactor = SchedModel->getMicroOpFactor();
2277     ResCount = RetiredMOps * SchedModel->getMicroOpFactor();
2278   }
2279   unsigned LFactor = SchedModel->getLatencyFactor();
2280   dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
2281          << "  Retired: " << RetiredMOps;
2282   dbgs() << "\n  Executed: " << getExecutedCount() / LFactor << "c";
2283   dbgs() << "\n  Critical: " << ResCount / LFactor << "c, "
2284          << ResCount / ResFactor << " "
2285          << SchedModel->getResourceName(ZoneCritResIdx)
2286          << "\n  ExpectedLatency: " << ExpectedLatency << "c\n"
2287          << (IsResourceLimited ? "  - Resource" : "  - Latency")
2288          << " limited.\n";
2289 }
2290 #endif
2291 
2292 //===----------------------------------------------------------------------===//
2293 // GenericScheduler - Generic implementation of MachineSchedStrategy.
2294 //===----------------------------------------------------------------------===//
2295 
2296 void GenericSchedulerBase::SchedCandidate::
2297 initResourceDelta(const ScheduleDAGMI *DAG,
2298                   const TargetSchedModel *SchedModel) {
2299   if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
2300     return;
2301 
2302   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2303   for (TargetSchedModel::ProcResIter
2304          PI = SchedModel->getWriteProcResBegin(SC),
2305          PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2306     if (PI->ProcResourceIdx == Policy.ReduceResIdx)
2307       ResDelta.CritResources += PI->Cycles;
2308     if (PI->ProcResourceIdx == Policy.DemandResIdx)
2309       ResDelta.DemandedResources += PI->Cycles;
2310   }
2311 }
2312 
2313 /// Set the CandPolicy for a scheduling zone given the current resources and
2314 /// latencies inside and outside the zone.
2315 void GenericSchedulerBase::setPolicy(CandPolicy &Policy, bool IsPostRA,
2316                                      SchedBoundary &CurrZone,
2317                                      SchedBoundary *OtherZone) {
2318   // Apply preemptive heuristics based on the total latency and resources
2319   // inside and outside this zone. Potential stalls should be considered before
2320   // following this policy.
2321 
2322   // Compute remaining latency. We need this both to determine whether the
2323   // overall schedule has become latency-limited and whether the instructions
2324   // outside this zone are resource or latency limited.
2325   //
2326   // The "dependent" latency is updated incrementally during scheduling as the
2327   // max height/depth of scheduled nodes minus the cycles since it was
2328   // scheduled:
2329   //   DLat = max(N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
2330   //
2331   // The "independent" latency is the max ready queue depth:
2332   //   ILat = max N.depth for N in Available|Pending
2333   //
2334   // RemainingLatency is the greater of independent and dependent latency.
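  // Illustrative example (hypothetical numbers): with DependentLatency=8, a
  // max unscheduled latency of 11 among Available nodes and 9 among Pending
  // nodes, RemLatency below becomes max(8, 11, 9) = 11.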
2335   unsigned RemLatency = CurrZone.getDependentLatency();
2336   RemLatency = std::max(RemLatency,
2337                         CurrZone.findMaxLatency(CurrZone.Available.elements()));
2338   RemLatency = std::max(RemLatency,
2339                         CurrZone.findMaxLatency(CurrZone.Pending.elements()));
2340 
2341   // Compute the critical resource outside the zone.
2342   unsigned OtherCritIdx = 0;
2343   unsigned OtherCount =
2344     OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;
2345 
2346   bool OtherResLimited = false;
2347   if (SchedModel->hasInstrSchedModel()) {
2348     unsigned LFactor = SchedModel->getLatencyFactor();
2349     OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
2350   }
2351   // Schedule aggressively for latency in PostRA mode. We don't check for
2352   // acyclic latency during PostRA, and highly out-of-order processors will
2353   // skip PostRA scheduling.
2354   if (!OtherResLimited) {
2355     if (IsPostRA || (RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath)) {
2356       Policy.ReduceLatency |= true;
2357       DEBUG(dbgs() << "  " << CurrZone.Available.getName()
2358             << " RemainingLatency " << RemLatency << " + "
2359             << CurrZone.getCurrCycle() << "c > CritPath "
2360             << Rem.CriticalPath << "\n");
2361     }
2362   }
2363   // If the same resource is limiting inside and outside the zone, do nothing.
2364   if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
2365     return;
2366 
2367   DEBUG(
2368     if (CurrZone.isResourceLimited()) {
2369       dbgs() << "  " << CurrZone.Available.getName() << " ResourceLimited: "
2370              << SchedModel->getResourceName(CurrZone.getZoneCritResIdx())
2371              << "\n";
2372     }
2373     if (OtherResLimited)
2374       dbgs() << "  RemainingLimit: "
2375              << SchedModel->getResourceName(OtherCritIdx) << "\n";
2376     if (!CurrZone.isResourceLimited() && !OtherResLimited)
2377       dbgs() << "  Latency limited both directions.\n");
2378 
2379   if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
2380     Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();
2381 
2382   if (OtherResLimited)
2383     Policy.DemandResIdx = OtherCritIdx;
2384 }
2385 
2386 #ifndef NDEBUG
2387 const char *GenericSchedulerBase::getReasonStr(
2388   GenericSchedulerBase::CandReason Reason) {
2389   switch (Reason) {
2390   case NoCand:         return "NOCAND    ";
2391   case Only1:          return "ONLY1     ";
2392   case PhysRegCopy:    return "PREG-COPY ";
2393   case RegExcess:      return "REG-EXCESS";
2394   case RegCritical:    return "REG-CRIT  ";
2395   case Stall:          return "STALL     ";
2396   case Cluster:        return "CLUSTER   ";
2397   case Weak:           return "WEAK      ";
2398   case RegMax:         return "REG-MAX   ";
2399   case ResourceReduce: return "RES-REDUCE";
2400   case ResourceDemand: return "RES-DEMAND";
2401   case TopDepthReduce: return "TOP-DEPTH ";
2402   case TopPathReduce:  return "TOP-PATH  ";
2403   case BotHeightReduce:return "BOT-HEIGHT";
2404   case BotPathReduce:  return "BOT-PATH  ";
2405   case NextDefUse:     return "DEF-USE   ";
2406   case NodeOrder:      return "ORDER     ";
2407   }
2408   llvm_unreachable("Unknown reason!");
2409 }
2410 
2411 void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
2412   PressureChange P;
2413   unsigned ResIdx = 0;
2414   unsigned Latency = 0;
2415   switch (Cand.Reason) {
2416   default:
2417     break;
2418   case RegExcess:
2419     P = Cand.RPDelta.Excess;
2420     break;
2421   case RegCritical:
2422     P = Cand.RPDelta.CriticalMax;
2423     break;
2424   case RegMax:
2425     P = Cand.RPDelta.CurrentMax;
2426     break;
2427   case ResourceReduce:
2428     ResIdx = Cand.Policy.ReduceResIdx;
2429     break;
2430   case ResourceDemand:
2431     ResIdx = Cand.Policy.DemandResIdx;
2432     break;
2433   case TopDepthReduce:
2434     Latency = Cand.SU->getDepth();
2435     break;
2436   case TopPathReduce:
2437     Latency = Cand.SU->getHeight();
2438     break;
2439   case BotHeightReduce:
2440     Latency = Cand.SU->getHeight();
2441     break;
2442   case BotPathReduce:
2443     Latency = Cand.SU->getDepth();
2444     break;
2445   }
2446   dbgs() << "  Cand SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
2447   if (P.isValid())
2448     dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
2449            << ":" << P.getUnitInc() << " ";
2450   else
2451     dbgs() << "      ";
2452   if (ResIdx)
2453     dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
2454   else
2455     dbgs() << "         ";
2456   if (Latency)
2457     dbgs() << " " << Latency << " cycles ";
2458   else
2459     dbgs() << "          ";
2460   dbgs() << '\n';
2461 }
2462 #endif
2463 
2464 /// Return true if this heuristic determines order.
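///
/// Illustrative example for tryLess below (tryGreater is symmetric):
/// tryLess(2, 5, ...) marks TryCand as winning for Reason and returns true;
/// tryLess(5, 2, ...) keeps Cand (recording Reason on it if Reason ranks
/// higher than Cand's current one) and also returns true; a tie returns
/// false so weaker heuristics can break it.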
2465 static bool tryLess(int TryVal, int CandVal,
2466                     GenericSchedulerBase::SchedCandidate &TryCand,
2467                     GenericSchedulerBase::SchedCandidate &Cand,
2468                     GenericSchedulerBase::CandReason Reason) {
2469   if (TryVal < CandVal) {
2470     TryCand.Reason = Reason;
2471     return true;
2472   }
2473   if (TryVal > CandVal) {
2474     if (Cand.Reason > Reason)
2475       Cand.Reason = Reason;
2476     return true;
2477   }
2478   Cand.setRepeat(Reason);
2479   return false;
2480 }
2481 
2482 static bool tryGreater(int TryVal, int CandVal,
2483                        GenericSchedulerBase::SchedCandidate &TryCand,
2484                        GenericSchedulerBase::SchedCandidate &Cand,
2485                        GenericSchedulerBase::CandReason Reason) {
2486   if (TryVal > CandVal) {
2487     TryCand.Reason = Reason;
2488     return true;
2489   }
2490   if (TryVal < CandVal) {
2491     if (Cand.Reason > Reason)
2492       Cand.Reason = Reason;
2493     return true;
2494   }
2495   Cand.setRepeat(Reason);
2496   return false;
2497 }
2498 
2499 static bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
2500                        GenericSchedulerBase::SchedCandidate &Cand,
2501                        SchedBoundary &Zone) {
2502   if (Zone.isTop()) {
2503     if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
2504       if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2505                   TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
2506         return true;
2507     }
2508     if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2509                    TryCand, Cand, GenericSchedulerBase::TopPathReduce))
2510       return true;
2511   } else {
2512     if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
2513       if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2514                   TryCand, Cand, GenericSchedulerBase::BotHeightReduce))
2515         return true;
2516     }
2517     if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2518                    TryCand, Cand, GenericSchedulerBase::BotPathReduce))
2519       return true;
2520   }
2521   return false;
2522 }
2523 
2524 static void tracePick(GenericSchedulerBase::CandReason Reason, bool IsTop) {
2525   DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
2526         << GenericSchedulerBase::getReasonStr(Reason) << '\n');
2527 }
2528 
2529 static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand,
2530                       bool IsTop) {
2531   tracePick(Cand.Reason, IsTop);
2532 }
2533 
2534 void GenericScheduler::initialize(ScheduleDAGMI *dag) {
2535   assert(dag->hasVRegLiveness() &&
2536          "(PreRA)GenericScheduler needs vreg liveness");
2537   DAG = static_cast<ScheduleDAGMILive*>(dag);
2538   SchedModel = DAG->getSchedModel();
2539   TRI = DAG->TRI;
2540 
2541   Rem.init(DAG, SchedModel);
2542   Top.init(DAG, SchedModel, &Rem);
2543   Bot.init(DAG, SchedModel, &Rem);
2544 
2545   // Initialize resource counts.
2546 
2547   // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
2548   // are disabled, then these HazardRecs will be disabled.
2549   const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
2550   if (!Top.HazardRec) {
2551     Top.HazardRec =
2552         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
2553             Itin, DAG);
2554   }
2555   if (!Bot.HazardRec) {
2556     Bot.HazardRec =
2557         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
2558             Itin, DAG);
2559   }
2560 }
2561 
2562 /// Initialize the per-region scheduling policy.
2563 void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
2564                                   MachineBasicBlock::iterator End,
2565                                   unsigned NumRegionInstrs) {
2566   const MachineFunction &MF = *Begin->getParent()->getParent();
2567   const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();
2568 
2569   // Avoid setting up the register pressure tracker for small regions to save
2570   // compile time. As a rough heuristic, only track pressure when the number of
2571   // schedulable instructions exceeds half the integer register file.
2572   RegionPolicy.ShouldTrackPressure = true;
2573   for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
2574     MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
2575     if (TLI->isTypeLegal(LegalIntVT)) {
2576       unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
2577         TLI->getRegClassFor(LegalIntVT));
2578       RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
2579     }
2580   }
2581 
2582   // For generic targets, we default to bottom-up, because it's simpler and more
2583   // compile-time optimizations have been implemented in that direction.
2584   RegionPolicy.OnlyBottomUp = true;
2585 
2586   // Allow the subtarget to override default policy.
2587   MF.getSubtarget().overrideSchedPolicy(RegionPolicy, Begin, End,
2588                                         NumRegionInstrs);
2589 
2590   // After subtarget overrides, apply command line options.
2591   if (!EnableRegPressure)
2592     RegionPolicy.ShouldTrackPressure = false;
2593 
2594   // Check -misched-topdown/bottomup can force or unforce scheduling direction.
2595   // e.g. -misched-bottomup=false allows scheduling in both directions.
2596   assert((!ForceTopDown || !ForceBottomUp) &&
2597          "-misched-topdown incompatible with -misched-bottomup");
2598   if (ForceBottomUp.getNumOccurrences() > 0) {
2599     RegionPolicy.OnlyBottomUp = ForceBottomUp;
2600     if (RegionPolicy.OnlyBottomUp)
2601       RegionPolicy.OnlyTopDown = false;
2602   }
2603   if (ForceTopDown.getNumOccurrences() > 0) {
2604     RegionPolicy.OnlyTopDown = ForceTopDown;
2605     if (RegionPolicy.OnlyTopDown)
2606       RegionPolicy.OnlyBottomUp = false;
2607   }
2608 }
2609 
2610 void GenericScheduler::dumpPolicy() {
2611   dbgs() << "GenericScheduler RegionPolicy: "
2612          << " ShouldTrackPressure=" << RegionPolicy.ShouldTrackPressure
2613          << " OnlyTopDown=" << RegionPolicy.OnlyTopDown
2614          << " OnlyBottomUp=" << RegionPolicy.OnlyBottomUp
2615          << "\n";
2616 }
2617 
2618 /// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
2619 /// critical path by more cycles than it takes to drain the instruction buffer.
2620 /// We estimate an upper bound on in-flight instructions as:
2621 ///
2622 /// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
2623 /// InFlightIterations = AcyclicPath / CyclesPerIteration
2624 /// InFlightResources = InFlightIterations * LoopResources
2625 ///
2626 /// TODO: Check execution resources in addition to IssueCount.
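///
/// Illustrative example (hypothetical numbers, with latency and micro-op
/// factors of 1): CyclicCritPath=4, CriticalPath=16, RemIssueCount=8 and
/// MicroOpBufferSize=2 give IterCount = max(4, 8) = 8, AcyclicCount = 16,
/// InFlightCount = (16*8 + 7)/8 = 16 > BufferLimit = 2, so the region is
/// flagged acyclic-latency limited.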
2627 void GenericScheduler::checkAcyclicLatency() {
2628   if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
2629     return;
2630 
2631   // Scaled number of cycles per loop iteration.
2632   unsigned IterCount =
2633     std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
2634              Rem.RemIssueCount);
2635   // Scaled acyclic critical path.
2636   unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
2637   // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
2638   unsigned InFlightCount =
2639     (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
2640   unsigned BufferLimit =
2641     SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();
2642 
2643   Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;
2644 
2645   DEBUG(dbgs() << "IssueCycles="
2646         << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
2647         << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
2648         << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
2649         << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
2650         << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
2651         if (Rem.IsAcyclicLatencyLimited)
2652           dbgs() << "  ACYCLIC LATENCY LIMIT\n");
2653 }
2654 
2655 void GenericScheduler::registerRoots() {
2656   Rem.CriticalPath = DAG->ExitSU.getDepth();
2657 
2658   // Some roots may not feed into ExitSU. Check all of them in case.
2659   for (std::vector<SUnit*>::const_iterator
2660          I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
2661     if ((*I)->getDepth() > Rem.CriticalPath)
2662       Rem.CriticalPath = (*I)->getDepth();
2663   }
2664   DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
2665   if (DumpCriticalPathLength) {
2666     errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n";
2667   }
2668 
2669   if (EnableCyclicPath) {
2670     Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
2671     checkAcyclicLatency();
2672   }
2673 }
2674 
2675 static bool tryPressure(const PressureChange &TryP,
2676                         const PressureChange &CandP,
2677                         GenericSchedulerBase::SchedCandidate &TryCand,
2678                         GenericSchedulerBase::SchedCandidate &Cand,
2679                         GenericSchedulerBase::CandReason Reason,
2680                         const TargetRegisterInfo *TRI,
2681                         const MachineFunction &MF) {
2682   unsigned TryPSet = TryP.getPSetOrMax();
2683   unsigned CandPSet = CandP.getPSetOrMax();
2684   // If both candidates affect the same set, go with the smallest increase.
2685   if (TryPSet == CandPSet) {
2686     return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
2687                    Reason);
2688   }
2689   // If one candidate decreases and the other increases, go with it.
2690   // Invalid candidates have UnitInc==0.
2691   if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
2692                  Reason)) {
2693     return true;
2694   }
2695 
2696   int TryRank = TryP.isValid() ? TRI->getRegPressureSetScore(MF, TryPSet) :
2697                                  std::numeric_limits<int>::max();
2698 
2699   int CandRank = CandP.isValid() ? TRI->getRegPressureSetScore(MF, CandPSet) :
2700                                    std::numeric_limits<int>::max();
2701 
2702   // If the candidates are decreasing pressure, reverse priority.
2703   if (TryP.getUnitInc() < 0)
2704     std::swap(TryRank, CandRank);
2705   return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
2706 }
2707 
2708 static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
2709   return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
2710 }
2711 
2712 /// Minimize physical register live ranges. Regalloc wants them adjacent to
2713 /// their physreg def/use.
2714 ///
2715 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
2716 /// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
2717 /// with the operation that produces or consumes the physreg. We'll do this when
2718 /// regalloc has support for parallel copies.
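///
/// Illustrative example (hypothetical copy): scheduling bottom-up, for
/// COPY %vreg1, %physreg the consumer side (operand 0) counts as already
/// scheduled; since the unscheduled source is the physreg, the copy is
/// deferred (-1) when the SU sits at the region boundary and otherwise
/// scheduled eagerly (+1) to stay adjacent to the physreg access.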
2719 static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
2720   const MachineInstr *MI = SU->getInstr();
2721   if (!MI->isCopy())
2722     return 0;
2723 
2724   unsigned ScheduledOper = isTop ? 1 : 0;
2725   unsigned UnscheduledOper = isTop ? 0 : 1;
2726   // If we have already scheduled the physreg produce/consumer, immediately
2727   // schedule the copy.
2728   if (TargetRegisterInfo::isPhysicalRegister(
2729         MI->getOperand(ScheduledOper).getReg()))
2730     return 1;
2731   // If the physreg is at the boundary, defer it. Otherwise schedule it
2732   // immediately to free the dependent. We can hoist the copy later.
2733   bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
2734   if (TargetRegisterInfo::isPhysicalRegister(
2735         MI->getOperand(UnscheduledOper).getReg()))
2736     return AtBoundary ? -1 : 1;
2737   return 0;
2738 }
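
// Return-value summary for biasPhysRegCopy(): 1 biases toward scheduling the
// copy immediately (its physreg producer/consumer is already in the scheduled
// zone), -1 defers a copy whose physreg operand sits at the region boundary,
// and 0 expresses no preference. tryCandidate() below feeds these values to
// tryGreater() ahead of every other heuristic.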
2739 
2740 void GenericScheduler::initCandidate(SchedCandidate &Cand, SUnit *SU,
2741                                      bool AtTop,
2742                                      const RegPressureTracker &RPTracker,
2743                                      RegPressureTracker &TempTracker) {
2744   Cand.SU = SU;
2745   if (DAG->isTrackingPressure()) {
2746     if (AtTop) {
2747       TempTracker.getMaxDownwardPressureDelta(
2748         Cand.SU->getInstr(),
2749         Cand.RPDelta,
2750         DAG->getRegionCriticalPSets(),
2751         DAG->getRegPressure().MaxSetPressure);
2752     } else {
2753       if (VerifyScheduling) {
2754         TempTracker.getMaxUpwardPressureDelta(
2755           Cand.SU->getInstr(),
2756           &DAG->getPressureDiff(Cand.SU),
2757           Cand.RPDelta,
2758           DAG->getRegionCriticalPSets(),
2759           DAG->getRegPressure().MaxSetPressure);
2760       } else {
2761         RPTracker.getUpwardPressureDelta(
2762           Cand.SU->getInstr(),
2763           DAG->getPressureDiff(Cand.SU),
2764           Cand.RPDelta,
2765           DAG->getRegionCriticalPSets(),
2766           DAG->getRegPressure().MaxSetPressure);
2767       }
2768     }
2769   }
2770   DEBUG(if (Cand.RPDelta.Excess.isValid())
2771           dbgs() << "  Try  SU(" << Cand.SU->NodeNum << ") "
2772                  << TRI->getRegPressureSetName(Cand.RPDelta.Excess.getPSet())
2773                  << ":" << Cand.RPDelta.Excess.getUnitInc() << "\n");
2774 }
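
// Note the asymmetry above: the top-down path recomputes pressure with the
// temporary downward tracker, while the normal bottom-up path reuses the
// PressureDiffs cached when the DAG was built. Under -verify-misched the
// expensive upward query runs instead, cross-checking the cached diff.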
2775 
2776 /// Apply a set of heuristics to a new candidate. Heuristics are currently
2777 /// hierarchical. This may be more efficient than a graduated cost model because
2778 /// we don't need to evaluate all aspects of the model for each node in the
2779 /// queue. But it's really done to make the heuristics easier to debug and
2780 /// statistically analyze.
2781 ///
2782 /// \param Cand provides the policy and current best candidate.
2783 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
2784 /// \param Zone describes the scheduled zone that we are extending.
2785 void GenericScheduler::tryCandidate(SchedCandidate &Cand,
2786                                     SchedCandidate &TryCand,
2787                                     SchedBoundary &Zone) {
2788   // Initialize the candidate if needed.
2789   if (!Cand.isValid()) {
2790     TryCand.Reason = NodeOrder;
2791     return;
2792   }
2793 
2794   if (tryGreater(biasPhysRegCopy(TryCand.SU, Zone.isTop()),
2795                  biasPhysRegCopy(Cand.SU, Zone.isTop()),
2796                  TryCand, Cand, PhysRegCopy))
2797     return;
2798 
2799   // Avoid exceeding the target's limit.
2800   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
2801                                                Cand.RPDelta.Excess,
2802                                                TryCand, Cand, RegExcess, TRI,
2803                                                DAG->MF))
2804     return;
2805 
2806   // Avoid increasing the max critical pressure in the scheduled region.
2807   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
2808                                                Cand.RPDelta.CriticalMax,
2809                                                TryCand, Cand, RegCritical, TRI,
2810                                                DAG->MF))
2811     return;
2812 
2813   // For loops that are acyclic path limited, aggressively schedule for latency.
2814   // This can result in very long dependence chains scheduled in sequence, so
2815   // once every cycle (when CurrMOps == 0), switch to normal heuristics.
2816   if (Rem.IsAcyclicLatencyLimited && !Zone.getCurrMOps()
2817       && tryLatency(TryCand, Cand, Zone))
2818     return;
2819 
2820   // Prioritize instructions that read unbuffered resources by stall cycles.
2821   if (tryLess(Zone.getLatencyStallCycles(TryCand.SU),
2822               Zone.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
2823     return;
2824 
2825   // Keep clustered nodes together to encourage downstream peephole
2826   // optimizations which may reduce resource requirements.
2827   //
2828   // This is a best effort to set things up for a post-RA pass. Optimizations
2829   // like generating loads of multiple registers should ideally be done within
2830   // the scheduler pass by combining the loads during DAG postprocessing.
2831   const SUnit *NextClusterSU =
2832     Zone.isTop() ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
2833   if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU,
2834                  TryCand, Cand, Cluster))
2835     return;
2836 
2837   // Weak edges are for clustering and other constraints.
2838   if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()),
2839               getWeakLeft(Cand.SU, Zone.isTop()),
2840               TryCand, Cand, Weak)) {
2841     return;
2842   }
2843   // Avoid increasing the max pressure of the entire region.
2844   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
2845                                                Cand.RPDelta.CurrentMax,
2846                                                TryCand, Cand, RegMax, TRI,
2847                                                DAG->MF))
2848     return;
2849 
2850   // Avoid critical resource consumption and balance the schedule.
2851   TryCand.initResourceDelta(DAG, SchedModel);
2852   if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
2853               TryCand, Cand, ResourceReduce))
2854     return;
2855   if (tryGreater(TryCand.ResDelta.DemandedResources,
2856                  Cand.ResDelta.DemandedResources,
2857                  TryCand, Cand, ResourceDemand))
2858     return;
2859 
2860   // Avoid serializing long latency dependence chains.
2861   // For acyclic path limited loops, latency was already checked above.
2862   if (!RegionPolicy.DisableLatencyHeuristic && Cand.Policy.ReduceLatency &&
2863       !Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, Zone)) {
2864     return;
2865   }
2866 
2867   // Prefer immediate defs/users of the last scheduled instruction. This is a
2868   // local pressure avoidance strategy that also makes the machine code
2869   // readable.
2870   if (tryGreater(Zone.isNextSU(TryCand.SU), Zone.isNextSU(Cand.SU),
2871                  TryCand, Cand, NextDefUse))
2872     return;
2873 
2874   // Fall through to original instruction order.
2875   if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
2876       || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
2877     TryCand.Reason = NodeOrder;
2878   }
2879 }
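
// Summary of the hierarchy applied above, strongest first: PhysRegCopy,
// RegExcess, RegCritical, latency (acyclic-limited loops only), Stall,
// Cluster, Weak, RegMax, ResourceReduce, ResourceDemand, latency, NextDefUse,
// and finally NodeOrder as the tie breaker. The first heuristic that
// separates the two candidates decides; the remainder are never evaluated.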
2880 
2881 /// Pick the best candidate from the queue.
2882 ///
2883 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
2884 /// DAG building. To adjust for the current scheduling location we need to
2885 /// maintain the number of vreg uses remaining to be top-scheduled.
2886 void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
2887                                          const RegPressureTracker &RPTracker,
2888                                          SchedCandidate &Cand) {
2889   ReadyQueue &Q = Zone.Available;
2890 
2891   DEBUG(Q.dump());
2892 
2893   // getMaxPressureDelta temporarily modifies the tracker.
2894   RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
2895 
2896   for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
2898     SchedCandidate TryCand(Cand.Policy);
2899     initCandidate(TryCand, *I, Zone.isTop(), RPTracker, TempTracker);
2900     tryCandidate(Cand, TryCand, Zone);
2901     if (TryCand.Reason != NoCand) {
2902       // Initialize resource delta if needed in case future heuristics query it.
2903       if (TryCand.ResDelta == SchedResourceDelta())
2904         TryCand.initResourceDelta(DAG, SchedModel);
2905       Cand.setBest(TryCand);
2906       DEBUG(traceCandidate(Cand));
2907     }
2908   }
2909 }
2910 
2911 /// Pick the best candidate node from either the top or bottom queue.
2912 SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
2913   // Schedule as far as possible in the direction of no choice. This is most
2914   // efficient, but also provides the best heuristics for CriticalPSets.
2915   if (SUnit *SU = Bot.pickOnlyChoice()) {
2916     IsTopNode = false;
2917     tracePick(Only1, false);
2918     return SU;
2919   }
2920   if (SUnit *SU = Top.pickOnlyChoice()) {
2921     IsTopNode = true;
2922     tracePick(Only1, true);
2923     return SU;
2924   }
2925   CandPolicy NoPolicy;
2926   SchedCandidate BotCand(NoPolicy);
2927   SchedCandidate TopCand(NoPolicy);
2928   // Set the bottom-up policy based on the state of the current bottom zone and
2929   // the instructions outside the zone, including the top zone.
2930   setPolicy(BotCand.Policy, /*IsPostRA=*/false, Bot, &Top);
2931   // Set the top-down policy based on the state of the current top zone and
2932   // the instructions outside the zone, including the bottom zone.
2933   setPolicy(TopCand.Policy, /*IsPostRA=*/false, Top, &Bot);
2934 
2935   // Prefer bottom scheduling when heuristics are silent.
2936   pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
2937   assert(BotCand.Reason != NoCand && "failed to find the first candidate");
2938 
2939   // If either Q has a single candidate that provides the least increase in
2940   // Excess pressure, we can immediately schedule from that Q.
2941   //
2942   // RegionCriticalPSets summarizes the pressure within the scheduled region and
2943   // affects picking from either Q. If scheduling in one direction must
2944   // increase pressure for one of the excess PSets, then schedule in that
2945   // direction first to provide more freedom in the other direction.
2946   if ((BotCand.Reason == RegExcess && !BotCand.isRepeat(RegExcess))
2947       || (BotCand.Reason == RegCritical && !BotCand.isRepeat(RegCritical)))
2948   {
2949     IsTopNode = false;
2950     tracePick(BotCand, IsTopNode);
2951     return BotCand.SU;
2952   }
2953   // Check if the top Q has a better candidate.
2954   pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2955   assert(TopCand.Reason != NoCand && "failed to find the first candidate");
2956 
2957   // Choose the queue with the most important (lowest enum) reason.
2958   if (TopCand.Reason < BotCand.Reason) {
2959     IsTopNode = true;
2960     tracePick(TopCand, IsTopNode);
2961     return TopCand.SU;
2962   }
2963   // Otherwise prefer the bottom candidate, in node order if all else failed.
2964   IsTopNode = false;
2965   tracePick(BotCand, IsTopNode);
2966   return BotCand.SU;
2967 }
2968 
2969 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
2970 SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
2971   if (DAG->top() == DAG->bottom()) {
2972     assert(Top.Available.empty() && Top.Pending.empty() &&
2973            Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
2974     return nullptr;
2975   }
2976   SUnit *SU;
2977   do {
2978     if (RegionPolicy.OnlyTopDown) {
2979       SU = Top.pickOnlyChoice();
2980       if (!SU) {
2981         CandPolicy NoPolicy;
2982         SchedCandidate TopCand(NoPolicy);
2983         pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2984         assert(TopCand.Reason != NoCand && "failed to find a candidate");
2985         tracePick(TopCand, true);
2986         SU = TopCand.SU;
2987       }
2988       IsTopNode = true;
2989     } else if (RegionPolicy.OnlyBottomUp) {
2990       SU = Bot.pickOnlyChoice();
2991       if (!SU) {
2992         CandPolicy NoPolicy;
2993         SchedCandidate BotCand(NoPolicy);
2994         pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
2995         assert(BotCand.Reason != NoCand && "failed to find a candidate");
2996         tracePick(BotCand, false);
2997         SU = BotCand.SU;
2998       }
2999       IsTopNode = false;
3000     } else {
3001       SU = pickNodeBidirectional(IsTopNode);
3002     }
3003   } while (SU->isScheduled);
3004 
3005   if (SU->isTopReady())
3006     Top.removeReady(SU);
3007   if (SU->isBottomReady())
3008     Bot.removeReady(SU);
3009 
3010   DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
3011   return SU;
3012 }
3013 
3014 void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
3016   MachineBasicBlock::iterator InsertPos = SU->getInstr();
3017   if (!isTop)
3018     ++InsertPos;
3019   SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;
3020 
3021   // Find already scheduled copies with a single physreg dependence and move
3022   // them just above the scheduled instruction.
3023   for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end();
3024        I != E; ++I) {
3025     if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg()))
3026       continue;
3027     SUnit *DepSU = I->getSUnit();
3028     if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
3029       continue;
3030     MachineInstr *Copy = DepSU->getInstr();
3031     if (!Copy->isCopy())
3032       continue;
3033     DEBUG(dbgs() << "  Rescheduling physreg copy ";
3034           I->getSUnit()->dump(DAG));
3035     DAG->moveInstruction(Copy, InsertPos);
3036   }
3037 }
3038 
3039 /// Update the scheduler's state after scheduling a node. This is the same node
3040 /// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
3041 /// update its state based on the current cycle before MachineSchedStrategy
3042 /// does.
3043 ///
3044 /// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
3045 /// them here. See comments in biasPhysRegCopy.
3046 void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3047   if (IsTopNode) {
3048     SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3049     Top.bumpNode(SU);
3050     if (SU->hasPhysRegUses)
3051       reschedulePhysRegCopies(SU, true);
3052   } else {
3053     SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
3054     Bot.bumpNode(SU);
3055     if (SU->hasPhysRegDefs)
3056       reschedulePhysRegCopies(SU, false);
3057   }
3058 }
3059 
3060 /// Create the standard converging machine scheduler. This will be used as the
3061 /// default scheduler if the target does not set a default.
3062 static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C) {
3063   ScheduleDAGMILive *DAG = new ScheduleDAGMILive(C, make_unique<GenericScheduler>(C));
3064   // Register DAG post-processors.
3065   //
3066   // FIXME: extend the mutation API to allow earlier mutations to instantiate
3067   // data and pass it to later mutations. Have a single mutation that gathers
3068   // the interesting nodes in one pass.
3069   DAG->addMutation(make_unique<CopyConstrain>(DAG->TII, DAG->TRI));
3070   if (EnableMemOpCluster) {
3071     if (DAG->TII->enableClusterLoads())
3072       DAG->addMutation(make_unique<LoadClusterMutation>(DAG->TII, DAG->TRI));
3073     if (DAG->TII->enableClusterStores())
3074       DAG->addMutation(make_unique<StoreClusterMutation>(DAG->TII, DAG->TRI));
3075   }
3076   if (EnableMacroFusion)
3077     DAG->addMutation(make_unique<MacroFusion>(*DAG->TII, *DAG->TRI));
3078   return DAG;
3079 }
3080 
3081 static MachineSchedRegistry
3082 GenericSchedRegistry("converge", "Standard converging scheduler.",
3083                      createGenericSchedLive);
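
// A minimal sketch of how a custom strategy could be hooked in through the
// same registry mechanism (the "MyStrategy" class and "my-sched" name are
// hypothetical):
#if 0
static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, make_unique<MyStrategy>(C));
}
static MachineSchedRegistry
MySchedRegistry("my-sched", "Example custom scheduler.", createMySched);
// Selected on the command line with: llc -misched=my-sched ...
#endif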
3084 
3085 //===----------------------------------------------------------------------===//
3086 // PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
3087 //===----------------------------------------------------------------------===//
3088 
3089 void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
3090   DAG = Dag;
3091   SchedModel = DAG->getSchedModel();
3092   TRI = DAG->TRI;
3093 
3094   Rem.init(DAG, SchedModel);
3095   Top.init(DAG, SchedModel, &Rem);
3096   BotRoots.clear();
3097 
3098   // Initialize the HazardRecognizer. If itineraries don't exist, are empty,
3099   // or are disabled, then the HazardRec will be disabled.
3100   const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
3101   if (!Top.HazardRec) {
3102     Top.HazardRec =
3103         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
3104             Itin, DAG);
3105   }
3106 }
3107 
3109 void PostGenericScheduler::registerRoots() {
3110   Rem.CriticalPath = DAG->ExitSU.getDepth();
3111 
3112   // Some roots may not feed into ExitSU. Check all of them just in case.
3113   for (SmallVectorImpl<SUnit*>::const_iterator
3114          I = BotRoots.begin(), E = BotRoots.end(); I != E; ++I) {
3115     if ((*I)->getDepth() > Rem.CriticalPath)
3116       Rem.CriticalPath = (*I)->getDepth();
3117   }
3118   DEBUG(dbgs() << "Critical Path: (PGS-RR) " << Rem.CriticalPath << '\n');
3119   if (DumpCriticalPathLength) {
3120     errs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << " \n";
3121   }
3122 }
3123 
3124 /// Apply a set of heuristics to a new candidate for PostRA scheduling.
3125 ///
3126 /// \param Cand provides the policy and current best candidate.
3127 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
3128 void PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
3129                                         SchedCandidate &TryCand) {
3131   // Initialize the candidate if needed.
3132   if (!Cand.isValid()) {
3133     TryCand.Reason = NodeOrder;
3134     return;
3135   }
3136 
3137   // Prioritize instructions that read unbuffered resources by stall cycles.
3138   if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
3139               Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
3140     return;
3141 
3142   // Avoid critical resource consumption and balance the schedule.
3143   if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
3144               TryCand, Cand, ResourceReduce))
3145     return;
3146   if (tryGreater(TryCand.ResDelta.DemandedResources,
3147                  Cand.ResDelta.DemandedResources,
3148                  TryCand, Cand, ResourceDemand))
3149     return;
3150 
3151   // Avoid serializing long latency dependence chains.
3152   if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
3153     return;
3154   }
3155 
3156   // Fall through to original instruction order.
3157   if (TryCand.SU->NodeNum < Cand.SU->NodeNum)
3158     TryCand.Reason = NodeOrder;
3159 }
3160 
3161 void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
3162   ReadyQueue &Q = Top.Available;
3163 
3164   DEBUG(Q.dump());
3165 
3166   for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
3167     SchedCandidate TryCand(Cand.Policy);
3168     TryCand.SU = *I;
3169     TryCand.initResourceDelta(DAG, SchedModel);
3170     tryCandidate(Cand, TryCand);
3171     if (TryCand.Reason != NoCand) {
3172       Cand.setBest(TryCand);
3173       DEBUG(traceCandidate(Cand));
3174     }
3175   }
3176 }
3177 
3178 /// Pick the next node to schedule.
3179 SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
3180   if (DAG->top() == DAG->bottom()) {
3181     assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
3182     return nullptr;
3183   }
3184   SUnit *SU;
3185   do {
3186     SU = Top.pickOnlyChoice();
3187     if (SU) {
3188       tracePick(Only1, true);
3189     } else {
3190       CandPolicy NoPolicy;
3191       SchedCandidate TopCand(NoPolicy);
3192       // Set the top-down policy based on the state of the current top zone
3193       // and the remaining unscheduled instructions; post-RA has no bottom zone.
3194       setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
3195       pickNodeFromQueue(TopCand);
3196       assert(TopCand.Reason != NoCand && "failed to find a candidate");
3197       tracePick(TopCand, true);
3198       SU = TopCand.SU;
3199     }
3200   } while (SU->isScheduled);
3201 
3202   IsTopNode = true;
3203   Top.removeReady(SU);
3204 
3205   DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
3206   return SU;
3207 }
3208 
3209 /// Called after ScheduleDAGMI has scheduled an instruction and updated
3210 /// scheduled/remaining flags in the DAG nodes.
3211 void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3212   SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3213   Top.bumpNode(SU);
3214 }
3215 
3216 /// Create a generic scheduler with no vreg liveness or DAG mutation passes.
3217 static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C) {
3218   return new ScheduleDAGMI(C, make_unique<PostGenericScheduler>(C), /*IsPostRA=*/true);
3219 }
3220 
3221 //===----------------------------------------------------------------------===//
3222 // ILP Scheduler. Currently for experimental analysis of heuristics.
3223 //===----------------------------------------------------------------------===//
3224 
3225 namespace {
3226 /// \brief Order nodes by the ILP metric.
3227 struct ILPOrder {
3228   const SchedDFSResult *DFSResult;
3229   const BitVector *ScheduledTrees;
3230   bool MaximizeILP;
3231 
3232   ILPOrder(bool MaxILP)
3233     : DFSResult(nullptr), ScheduledTrees(nullptr), MaximizeILP(MaxILP) {}
3234 
3235   /// \brief Apply a less-than relation on node priority.
3236   ///
3237   /// (Return true if A comes after B in the Q.)
3238   bool operator()(const SUnit *A, const SUnit *B) const {
3239     unsigned SchedTreeA = DFSResult->getSubtreeID(A);
3240     unsigned SchedTreeB = DFSResult->getSubtreeID(B);
3241     if (SchedTreeA != SchedTreeB) {
3242       // Unscheduled trees have lower priority.
3243       if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
3244         return ScheduledTrees->test(SchedTreeB);
3245 
3246       // Trees with shallower connections have lower priority.
3247       if (DFSResult->getSubtreeLevel(SchedTreeA)
3248           != DFSResult->getSubtreeLevel(SchedTreeB)) {
3249         return DFSResult->getSubtreeLevel(SchedTreeA)
3250           < DFSResult->getSubtreeLevel(SchedTreeB);
3251       }
3252     }
3253     if (MaximizeILP)
3254       return DFSResult->getILP(A) < DFSResult->getILP(B);
3255     else
3256       return DFSResult->getILP(A) > DFSResult->getILP(B);
3257   }
3258 };
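
// ILPOrder is the comparator for the heap operations in ILPScheduler below.
// Returning true means A has lower priority than B, so pop_heap surfaces
// nodes from already-scheduled, deeply connected subtrees first, breaking
// ties on the ILP metric itself.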
3259 
3260 /// \brief Schedule based on the ILP metric.
3261 class ILPScheduler : public MachineSchedStrategy {
3262   ScheduleDAGMILive *DAG;
3263   ILPOrder Cmp;
3264 
3265   std::vector<SUnit*> ReadyQ;
3266 public:
3267   ILPScheduler(bool MaximizeILP): DAG(nullptr), Cmp(MaximizeILP) {}
3268 
3269   void initialize(ScheduleDAGMI *dag) override {
3270     assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
3271     DAG = static_cast<ScheduleDAGMILive*>(dag);
3272     DAG->computeDFSResult();
3273     Cmp.DFSResult = DAG->getDFSResult();
3274     Cmp.ScheduledTrees = &DAG->getScheduledTrees();
3275     ReadyQ.clear();
3276   }
3277 
3278   void registerRoots() override {
3279     // Restore the heap in ReadyQ with the updated DFS results.
3280     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3281   }
3282 
3283   /// Implement MachineSchedStrategy interface.
3284   /// -----------------------------------------
3285 
3286   /// Callback to select the highest priority node from the ready Q.
3287   SUnit *pickNode(bool &IsTopNode) override {
3288     if (ReadyQ.empty()) return nullptr;
3289     std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3290     SUnit *SU = ReadyQ.back();
3291     ReadyQ.pop_back();
3292     IsTopNode = false;
3293     DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") "
3294           << " ILP: " << DAG->getDFSResult()->getILP(SU)
3295           << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @"
3296           << DAG->getDFSResult()->getSubtreeLevel(
3297             DAG->getDFSResult()->getSubtreeID(SU)) << '\n'
3298           << "Scheduling " << *SU->getInstr());
3299     return SU;
3300   }
3301 
3302   /// \brief Scheduler callback to notify that a new subtree is scheduled.
3303   void scheduleTree(unsigned SubtreeID) override {
3304     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3305   }
3306 
3307   /// Callback after a node is scheduled. SchedDFSResult requires bottom-up
3308   /// scheduling; per-subtree bookkeeping is handled by scheduleTree() above.
3309   void schedNode(SUnit *SU, bool IsTopNode) override {
3310     assert(!IsTopNode && "SchedDFSResult needs bottom-up");
3311   }
3312 
3313   void releaseTopNode(SUnit *) override { /*only called for top roots*/ }
3314 
3315   void releaseBottomNode(SUnit *SU) override {
3316     ReadyQ.push_back(SU);
3317     std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3318   }
3319 };
3320 } // namespace
3321 
3322 static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
3323   return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(true));
3324 }
3325 static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
3326   return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(false));
3327 }
3328 static MachineSchedRegistry ILPMaxRegistry(
3329   "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
3330 static MachineSchedRegistry ILPMinRegistry(
3331   "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);
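
// Like any MachineSchedRegistry entry, these are selected by name, e.g.:
//   llc -misched=ilpmax ...   (expose as much ILP as possible)
//   llc -misched=ilpmin ...   (serialize; mainly a stress test)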
3332 
3333 //===----------------------------------------------------------------------===//
3334 // Machine Instruction Shuffler for Correctness Testing
3335 //===----------------------------------------------------------------------===//
3336 
3337 #ifndef NDEBUG
3338 namespace {
3339 /// Apply a less-than relation on the node order, which corresponds to the
3340 /// instruction order prior to scheduling. IsReverse implements greater-than.
3341 template<bool IsReverse>
3342 struct SUnitOrder {
3343   bool operator()(SUnit *A, SUnit *B) const {
3344     if (IsReverse)
3345       return A->NodeNum > B->NodeNum;
3346     else
3347       return A->NodeNum < B->NodeNum;
3348   }
3349 };
3350 
3351 /// Reorder instructions as much as possible.
3352 class InstructionShuffler : public MachineSchedStrategy {
3353   bool IsAlternating;
3354   bool IsTopDown;
3355 
3356   // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
3357   // gives nodes with a higher number higher priority, causing the latest
3358   // instructions to be scheduled first.
3359   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
3360     TopQ;
3361   // When scheduling bottom-up, use greater-than as the queue priority.
3362   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
3363     BottomQ;
3364 public:
3365   InstructionShuffler(bool alternate, bool topdown)
3366     : IsAlternating(alternate), IsTopDown(topdown) {}
3367 
3368   void initialize(ScheduleDAGMI*) override {
3369     TopQ.clear();
3370     BottomQ.clear();
3371   }
3372 
3373   /// Implement MachineSchedStrategy interface.
3374   /// -----------------------------------------
3375 
3376   SUnit *pickNode(bool &IsTopNode) override {
3377     SUnit *SU;
3378     if (IsTopDown) {
3379       do {
3380         if (TopQ.empty()) return nullptr;
3381         SU = TopQ.top();
3382         TopQ.pop();
3383       } while (SU->isScheduled);
3384       IsTopNode = true;
3385     } else {
3386       do {
3387         if (BottomQ.empty()) return nullptr;
3388         SU = BottomQ.top();
3389         BottomQ.pop();
3390       } while (SU->isScheduled);
3391       IsTopNode = false;
3392     }
3393     if (IsAlternating)
3394       IsTopDown = !IsTopDown;
3395     return SU;
3396   }
3397 
3398   void schedNode(SUnit *SU, bool IsTopNode) override {}
3399 
3400   void releaseTopNode(SUnit *SU) override {
3401     TopQ.push(SU);
3402   }
3403   void releaseBottomNode(SUnit *SU) override {
3404     BottomQ.push(SU);
3405   }
3406 };
3407 } // namespace
3408 
3409 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
3410   bool Alternate = !ForceTopDown && !ForceBottomUp;
3411   bool TopDown = !ForceBottomUp;
3412   assert((TopDown || !ForceTopDown) &&
3413          "-misched-topdown incompatible with -misched-bottomup");
3414   return new ScheduleDAGMILive(C, make_unique<InstructionShuffler>(Alternate, TopDown));
3415 }
3416 static MachineSchedRegistry ShufflerRegistry(
3417   "shuffle", "Shuffle machine instructions alternating directions",
3418   createInstructionShuffler);
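
// Usage sketch (debug builds only): -misched=shuffle alternates directions by
// default; adding -misched-topdown or -misched-bottomup pins a single
// direction, per createInstructionShuffler() above.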
3419 #endif // !NDEBUG
3420 
3421 //===----------------------------------------------------------------------===//
3422 // GraphWriter support for ScheduleDAGMILive.
3423 //===----------------------------------------------------------------------===//
3424 
3425 #ifndef NDEBUG
3426 namespace llvm {
3427 
3428 template<> struct GraphTraits<
3429   ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};
3430 
3431 template<>
3432 struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
3433 
3434   DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
3435 
3436   static std::string getGraphName(const ScheduleDAG *G) {
3437     return G->MF.getName();
3438   }
3439 
3440   static bool renderGraphFromBottomUp() {
3441     return true;
3442   }
3443 
3444   static bool isNodeHidden(const SUnit *Node) {
3445     if (ViewMISchedCutoff == 0)
3446       return false;
3447     return (Node->Preds.size() > ViewMISchedCutoff
3448          || Node->Succs.size() > ViewMISchedCutoff);
3449   }
3450 
3451   /// If you want to override the dot attributes printed for a particular
3452   /// edge, override this method.
3453   static std::string getEdgeAttributes(const SUnit *Node,
3454                                        SUnitIterator EI,
3455                                        const ScheduleDAG *Graph) {
3456     if (EI.isArtificialDep())
3457       return "color=cyan,style=dashed";
3458     if (EI.isCtrlDep())
3459       return "color=blue,style=dashed";
3460     return "";
3461   }
3462 
3463   static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
3464     std::string Str;
3465     raw_string_ostream SS(Str);
3466     const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
3467     const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
3468       static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
3469     SS << "SU:" << SU->NodeNum;
3470     if (DFS)
3471       SS << " I:" << DFS->getNumInstrs(SU);
3472     return SS.str();
3473   }
3474   static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
3475     return G->getGraphNodeLabel(SU);
3476   }
3477 
3478   static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
3479     std::string Str("shape=Mrecord");
3480     const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
3481     const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
3482       static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
3483     if (DFS) {
3484       Str += ",style=filled,fillcolor=\"#";
3485       Str += DOT::getColorString(DFS->getSubtreeID(N));
3486       Str += '"';
3487     }
3488     return Str;
3489   }
3490 };
3491 } // namespace llvm
3492 #endif // NDEBUG
3493 
3494 /// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
3495 /// rendered using 'dot'.
3496 ///
3497 void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
3498 #ifndef NDEBUG
3499   ViewGraph(this, Name, false, Title);
3500 #else
3501   errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
3502          << "systems with Graphviz or gv!\n";
3503 #endif  // NDEBUG
3504 }
3505 
3506 /// Out-of-line implementation with no arguments is handy for gdb.
3507 void ScheduleDAGMI::viewGraph() {
3508   viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
3509 }
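
// Usage sketch from a debugger, assuming a live ScheduleDAGMI pointer named
// DAG (hypothetical):
//   (gdb) call DAG->viewGraph()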
3510