//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachinePassRegistry.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "machine-scheduler"

namespace llvm {

cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
                       cl::desc("Print critical path length to stdout"));

cl::opt<bool> VerifyScheduling(
    "verify-misched", cl::Hidden,
    cl::desc("Verify machine instrs before and after machine scheduling"));

} // end namespace llvm

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

/// In some situations a few uninteresting nodes depend on nearly all other
/// nodes in the graph; provide a cutoff to hide them.
static cl::opt<unsigned> ViewMISchedCutoff("view-misched-cutoff", cl::Hidden,
  cl::desc("Hide nodes with more predecessors/successors than cutoff"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));

static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
  cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
                                        cl::desc("Only schedule this MBB#"));
static cl::opt<bool> PrintDAGs("misched-print-dags", cl::Hidden,
                              cl::desc("Print schedule DAGs"));
#else
static const bool ViewMISchedDAGs = false;
static const bool PrintDAGs = false;
#endif // NDEBUG

/// Avoid quadratic complexity in unusually large basic blocks by limiting the
/// size of the ready lists.
static cl::opt<unsigned> ReadyListLimit("misched-limit", cl::Hidden,
  cl::desc("Limit ready list to N instructions"), cl::init(256));

static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
  cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(true));

static cl::opt<bool> EnableMemOpCluster("misched-cluster", cl::Hidden,
                                        cl::desc("Enable memop clustering."),
                                        cl::init(true));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}

void ScheduleDAGMutation::anchor() {}

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext() {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {

/// Base class for a machine scheduler class that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}

  void print(raw_ostream &O, const Module* = nullptr) const override;

protected:
  void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags);
};

/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createMachineScheduler();
};

/// PostMachineScheduler runs after register allocation, shortly before code
/// emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
  PostMachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createPostMachineScheduler();
};

} // end anonymous namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, DEBUG_TYPE,
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, DEBUG_TYPE,
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler() : MachineSchedulerBase(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

char PostMachineScheduler::ID = 0;

char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;

INITIALIZE_PASS(PostMachineScheduler, "postmisched",
                "PostRA Machine Instruction Scheduler", false, false)

PostMachineScheduler::PostMachineScheduler() : MachineSchedulerBase(ID) {
  initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry<MachineSchedRegistry::ScheduleDAGCtor>
    MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return nullptr;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry>>
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);
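
// For illustration only: an out-of-tree strategy is usually wired up through
// this registry so it becomes selectable via -misched=<name>. The names in
// this sketch are hypothetical placeholders, not part of this file:
//
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, std::make_unique<MySchedStrategy>(C));
//   }
//   static MachineSchedRegistry
//   MySchedRegistry("my-sched", "Run MySchedStrategy.", createMySched);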

static cl::opt<bool> EnableMachineSched(
    "enable-misched",
    cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePostRAMachineSched(
    "enable-post-misched",
    cl::desc("Enable the post-ra machine instruction scheduling pass."),
    cl::init(true), cl::Hidden);

/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugInstr())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return priorNonDebug(MachineBasicBlock::const_iterator(I), Beg)
      .getNonConstIterator();
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugInstr())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  return nextIfDebug(MachineBasicBlock::const_iterator(I), End)
      .getNonConstIterator();
}

/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor != useDefaultMachineSched)
    return Ctor(this);

  // Get the default scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedLive(this);
}

/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}
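
// Targets normally opt in by overriding the TargetPassConfig hook used above.
// A minimal sketch (MyPassConfig is hypothetical) could mirror what the
// generic fallback does and build a plain ScheduleDAGMI:
//
//   ScheduleDAGInstrs *
//   MyPassConfig::createPostMachineScheduler(MachineSchedContext *C) {
//     return new ScheduleDAGMI(C, std::make_unique<PostGenericScheduler>(C),
//                              /*RemoveKillFlags=*/true);
//   }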

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(mf.getFunction()))
    return false;

  if (EnableMachineSched.getNumOccurrences()) {
    if (!EnableMachineSched)
      return false;
  } else if (!mf.getSubtarget().enableMachineScheduler())
    return false;

  LLVM_DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    LLVM_DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler, false);

  LLVM_DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(mf.getFunction()))
    return false;

  if (EnablePostRAMachineSched.getNumOccurrences()) {
    if (!EnablePostRAMachineSched)
      return false;
  } else if (!mf.getSubtarget().enablePostRAMachineScheduler()) {
    LLVM_DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
    return false;
  }
  LLVM_DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  scheduleRegions(*Scheduler, true);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}

/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need the isCall to enforce
/// the boundary, but there would be no benefit to postRA scheduling across
/// calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
                            MachineBasicBlock *MBB,
                            MachineFunction *MF,
                            const TargetInstrInfo *TII) {
  return MI->isCall() || TII->isSchedulingBoundary(*MI, MBB, *MF);
}
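
// Note that the default TargetInstrInfo::isSchedulingBoundary() already treats
// labels and terminators as boundaries; targets commonly extend it for their
// own special instructions, so the exact region shape is target-dependent.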

/// A region of an MBB for scheduling.
namespace {
struct SchedRegion {
  /// RegionBegin is the first instruction in the scheduling region, and
  /// RegionEnd is either MBB->end() or the scheduling boundary after the
  /// last instruction in the scheduling region. These iterators cannot refer
  /// to instructions outside of the identified scheduling region because
  /// those may be reordered before scheduling this region.
  MachineBasicBlock::iterator RegionBegin;
  MachineBasicBlock::iterator RegionEnd;
  unsigned NumRegionInstrs;

  SchedRegion(MachineBasicBlock::iterator B, MachineBasicBlock::iterator E,
              unsigned N) :
    RegionBegin(B), RegionEnd(E), NumRegionInstrs(N) {}
};
} // end anonymous namespace

using MBBRegionsVector = SmallVector<SchedRegion, 16>;

static void
getSchedRegions(MachineBasicBlock *MBB,
                MBBRegionsVector &Regions,
                bool RegionsTopDown) {
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  MachineBasicBlock::iterator I = nullptr;
  for (MachineBasicBlock::iterator RegionEnd = MBB->end();
       RegionEnd != MBB->begin(); RegionEnd = I) {

    // Avoid decrementing RegionEnd for blocks with no terminator.
    if (RegionEnd != MBB->end() ||
        isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII)) {
      --RegionEnd;
    }

    // The next region starts above the previous region. Look backward in the
    // instruction stream until we find the nearest boundary.
    unsigned NumRegionInstrs = 0;
    I = RegionEnd;
    for (; I != MBB->begin(); --I) {
      MachineInstr &MI = *std::prev(I);
      if (isSchedBoundary(&MI, &*MBB, MF, TII))
        break;
      if (!MI.isDebugInstr()) {
        // MBB::size() uses instr_iterator to count. Here we need a bundle to
        // count as a single instruction.
        ++NumRegionInstrs;
      }
    }

    // It's possible we found a scheduling region that only has debug
    // instructions. Don't bother scheduling these.
    if (NumRegionInstrs != 0)
      Regions.push_back(SchedRegion(I, RegionEnd, NumRegionInstrs));
  }

  if (RegionsTopDown)
    std::reverse(Regions.begin(), Regions.end());
}
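
// Worked example (hypothetical block): given "%a = ...; %b = ...; CALL;
// %c = ...", the bottom-up walk above records the region [%c, end) first and
// [%a, CALL) second; if RegionsTopDown is set, the vector is then reversed
// back into program order.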

/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler,
                                           bool FixKillFlags) {
  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(&*MBB);

#ifndef NDEBUG
    if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
      continue;
    if (SchedOnlyBlock.getNumOccurrences()
        && (int)SchedOnlyBlock != MBB->getNumber())
      continue;
#endif

    // Break the block into scheduling regions [I, RegionEnd). RegionEnd
    // points to the scheduling boundary at the bottom of the region. The DAG
    // does not include RegionEnd, but the region does (i.e. the next
    // RegionEnd is above the previous RegionBegin). If the current block has
    // no terminator then RegionEnd == MBB->end() for the bottom region.
    //
    // All the regions of MBB are first found and stored in MBBRegions, which
    // are then processed top-down if Scheduler.doMBBSchedRegionsTopDown()
    // returns true.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls. Instructions must not be
    // added to other regions than the current one without updating MBBRegions.

    MBBRegionsVector MBBRegions;
    getSchedRegions(&*MBB, MBBRegions, Scheduler.doMBBSchedRegionsTopDown());
    for (MBBRegionsVector::iterator R = MBBRegions.begin();
         R != MBBRegions.end(); ++R) {
      MachineBasicBlock::iterator I = R->RegionBegin;
      MachineBasicBlock::iterator RegionEnd = R->RegionEnd;
      unsigned NumRegionInstrs = R->NumRegionInstrs;

      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == std::prev(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      LLVM_DEBUG(dbgs() << "********** MI Scheduling **********\n");
      LLVM_DEBUG(dbgs() << MF->getName() << ":" << printMBBReference(*MBB)
                        << " " << MBB->getName() << "\n  From: " << *I
                        << "    To: ";
                 if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
                 else dbgs() << "End";
                 dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
      if (DumpCriticalPathLength) {
        errs() << MF->getName();
        errs() << ":%bb. " << MBB->getNumber();
        errs() << " " << MBB->getName() << " \n";
      }

      // Schedule a region: possibly reorder instructions.
      // This invalidates the original region iterators.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();
    }
    Scheduler.finishBlock();
    // FIXME: Ideally, no further passes should rely on kill flags. However,
    // thumb2 size reduction is currently an exception, so the PostMIScheduler
    // needs to do this.
    if (FixKillFlags)
      Scheduler.fixupKills(*MBB);
  }
  Scheduler.finalizeSchedule();
}

void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ReadyQueue::dump() const {
  dbgs() << "Queue " << Name << ": ";
  for (const SUnit *SU : Queue)
    dbgs() << SU->NodeNum << " ";
  dbgs() << "\n";
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
// virtual registers.
//===----------------------------------------------------------------------===//

// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() = default;

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    dumpNode(*SuccSU);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
    SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();

  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SDep &Succ : SU->Succs)
    releaseSucc(SU, &Succ);
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    dumpNode(*PredSU);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
    PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();

  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SDep &Pred : SU->Preds)
    releasePred(SU, &Pred);
}

void ScheduleDAGMI::startBlock(MachineBasicBlock *bb) {
  ScheduleDAGInstrs::startBlock(bb);
  SchedImpl->enterMBB(bb);
}

void ScheduleDAGMI::finishBlock() {
  SchedImpl->leaveMBB();
  ScheduleDAGInstrs::finishBlock();
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                     MachineBasicBlock::iterator begin,
                                     MachineBasicBlock::iterator end,
                                     unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  SchedImpl->initPolicy(begin, end, regioninstrs);
}

/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
  MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals
  if (LIS)
    LIS->handleMove(*MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}
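
// -misched-cutoff (debug builds only) is mainly a bisection aid: for example,
// running "llc -misched-cutoff=10" stops reordering after ten instructions,
// which helps isolate the first scheduling decision that breaks a test case.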

/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  LLVM_DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
  LLVM_DEBUG(SchedImpl->dumpPolicy());

  // Build the DAG.
  buildSchedGraph(AA);

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  LLVM_DEBUG(dump());
  if (PrintDAGs) dump();
  if (ViewMISchedDAGs) viewGraph();

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    LLVM_DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    } else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
    }
    // Notify the scheduling strategy before updating the DAG.
    // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
    // runs, it can then use the accurate ReadyCycle time to determine whether
    // newly released nodes can move to the readyQ.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  LLVM_DEBUG({
    dbgs() << "*** Final schedule for "
           << printMBBReference(*begin()->getParent()) << " ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
}
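
// Note on the loop above: the unscheduled zone is always [CurrentTop,
// CurrentBottom). Top-scheduled instructions accumulate above CurrentTop and
// bottom-scheduled ones below CurrentBottom, so the final assertion that the
// two iterators meet is what guarantees every instruction has been placed.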

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (auto &m : Mutations)
    m->apply(this);
}

void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                      SmallVectorImpl<SUnit*> &BotRoots) {
  for (SUnit &SU : SUnits) {
    assert(!SU.isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU.biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!SU.NumPredsLeft)
      TopRoots.push_back(&SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!SU.NumSuccsLeft)
      BotRoots.push_back(&SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = nullptr;
  NextClusterPred = nullptr;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SUnit *SU : TopRoots)
    SchedImpl->releaseTopNode(SU);

  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *>>::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == std::prev(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      dumpNode(*SU);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}

void ScheduleDAGMILive::collectVRegUses(SUnit &SU) {
  const MachineInstr &MI = *SU.getInstr();
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (!MO.readsReg())
      continue;
    if (TrackLaneMasks && !MO.isUse())
      continue;

    Register Reg = MO.getReg();
    if (!Register::isVirtualRegister(Reg))
      continue;

    // Ignore re-defs.
    if (TrackLaneMasks) {
      bool FoundDef = false;
      for (const MachineOperand &MO2 : MI.operands()) {
        if (MO2.isReg() && MO2.isDef() && MO2.getReg() == Reg && !MO2.isDead()) {
          FoundDef = true;
          break;
        }
      }
      if (FoundDef)
        continue;
    }

    // Record this local VReg use.
    VReg2SUnitMultiMap::iterator UI = VRegUses.find(Reg);
    for (; UI != VRegUses.end(); ++UI) {
      if (UI->SU == &SU)
        break;
    }
    if (UI == VRegUses.end())
      VRegUses.insert(VReg2SUnit(Reg, LaneBitmask::getNone(), &SU));
  }
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);

  SUPressureDiffs.clear();

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
  ShouldTrackLaneMasks = SchedImpl->shouldTrackLaneMasks();

  assert((!ShouldTrackLaneMasks || ShouldTrackPressure) &&
         "ShouldTrackLaneMasks requires ShouldTrackPressure");
}

// Setup the register pressure trackers for the top scheduled and bottom
// scheduled regions.
void ScheduleDAGMILive::initRegPressure() {
  VRegUses.clear();
  VRegUses.setUniverse(MRI.getNumVirtRegs());
  for (SUnit &SU : SUnits)
    collectVRegUses(SU);

  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin,
                    ShouldTrackLaneMasks, false);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                    ShouldTrackLaneMasks, false);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  LLVM_DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    LLVM_DEBUG(dbgs() << "Live Thru: ";
               dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // For each live out vreg reduce the pressure change associated with other
  // uses of the same vreg below the live-out reaching def.
  updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd) {
    SmallVector<RegisterMaskPair, 8> LiveUses;
    BotRPTracker.recede(&LiveUses);
    updatePressureDiffs(LiveUses);
  }

  LLVM_DEBUG(dbgs() << "Top Pressure:\n";
             dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
             dbgs() << "Bottom Pressure:\n";
             dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI););

  assert((BotRPTracker.getPos() == RegionEnd ||
          (RegionEnd->isDebugInstr() &&
           BotRPTracker.getPos() == priorNonDebug(RegionEnd, RegionBegin))) &&
         "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
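  // Example with made-up numbers: if a pressure set's limit is 16 and this
  // region's max is 18, a PressureChange for that set is recorded below, and
  // updateScheduledPressure() later tracks the scheduled max against it.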
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      LLVM_DEBUG(dbgs() << TRI->getRegPressureSetName(i) << " Limit " << Limit
                        << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureChange(i));
    }
  }
  LLVM_DEBUG(dbgs() << "Excess PSets: ";
             for (const PressureChange &RCPS
                  : RegionCriticalPSets) dbgs()
             << TRI->getRegPressureSetName(RCPS.getPSet()) << " ";
             dbgs() << "\n");
}

void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
                        const std::vector<unsigned> &NewMaxPressure) {
  const PressureDiff &PDiff = getPressureDiff(SU);
  unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
  for (const PressureChange &PC : PDiff) {
    if (!PC.isValid())
      break;
    unsigned ID = PC.getPSet();
    while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
      ++CritIdx;
    if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
      if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
          && NewMaxPressure[ID] <= (unsigned)std::numeric_limits<int16_t>::max())
        RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
    }
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
    if (NewMaxPressure[ID] >= Limit - 2) {
      LLVM_DEBUG(dbgs() << "  " << TRI->getRegPressureSetName(ID) << ": "
                        << NewMaxPressure[ID]
                        << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ")
                        << Limit << "(+ " << BotRPTracker.getLiveThru()[ID]
                        << " livethru)\n");
    }
  }
}

/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(
    ArrayRef<RegisterMaskPair> LiveUses) {
  for (const RegisterMaskPair &P : LiveUses) {
    unsigned Reg = P.RegUnit;
    /// FIXME: Currently assuming single-use physregs.
    if (!Register::isVirtualRegister(Reg))
      continue;

    if (ShouldTrackLaneMasks) {
      // If the register has just become live then other uses won't change
      // this fact anymore => decrement pressure.
      // If the register has just become dead then other uses make it come
      // back to life => increment pressure.
      bool Decrement = P.LaneMask.any();

      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit &SU = *V2SU.SU;
        if (SU.isScheduled || &SU == &ExitSU)
          continue;

        PressureDiff &PDiff = getPressureDiff(&SU);
        PDiff.addPressureChange(Reg, Decrement, &MRI);
        LLVM_DEBUG(dbgs() << "  UpdateRegP: SU(" << SU.NodeNum << ") "
                          << printReg(Reg, TRI) << ':'
                          << PrintLaneMask(P.LaneMask) << ' ' << *SU.getInstr();
                   dbgs() << "              to "; PDiff.dump(*TRI););
      }
    } else {
      assert(P.LaneMask.any());
      LLVM_DEBUG(dbgs() << "  LiveReg: " << printVRegOrUnit(Reg, TRI) << "\n");
      // This may be called before CurrentBottom has been initialized. However,
      // BotRPTracker must have a valid position. We want the value live into the
      // instruction or live out of the block, so ask for the previous
      // instruction's live-out.
      const LiveInterval &LI = LIS->getInterval(Reg);
      VNInfo *VNI;
      MachineBasicBlock::const_iterator I =
        nextIfDebug(BotRPTracker.getPos(), BB->end());
      if (I == BB->end())
        VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
      else {
        LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*I));
        VNI = LRQ.valueIn();
      }
      // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
      assert(VNI && "No live value at use.");
      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit *SU = V2SU.SU;
        // If this use comes before the reaching def, it cannot be a last use,
        // so decrease its pressure change.
        if (!SU->isScheduled && SU != &ExitSU) {
          LiveQueryResult LRQ =
              LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
          if (LRQ.valueIn() == VNI) {
            PressureDiff &PDiff = getPressureDiff(SU);
            PDiff.addPressureChange(Reg, true, &MRI);
            LLVM_DEBUG(dbgs() << "  UpdateRegP: SU(" << SU->NodeNum << ") "
                              << *SU->getInstr();
                       dbgs() << "              to "; PDiff.dump(*TRI););
          }
        }
      }
    }
  }
}

void ScheduleDAGMILive::dump() const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  if (EntrySU.getInstr() != nullptr)
    dumpNodeAll(EntrySU);
  for (const SUnit &SU : SUnits) {
    dumpNodeAll(SU);
    if (ShouldTrackPressure) {
      dbgs() << "  Pressure Diff      : ";
      getPressureDiff(&SU).dump(*TRI);
    }
    dbgs() << "  Single Issue       : ";
    if (SchedModel.mustBeginGroup(SU.getInstr()) &&
        SchedModel.mustEndGroup(SU.getInstr()))
      dbgs() << "true;";
    else
      dbgs() << "false;";
    dbgs() << '\n';
  }
  if (ExitSU.getInstr() != nullptr)
    dumpNodeAll(ExitSU);
#endif
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMILive then it will want to override this virtual method in order
/// to update any specialized state.
void ScheduleDAGMILive::schedule() {
  LLVM_DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
  LLVM_DEBUG(SchedImpl->dumpPolicy());
  buildDAGWithRegPressure();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  LLVM_DEBUG(dump());
  if (PrintDAGs) dump();
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    LLVM_DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    if (DFSResult) {
      unsigned SubtreeID = DFSResult->getSubtreeID(SU);
      if (!ScheduledTrees.test(SubtreeID)) {
        ScheduledTrees.set(SubtreeID);
        DFSResult->scheduleTree(SubtreeID);
        SchedImpl->scheduleTree(SubtreeID);
      }
    }

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  LLVM_DEBUG({
    dbgs() << "*** Final schedule for "
           << printMBBReference(*begin()->getParent()) << " ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
}

/// Build the DAG and setup three register pressure trackers.
void ScheduleDAGMILive::buildDAGWithRegPressure() {
  if (!ShouldTrackPressure) {
    RPTracker.reset();
    RegionCriticalPSets.clear();
    buildSchedGraph(AA);
    return;
  }

  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 ShouldTrackLaneMasks, /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker, &SUPressureDiffs, LIS, ShouldTrackLaneMasks);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

void ScheduleDAGMILive::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single block loops. To handle loops that
/// span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes. For example, consider
/// the following instruction sequence where each instruction has unit latency
/// and defines an eponymous virtual register:
///
/// a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
///
/// This could be relevant to PostRA scheduling, but is currently implemented
/// assuming LiveIntervals.
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
  // This only applies to a single block loop.
  if (!BB->isSuccessor(BB))
    return 0;

  unsigned MaxCyclicLatency = 0;
  // Visit each live out vreg def to find def/use pairs that cross iterations.
  for (const RegisterMaskPair &P : RPTracker.getPressure().LiveOutRegs) {
    unsigned Reg = P.RegUnit;
    if (!Register::isVirtualRegister(Reg))
      continue;
    const LiveInterval &LI = LIS->getInterval(Reg);
    const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    if (!DefVNI)
      continue;

    MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
    const SUnit *DefSU = getSUnit(DefMI);
    if (!DefSU)
      continue;

    unsigned LiveOutHeight = DefSU->getHeight();
    unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
    // Visit all local users of the vreg def.
    for (const VReg2SUnit &V2SU
         : make_range(VRegUses.find(Reg), VRegUses.end())) {
      SUnit *SU = V2SU.SU;
      if (SU == &ExitSU)
        continue;

      // Only consider uses of the phi.
      LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
      if (!LRQ.valueIn()->isPHIDef())
        continue;

      // Assume that a path spanning two iterations is a cycle, which could
      // overestimate in strange cases. This allows cyclic latency to be
      // estimated as the minimum slack of the vreg's depth or height.
      unsigned CyclicLatency = 0;
      if (LiveOutDepth > SU->getDepth())
        CyclicLatency = LiveOutDepth - SU->getDepth();

      unsigned LiveInHeight = SU->getHeight() + DefSU->Latency;
      if (LiveInHeight > LiveOutHeight) {
        if (LiveInHeight - LiveOutHeight < CyclicLatency)
          CyclicLatency = LiveInHeight - LiveOutHeight;
      } else
        CyclicLatency = 0;

      LLVM_DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
                        << SU->NodeNum << ") = " << CyclicLatency << "c\n");
      if (CyclicLatency > MaxCyclicLatency)
        MaxCyclicLatency = CyclicLatency;
    }
  }
  LLVM_DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
  return MaxCyclicLatency;
}

/// Release ExitSU predecessors and setup scheduler queues. Re-position
/// the Top RP tracker in case the region beginning has changed.
void ScheduleDAGMILive::initQueues(ArrayRef<SUnit*> TopRoots,
                                   ArrayRef<SUnit*> BotRoots) {
  ScheduleDAGMI::initQueues(TopRoots, BotRoots);
  if (ShouldTrackPressure) {
    assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
    TopRPTracker.setPos(CurrentTop);
  }
}

/// Move an instruction and update register pressure.
void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    if (ShouldTrackPressure) {
      // Update top scheduled pressure.
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      TopRPTracker.advance(RegOpers);
      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
      LLVM_DEBUG(dbgs() << "Top Pressure:\n"; dumpRegSetPressure(
                     TopRPTracker.getRegSetPressureAtPos(), TRI););

      updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
    }
  } else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
      BotRPTracker.setPos(CurrentBottom);
    }
    if (ShouldTrackPressure) {
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      if (BotRPTracker.getPos() != CurrentBottom)
        BotRPTracker.recedeSkipDebugValues();
      SmallVector<RegisterMaskPair, 8> LiveUses;
      BotRPTracker.recede(RegOpers, &LiveUses);
      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
      LLVM_DEBUG(dbgs() << "Bottom Pressure:\n"; dumpRegSetPressure(
                     BotRPTracker.getRegSetPressureAtPos(), TRI););

      updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
      updatePressureDiffs(LiveUses);
    }
  }
}
1462 
1463 //===----------------------------------------------------------------------===//
1464 // BaseMemOpClusterMutation - DAG post-processing to cluster loads or stores.
1465 //===----------------------------------------------------------------------===//
1466 
1467 namespace {
1468 
1469 /// Post-process the DAG to create cluster edges between neighboring
1470 /// loads or between neighboring stores.
1471 class BaseMemOpClusterMutation : public ScheduleDAGMutation {
1472   struct MemOpInfo {
1473     SUnit *SU;
1474     const MachineOperand *BaseOp;
1475     int64_t Offset;
1476 
1477     MemOpInfo(SUnit *su, const MachineOperand *Op, int64_t ofs)
1478         : SU(su), BaseOp(Op), Offset(ofs) {}
1479 
1480     bool operator<(const MemOpInfo &RHS) const {
1481       if (BaseOp->getType() != RHS.BaseOp->getType())
1482         return BaseOp->getType() < RHS.BaseOp->getType();
1483 
1484       if (BaseOp->isReg())
1485         return std::make_tuple(BaseOp->getReg(), Offset, SU->NodeNum) <
1486                std::make_tuple(RHS.BaseOp->getReg(), RHS.Offset,
1487                                RHS.SU->NodeNum);
1488       if (BaseOp->isFI()) {
1489         const MachineFunction &MF =
1490             *BaseOp->getParent()->getParent()->getParent();
1491         const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
1492         bool StackGrowsDown = TFI.getStackGrowthDirection() ==
1493                               TargetFrameLowering::StackGrowsDown;
1494         // Can't use tuple comparison here since we might need to use a
1495         // different order when the stack grows down.
1496         if (BaseOp->getIndex() != RHS.BaseOp->getIndex())
1497           return StackGrowsDown ? BaseOp->getIndex() > RHS.BaseOp->getIndex()
1498                                 : BaseOp->getIndex() < RHS.BaseOp->getIndex();
1499 
1500         if (Offset != RHS.Offset)
1501           return Offset < RHS.Offset;
1502 
1503         return SU->NodeNum < RHS.SU->NodeNum;
1504       }
1505 
1506       llvm_unreachable("MemOpClusterMutation only supports register or frame "
1507                        "index bases.");
1508     }
1509   };
1510 
1511   const TargetInstrInfo *TII;
1512   const TargetRegisterInfo *TRI;
1513   bool IsLoad;
1514 
1515 public:
1516   BaseMemOpClusterMutation(const TargetInstrInfo *tii,
1517                            const TargetRegisterInfo *tri, bool IsLoad)
1518       : TII(tii), TRI(tri), IsLoad(IsLoad) {}
1519 
1520   void apply(ScheduleDAGInstrs *DAGInstrs) override;
1521 
1522 protected:
1523   void clusterNeighboringMemOps(ArrayRef<SUnit *> MemOps, ScheduleDAGInstrs *DAG);
1524 };
1525 
1526 class StoreClusterMutation : public BaseMemOpClusterMutation {
1527 public:
1528   StoreClusterMutation(const TargetInstrInfo *tii,
1529                        const TargetRegisterInfo *tri)
1530       : BaseMemOpClusterMutation(tii, tri, false) {}
1531 };
1532 
1533 class LoadClusterMutation : public BaseMemOpClusterMutation {
1534 public:
1535   LoadClusterMutation(const TargetInstrInfo *tii, const TargetRegisterInfo *tri)
1536       : BaseMemOpClusterMutation(tii, tri, true) {}
1537 };
1538 
1539 } // end anonymous namespace
1540 
1541 namespace llvm {
1542 
1543 std::unique_ptr<ScheduleDAGMutation>
1544 createLoadClusterDAGMutation(const TargetInstrInfo *TII,
1545                              const TargetRegisterInfo *TRI) {
1546   return EnableMemOpCluster ? std::make_unique<LoadClusterMutation>(TII, TRI)
1547                             : nullptr;
1548 }
1549 
1550 std::unique_ptr<ScheduleDAGMutation>
1551 createStoreClusterDAGMutation(const TargetInstrInfo *TII,
1552                               const TargetRegisterInfo *TRI) {
1553   return EnableMemOpCluster ? std::make_unique<StoreClusterMutation>(TII, TRI)
1554                             : nullptr;
1555 }
1556 
1557 } // end namespace llvm
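
// Usage sketch (hedged; this mirrors how in-tree targets typically wire the
// mutation up, but is not a verbatim excerpt): the factory result is handed
// to the DAG's post-processing pipeline, and a null result is ignored:
//
//   ScheduleDAGMILive *DAG = createGenericSchedLive(C);
//   DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
//   DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
//
// Because addMutation skips null mutations, the EnableMemOpCluster check
// above composes cleanly with this pattern.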
1558 
1559 void BaseMemOpClusterMutation::clusterNeighboringMemOps(
1560     ArrayRef<SUnit *> MemOps, ScheduleDAGInstrs *DAG) {
1561   SmallVector<MemOpInfo, 32> MemOpRecords;
1562   for (SUnit *SU : MemOps) {
1563     const MachineOperand *BaseOp;
1564     int64_t Offset;
1565     if (TII->getMemOperandWithOffset(*SU->getInstr(), BaseOp, Offset, TRI))
1566       MemOpRecords.push_back(MemOpInfo(SU, BaseOp, Offset));
1567   }
1568   if (MemOpRecords.size() < 2)
1569     return;
1570 
1571   llvm::sort(MemOpRecords);
1572   unsigned ClusterLength = 1;
1573   for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
1574     SUnit *SUa = MemOpRecords[Idx].SU;
1575     SUnit *SUb = MemOpRecords[Idx+1].SU;
1576     if (TII->shouldClusterMemOps(*MemOpRecords[Idx].BaseOp,
1577                                  *MemOpRecords[Idx + 1].BaseOp,
1578                                  ClusterLength) &&
1579         DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
1580       LLVM_DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
1581                         << SUb->NodeNum << ")\n");
1582       // Copy successor edges from SUa to SUb. Interleaving computation
1583       // dependent on SUa can prevent load combining due to register reuse.
1584       // Predecessor edges do not need to be copied from SUb to SUa since nearby
1585       // loads should have effectively the same inputs.
1586       for (const SDep &Succ : SUa->Succs) {
1587         if (Succ.getSUnit() == SUb)
1588           continue;
1589         LLVM_DEBUG(dbgs() << "  Copy Succ SU(" << Succ.getSUnit()->NodeNum
1590                           << ")\n");
1591         DAG->addEdge(Succ.getSUnit(), SDep(SUb, SDep::Artificial));
1592       }
1593       ++ClusterLength;
1594     } else
1595       ClusterLength = 1;
1596   }
1597 }
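
// Illustrative example (invented SUnits and offsets): after llvm::sort, the
// records for one base register appear in offset order, e.g.
//   SU(5): load [%base + 0], SU(7): load [%base + 8], SU(9): load [%base + 16]
// If shouldClusterMemOps approves SU(5)/SU(7) and then SU(7)/SU(9), the
// cluster grows to length 3; any rejection resets ClusterLength to 1 so a
// fresh cluster can start at the next record.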
1598 
1599 /// Callback from DAG postProcessing to create cluster edges for loads or stores.
1600 void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAG) {
1601   // Map DAG NodeNum to a set of dependent MemOps in the same store chain.
1602   DenseMap<unsigned, SmallVector<SUnit *, 4>> StoreChains;
1603   for (SUnit &SU : DAG->SUnits) {
1604     if ((IsLoad && !SU.getInstr()->mayLoad()) ||
1605         (!IsLoad && !SU.getInstr()->mayStore()))
1606       continue;
1607 
1608     unsigned ChainPredID = DAG->SUnits.size();
1609     for (const SDep &Pred : SU.Preds) {
1610       if (Pred.isCtrl()) {
1611         ChainPredID = Pred.getSUnit()->NodeNum;
1612         break;
1613       }
1614     }
1615     // Insert the SU into the corresponding store chain.
1616     auto &Chain = StoreChains.FindAndConstruct(ChainPredID).second;
1617     Chain.push_back(&SU);
1618   }
1619 
1620   // Iterate over the store chains.
1621   for (auto &SCD : StoreChains)
1622     clusterNeighboringMemOps(SCD.second, DAG);
1623 }
1624 
1625 //===----------------------------------------------------------------------===//
1626 // CopyConstrain - DAG post-processing to encourage copy elimination.
1627 //===----------------------------------------------------------------------===//
1628 
1629 namespace {
1630 
1631 /// Post-process the DAG to create weak edges from all uses of a copy to
1632 /// the one use that defines the copy's source vreg, most likely an induction
1633 /// variable increment.
1634 class CopyConstrain : public ScheduleDAGMutation {
1635   // Transient state.
1636   SlotIndex RegionBeginIdx;
1637 
1638   // RegionEndIdx is the slot index of the last non-debug instruction in the
1639   // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
1640   SlotIndex RegionEndIdx;
1641 
1642 public:
1643   CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}
1644 
1645   void apply(ScheduleDAGInstrs *DAGInstrs) override;
1646 
1647 protected:
1648   void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
1649 };
1650 
1651 } // end anonymous namespace
1652 
1653 namespace llvm {
1654 
1655 std::unique_ptr<ScheduleDAGMutation>
1656 createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
1657                                const TargetRegisterInfo *TRI) {
1658   return std::make_unique<CopyConstrain>(TII, TRI);
1659 }
1660 
1661 } // end namespace llvm
1662 
1663 /// constrainLocalCopy handles two possibilities:
1664 /// 1) Local src:
1665 /// I0:     = dst
1666 /// I1: src = ...
1667 /// I2:     = dst
1668 /// I3: dst = src (copy)
1669 /// (create pred->succ edges I0->I1, I2->I1)
1670 ///
1671 /// 2) Local copy:
1672 /// I0: dst = src (copy)
1673 /// I1:     = dst
1674 /// I2: src = ...
1675 /// I3:     = dst
1676 /// (create pred->succ edges I1->I2, I3->I2)
1677 ///
1678 /// Although the MachineScheduler is currently constrained to single blocks,
1679 /// this algorithm should handle extended blocks. An EBB is a set of
1680 /// contiguously numbered blocks such that the previous block in the EBB is
1681 /// always the single predecessor.
1682 void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
1683   LiveIntervals *LIS = DAG->getLIS();
1684   MachineInstr *Copy = CopySU->getInstr();
1685 
1686   // Check for pure vreg copies.
1687   const MachineOperand &SrcOp = Copy->getOperand(1);
1688   Register SrcReg = SrcOp.getReg();
1689   if (!Register::isVirtualRegister(SrcReg) || !SrcOp.readsReg())
1690     return;
1691 
1692   const MachineOperand &DstOp = Copy->getOperand(0);
1693   Register DstReg = DstOp.getReg();
1694   if (!Register::isVirtualRegister(DstReg) || DstOp.isDead())
1695     return;
1696 
1697   // Check if either the dest or source is local. If it's live across a back
1698   // edge, it's not local. Note that if both vregs are live across the back
1699   // edge, we cannot successfully constrain the copy without cyclic scheduling.
1700   // If both the copy's source and dest are local live intervals, then we
1701   // should treat the dest as the global for the purpose of adding
1702   // constraints. This adds edges from source's other uses to the copy.
1703   unsigned LocalReg = SrcReg;
1704   unsigned GlobalReg = DstReg;
1705   LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
1706   if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
1707     LocalReg = DstReg;
1708     GlobalReg = SrcReg;
1709     LocalLI = &LIS->getInterval(LocalReg);
1710     if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
1711       return;
1712   }
1713   LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);
1714 
1715   // Find the global segment after the start of the local LI.
1716   LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
1717   // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
1718   // local live range. We could create edges from other global uses to the local
1719   // start, but the coalescer should have already eliminated these cases, so
1720   // don't bother dealing with them.
1721   if (GlobalSegment == GlobalLI->end())
1722     return;
1723 
1724   // If GlobalSegment is killed at the LocalLI->start, the call to find()
1725   // returned the next global segment. But if GlobalSegment overlaps with
1726   // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
1727   // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
1728   if (GlobalSegment->contains(LocalLI->beginIndex()))
1729     ++GlobalSegment;
1730 
1731   if (GlobalSegment == GlobalLI->end())
1732     return;
1733 
1734   // Check if GlobalLI contains a hole in the vicinity of LocalLI.
1735   if (GlobalSegment != GlobalLI->begin()) {
1736     // Two address defs have no hole.
1737     if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
1738                                GlobalSegment->start)) {
1739       return;
1740     }
1741     // If the prior global segment may be defined by the same two-address
1742     // instruction that also defines LocalLI, then can't make a hole here.
1743     if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
1744                                LocalLI->beginIndex())) {
1745       return;
1746     }
1747     // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
1748     // it would be a disconnected component in the live range.
1749     assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
1750            "Disconnected live range within the scheduling region.");
1751   }
1752   MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
1753   if (!GlobalDef)
1754     return;
1755 
1756   SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
1757   if (!GlobalSU)
1758     return;
1759 
1760   // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
1761   // constraining the uses of the last local def to precede GlobalDef.
1762   SmallVector<SUnit*,8> LocalUses;
1763   const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
1764   MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
1765   SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
1766   for (const SDep &Succ : LastLocalSU->Succs) {
1767     if (Succ.getKind() != SDep::Data || Succ.getReg() != LocalReg)
1768       continue;
1769     if (Succ.getSUnit() == GlobalSU)
1770       continue;
1771     if (!DAG->canAddEdge(GlobalSU, Succ.getSUnit()))
1772       return;
1773     LocalUses.push_back(Succ.getSUnit());
1774   }
1775   // Open the top of the GlobalLI hole by constraining any earlier global uses
1776   // to precede the start of LocalLI.
1777   SmallVector<SUnit*,8> GlobalUses;
1778   MachineInstr *FirstLocalDef =
1779     LIS->getInstructionFromIndex(LocalLI->beginIndex());
1780   SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
1781   for (const SDep &Pred : GlobalSU->Preds) {
1782     if (Pred.getKind() != SDep::Anti || Pred.getReg() != GlobalReg)
1783       continue;
1784     if (Pred.getSUnit() == FirstLocalSU)
1785       continue;
1786     if (!DAG->canAddEdge(FirstLocalSU, Pred.getSUnit()))
1787       return;
1788     GlobalUses.push_back(Pred.getSUnit());
1789   }
1790   LLVM_DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
1791   // Add the weak edges.
1792   for (SmallVectorImpl<SUnit*>::const_iterator
1793          I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
1794     LLVM_DEBUG(dbgs() << "  Local use SU(" << (*I)->NodeNum << ") -> SU("
1795                       << GlobalSU->NodeNum << ")\n");
1796     DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
1797   }
1798   for (SmallVectorImpl<SUnit*>::const_iterator
1799          I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
1800     LLVM_DEBUG(dbgs() << "  Global use SU(" << (*I)->NodeNum << ") -> SU("
1801                       << FirstLocalSU->NodeNum << ")\n");
1802     DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
1803   }
1804 }
1805 
1806 /// Callback from DAG postProcessing to create weak edges to encourage
1807 /// copy elimination.
1808 void CopyConstrain::apply(ScheduleDAGInstrs *DAGInstrs) {
1809   ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
1810   assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");
1811 
1812   MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
1813   if (FirstPos == DAG->end())
1814     return;
1815   RegionBeginIdx = DAG->getLIS()->getInstructionIndex(*FirstPos);
1816   RegionEndIdx = DAG->getLIS()->getInstructionIndex(
1817       *priorNonDebug(DAG->end(), DAG->begin()));
1818 
1819   for (SUnit &SU : DAG->SUnits) {
1820     if (!SU.getInstr()->isCopy())
1821       continue;
1822 
1823     constrainLocalCopy(&SU, static_cast<ScheduleDAGMILive*>(DAG));
1824   }
1825 }
1826 
1827 //===----------------------------------------------------------------------===//
1828 // MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
1829 // and possibly other custom schedulers.
1830 //===----------------------------------------------------------------------===//
1831 
1832 static const unsigned InvalidCycle = ~0U;
1833 
1834 SchedBoundary::~SchedBoundary() { delete HazardRec; }
1835 
1836 /// Given a Count of resource usage and a Latency value, return true if a
1837 /// SchedBoundary becomes resource limited.
1838 /// If we are checking after scheduling a node, we should return true when
1839 /// we just reach the resource limit.
1840 static bool checkResourceLimit(unsigned LFactor, unsigned Count,
1841                                unsigned Latency, bool AfterSchedNode) {
1842   int ResCntFactor = (int)(Count - (Latency * LFactor));
1843   if (AfterSchedNode)
1844     return ResCntFactor >= (int)LFactor;
1845   else
1846     return ResCntFactor > (int)LFactor;
1847 }
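
// Worked example (invented numbers): with LFactor=2, Count=22 and Latency=10,
// ResCntFactor = 22 - 10*2 = 2. Checked after scheduling a node
// (AfterSchedNode=true), 2 >= 2 reports resource limited; checked beforehand,
// the strict 2 > 2 comparison still reports latency limited.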
1848 
1849 void SchedBoundary::reset() {
1850   // A new HazardRec is created for each DAG and owned by SchedBoundary.
1851   // Destroying and reconstructing it is very expensive though, so keep
1852   // invalid, placeholder HazardRecs around instead.
1853   if (HazardRec && HazardRec->isEnabled()) {
1854     delete HazardRec;
1855     HazardRec = nullptr;
1856   }
1857   Available.clear();
1858   Pending.clear();
1859   CheckPending = false;
1860   CurrCycle = 0;
1861   CurrMOps = 0;
1862   MinReadyCycle = std::numeric_limits<unsigned>::max();
1863   ExpectedLatency = 0;
1864   DependentLatency = 0;
1865   RetiredMOps = 0;
1866   MaxExecutedResCount = 0;
1867   ZoneCritResIdx = 0;
1868   IsResourceLimited = false;
1869   ReservedCycles.clear();
1870   ReservedCyclesIndex.clear();
1871 #ifndef NDEBUG
1872   // Track the maximum number of stall cycles that could arise either from the
1873   // latency of a DAG edge or the number of cycles that a processor resource is
1874   // reserved (SchedBoundary::ReservedCycles).
1875   MaxObservedStall = 0;
1876 #endif
1877   // Reserve a zero-count for invalid CritResIdx.
1878   ExecutedResCounts.resize(1);
1879   assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
1880 }
1881 
1882 void SchedRemainder::
1883 init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
1884   reset();
1885   if (!SchedModel->hasInstrSchedModel())
1886     return;
1887   RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
1888   for (SUnit &SU : DAG->SUnits) {
1889     const MCSchedClassDesc *SC = DAG->getSchedClass(&SU);
1890     RemIssueCount += SchedModel->getNumMicroOps(SU.getInstr(), SC)
1891       * SchedModel->getMicroOpFactor();
1892     for (TargetSchedModel::ProcResIter
1893            PI = SchedModel->getWriteProcResBegin(SC),
1894            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1895       unsigned PIdx = PI->ProcResourceIdx;
1896       unsigned Factor = SchedModel->getResourceFactor(PIdx);
1897       RemainingCounts[PIdx] += (Factor * PI->Cycles);
1898     }
1899   }
1900 }
1901 
1902 void SchedBoundary::
1903 init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
1904   reset();
1905   DAG = dag;
1906   SchedModel = smodel;
1907   Rem = rem;
1908   if (SchedModel->hasInstrSchedModel()) {
1909     unsigned ResourceCount = SchedModel->getNumProcResourceKinds();
1910     ReservedCyclesIndex.resize(ResourceCount);
1911     ExecutedResCounts.resize(ResourceCount);
1912     unsigned NumUnits = 0;
1913 
1914     for (unsigned i = 0; i < ResourceCount; ++i) {
1915       ReservedCyclesIndex[i] = NumUnits;
1916       NumUnits += SchedModel->getProcResource(i)->NumUnits;
1917     }
1918 
1919     ReservedCycles.resize(NumUnits, InvalidCycle);
1920   }
1921 }
1922 
1923 /// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
1924 /// these "soft stalls" differently than the hard stall cycles based on CPU
1925 /// resources and computed by checkHazard(). A fully in-order model
1926 /// (MicroOpBufferSize==0) will not make use of this since instructions are not
1927 /// available for scheduling until they are ready. However, a weaker in-order
1928 /// model may use this for heuristics. For example, if a processor has in-order
1929 /// behavior when reading certain resources, this may come into play.
1930 unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
1931   if (!SU->isUnbuffered)
1932     return 0;
1933 
1934   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
1935   if (ReadyCycle > CurrCycle)
1936     return ReadyCycle - CurrCycle;
1937   return 0;
1938 }
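
// For example (invented cycles): an unbuffered SU with BotReadyCycle=7 in a
// bottom-up zone at CurrCycle=5 reports a 2-cycle soft stall; a buffered SU
// always reports 0 because its latency is assumed to be hidden by the
// hardware's buffers.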
1939 
1940 /// Compute the next cycle at which the given processor resource unit
1941 /// can be scheduled.
1942 unsigned SchedBoundary::getNextResourceCycleByInstance(unsigned InstanceIdx,
1943                                                        unsigned Cycles) {
1944   unsigned NextUnreserved = ReservedCycles[InstanceIdx];
1945   // If this resource has never been used, always return cycle zero.
1946   if (NextUnreserved == InvalidCycle)
1947     return 0;
1948   // For bottom-up scheduling add the cycles needed for the current operation.
1949   if (!isTop())
1950     NextUnreserved += Cycles;
1951   return NextUnreserved;
1952 }
1953 
1954 /// Compute the next cycle at which the given processor resource can be
1955 /// scheduled.  Returns the next cycle and the index of the processor resource
1956 /// instance in the reserved cycles vector.
1957 std::pair<unsigned, unsigned>
1958 SchedBoundary::getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
1959   unsigned MinNextUnreserved = InvalidCycle;
1960   unsigned InstanceIdx = 0;
1961   unsigned StartIndex = ReservedCyclesIndex[PIdx];
1962   unsigned NumberOfInstances = SchedModel->getProcResource(PIdx)->NumUnits;
1963   assert(NumberOfInstances > 0 &&
1964          "Cannot have zero instances of a ProcResource");
1965 
1966   for (unsigned I = StartIndex, End = StartIndex + NumberOfInstances; I < End;
1967        ++I) {
1968     unsigned NextUnreserved = getNextResourceCycleByInstance(I, Cycles);
1969     if (MinNextUnreserved > NextUnreserved) {
1970       InstanceIdx = I;
1971       MinNextUnreserved = NextUnreserved;
1972     }
1973   }
1974   return std::make_pair(MinNextUnreserved, InstanceIdx);
1975 }
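
// Sketch with invented state: for a resource with two units whose
// ReservedCycles slots hold {InvalidCycle, 9}, the first unit has never been
// used, so getNextResourceCycleByInstance returns 0 for it and the pair
// (0, StartIndex) is returned, steering the scheduler to the idle unit.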
1976 
1977 /// Does this SU have a hazard within the current instruction group.
1978 ///
1979 /// The scheduler supports two modes of hazard recognition. The first is the
1980 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
1981 /// supports highly complicated in-order reservation tables
1982 /// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
1983 ///
1984 /// The second is a streamlined mechanism that checks for hazards based on
1985 /// simple counters that the scheduler itself maintains. It explicitly checks
1986 /// for instruction dispatch limitations, including the number of micro-ops that
1987 /// can dispatch per cycle.
1988 ///
1989 /// TODO: Also check whether the SU must start a new group.
1990 bool SchedBoundary::checkHazard(SUnit *SU) {
1991   if (HazardRec->isEnabled()
1992       && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
1993     return true;
1994   }
1995 
1996   unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
1997   if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
1998     LLVM_DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
1999                       << uops << '\n');
2000     return true;
2001   }
2002 
2003   if (CurrMOps > 0 &&
2004       ((isTop() && SchedModel->mustBeginGroup(SU->getInstr())) ||
2005        (!isTop() && SchedModel->mustEndGroup(SU->getInstr())))) {
2006     LLVM_DEBUG(dbgs() << "  hazard: SU(" << SU->NodeNum << ") must "
2007                       << (isTop() ? "begin" : "end") << " group\n");
2008     return true;
2009   }
2010 
2011   if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
2012     const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2013     for (const MCWriteProcResEntry &PE :
2014           make_range(SchedModel->getWriteProcResBegin(SC),
2015                      SchedModel->getWriteProcResEnd(SC))) {
2016       unsigned ResIdx = PE.ProcResourceIdx;
2017       unsigned Cycles = PE.Cycles;
2018       unsigned NRCycle, InstanceIdx;
2019       std::tie(NRCycle, InstanceIdx) = getNextResourceCycle(ResIdx, Cycles);
2020       if (NRCycle > CurrCycle) {
2021 #ifndef NDEBUG
2022         MaxObservedStall = std::max(Cycles, MaxObservedStall);
2023 #endif
2024         LLVM_DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") "
2025                           << SchedModel->getResourceName(ResIdx)
2026                           << '[' << InstanceIdx - ReservedCyclesIndex[ResIdx] << ']'
2027                           << "=" << NRCycle << "c\n");
2028         return true;
2029       }
2030     }
2031   }
2032   return false;
2033 }
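
// Example of the micro-op check above (invented widths): with IssueWidth=4,
// CurrMOps=3 and an SU worth 2 uops, 3 + 2 > 4 flags a hazard and the SU
// waits for the next instruction group; at CurrMOps=0 the same SU would be
// accepted even if its uop count alone exceeded the issue width.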
2034 
2035 // Find the unscheduled node in ReadySUs with the highest latency.
2036 unsigned SchedBoundary::
2037 findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
2038   SUnit *LateSU = nullptr;
2039   unsigned RemLatency = 0;
2040   for (SUnit *SU : ReadySUs) {
2041     unsigned L = getUnscheduledLatency(SU);
2042     if (L > RemLatency) {
2043       RemLatency = L;
2044       LateSU = SU;
2045     }
2046   }
2047   if (LateSU) {
2048     LLVM_DEBUG(dbgs() << Available.getName() << " RemLatency SU("
2049                       << LateSU->NodeNum << ") " << RemLatency << "c\n");
2050   }
2051   return RemLatency;
2052 }
2053 
2054 // Count resources in this zone and the remaining unscheduled
2055 // instructions. Return the max count, scaled. Set OtherCritIdx to the critical
2056 // resource index, or zero if the zone is issue limited.
2057 unsigned SchedBoundary::
2058 getOtherResourceCount(unsigned &OtherCritIdx) {
2059   OtherCritIdx = 0;
2060   if (!SchedModel->hasInstrSchedModel())
2061     return 0;
2062 
2063   unsigned OtherCritCount = Rem->RemIssueCount
2064     + (RetiredMOps * SchedModel->getMicroOpFactor());
2065   LLVM_DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
2066                     << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
2067   for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
2068        PIdx != PEnd; ++PIdx) {
2069     unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
2070     if (OtherCount > OtherCritCount) {
2071       OtherCritCount = OtherCount;
2072       OtherCritIdx = PIdx;
2073     }
2074   }
2075   if (OtherCritIdx) {
2076     LLVM_DEBUG(
2077         dbgs() << "  " << Available.getName() << " + Remain CritRes: "
2078                << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
2079                << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
2080   }
2081   return OtherCritCount;
2082 }
2083 
2084 void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle, bool InPQueue,
2085                                 unsigned Idx) {
2086   assert(SU->getInstr() && "Scheduled SUnit must have instr");
2087 
2088 #ifndef NDEBUG
2089   // ReadyCycle was bumped up to the CurrCycle when this node was
2090   // scheduled, but CurrCycle may have been eagerly advanced immediately after
2091   // scheduling, so may now be greater than ReadyCycle.
2092   if (ReadyCycle > CurrCycle)
2093     MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
2094 #endif
2095 
2096   if (ReadyCycle < MinReadyCycle)
2097     MinReadyCycle = ReadyCycle;
2098 
2099   // Check for interlocks first. For the purpose of other heuristics, an
2100   // instruction that cannot issue appears as if it's not in the ReadyQueue.
2101   bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
2102   bool HazardDetected = (!IsBuffered && ReadyCycle > CurrCycle) ||
2103                         checkHazard(SU) || (Available.size() >= ReadyListLimit);
2104 
2105   if (!HazardDetected) {
2106     Available.push(SU);
2107 
2108     if (InPQueue)
2109       Pending.remove(Pending.begin() + Idx);
2110     return;
2111   }
2112 
2113   if (!InPQueue)
2114     Pending.push(SU);
2115 }
2116 
2117 /// Move the boundary of scheduled code by one cycle.
2118 void SchedBoundary::bumpCycle(unsigned NextCycle) {
2119   if (SchedModel->getMicroOpBufferSize() == 0) {
2120     assert(MinReadyCycle < std::numeric_limits<unsigned>::max() &&
2121            "MinReadyCycle uninitialized");
2122     if (MinReadyCycle > NextCycle)
2123       NextCycle = MinReadyCycle;
2124   }
2125   // Update the current micro-ops, which will issue in the next cycle.
2126   unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
2127   CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;
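  // E.g. (invented numbers) with IssueWidth=4, advancing from cycle 3 to 5
  // gives DecMOps = 4 * 2 = 8; a CurrMOps of 3 saturates to 0 rather than
  // wrapping, since CurrMOps is unsigned.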
2128 
2129   // Decrement DependentLatency based on the next cycle.
2130   if ((NextCycle - CurrCycle) > DependentLatency)
2131     DependentLatency = 0;
2132   else
2133     DependentLatency -= (NextCycle - CurrCycle);
2134 
2135   if (!HazardRec->isEnabled()) {
2136     // Bypass HazardRec virtual calls.
2137     CurrCycle = NextCycle;
2138   } else {
2139     // Bypass getHazardType calls in case of long latency.
2140     for (; CurrCycle != NextCycle; ++CurrCycle) {
2141       if (isTop())
2142         HazardRec->AdvanceCycle();
2143       else
2144         HazardRec->RecedeCycle();
2145     }
2146   }
2147   CheckPending = true;
2148   IsResourceLimited =
2149       checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(),
2150                          getScheduledLatency(), true);
2151 
2152   LLVM_DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName()
2153                     << '\n');
2154 }
2155 
2156 void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
2157   ExecutedResCounts[PIdx] += Count;
2158   if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
2159     MaxExecutedResCount = ExecutedResCounts[PIdx];
2160 }
2161 
2162 /// Add the given processor resource to this scheduled zone.
2163 ///
2164 /// \param Cycles indicates the number of consecutive (non-pipelined) cycles
2165 /// during which this resource is consumed.
2166 ///
2167 /// \return the next cycle at which the instruction may execute without
2168 /// oversubscribing resources.
2169 unsigned SchedBoundary::
2170 countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
2171   unsigned Factor = SchedModel->getResourceFactor(PIdx);
2172   unsigned Count = Factor * Cycles;
2173   LLVM_DEBUG(dbgs() << "  " << SchedModel->getResourceName(PIdx) << " +"
2174                     << Cycles << "x" << Factor << "u\n");
2175 
2176   // Update Executed resources counts.
2177   incExecutedResources(PIdx, Count);
2178   assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
2179   Rem->RemainingCounts[PIdx] -= Count;
2180 
2181   // Check if this resource exceeds the current critical resource. If so, it
2182   // becomes the critical resource.
2183   if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
2184     ZoneCritResIdx = PIdx;
2185     LLVM_DEBUG(dbgs() << "  *** Critical resource "
2186                       << SchedModel->getResourceName(PIdx) << ": "
2187                       << getResourceCount(PIdx) / SchedModel->getLatencyFactor()
2188                       << "c\n");
2189   }
2190   // For reserved resources, record the highest cycle using the resource.
2191   unsigned NextAvailable, InstanceIdx;
2192   std::tie(NextAvailable, InstanceIdx) = getNextResourceCycle(PIdx, Cycles);
2193   if (NextAvailable > CurrCycle) {
2194     LLVM_DEBUG(dbgs() << "  Resource conflict: "
2195                       << SchedModel->getResourceName(PIdx)
2196                       << '[' << InstanceIdx - ReservedCyclesIndex[PIdx] << ']'
2197                       << " reserved until @" << NextAvailable << "\n");
2198   }
2199   return NextAvailable;
2200 }
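
// Worked example (invented model): a resource with Factor=3 consumed for
// Cycles=2 contributes Count=6 to ExecutedResCounts[PIdx] and to the
// remaining-count bookkeeping; PIdx then becomes ZoneCritResIdx only if
// getResourceCount(PIdx) now exceeds getCriticalCount().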
2201 
2202 /// Move the boundary of scheduled code by one SUnit.
2203 void SchedBoundary::bumpNode(SUnit *SU) {
2204   // Update the reservation table.
2205   if (HazardRec->isEnabled()) {
2206     if (!isTop() && SU->isCall) {
2207       // Calls are scheduled with their preceding instructions. For bottom-up
2208       // scheduling, clear the pipeline state before emitting.
2209       HazardRec->Reset();
2210     }
2211     HazardRec->EmitInstruction(SU);
2212     // Scheduling an instruction may have made pending instructions available.
2213     CheckPending = true;
2214   }
2215   // checkHazard should prevent scheduling multiple instructions per cycle that
2216   // exceed the issue width.
2217   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2218   unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
2219   assert(
2220       (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
2221       "Cannot schedule this instruction's MicroOps in the current cycle.");
2222 
2223   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
2224   LLVM_DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");
2225 
2226   unsigned NextCycle = CurrCycle;
2227   switch (SchedModel->getMicroOpBufferSize()) {
2228   case 0:
2229     assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
2230     break;
2231   case 1:
2232     if (ReadyCycle > NextCycle) {
2233       NextCycle = ReadyCycle;
2234       LLVM_DEBUG(dbgs() << "  *** Stall until: " << ReadyCycle << "\n");
2235     }
2236     break;
2237   default:
2238     // We don't currently model the OOO reorder buffer, so consider all
2239     // scheduled MOps to be "retired". We do loosely model in-order resource
2240     // latency. If this instruction uses an in-order resource, account for any
2241     // likely stall cycles.
2242     if (SU->isUnbuffered && ReadyCycle > NextCycle)
2243       NextCycle = ReadyCycle;
2244     break;
2245   }
2246   RetiredMOps += IncMOps;
2247 
2248   // Update resource counts and critical resource.
2249   if (SchedModel->hasInstrSchedModel()) {
2250     unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
2251     assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
2252     Rem->RemIssueCount -= DecRemIssue;
2253     if (ZoneCritResIdx) {
2254       // Scale scheduled micro-ops for comparing with the critical resource.
2255       unsigned ScaledMOps =
2256         RetiredMOps * SchedModel->getMicroOpFactor();
2257 
2258       // If scaled micro-ops are now more than the previous critical resource by
2259       // a full cycle, then micro-ops issue becomes critical.
2260       if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
2261           >= (int)SchedModel->getLatencyFactor()) {
2262         ZoneCritResIdx = 0;
2263         LLVM_DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
2264                           << ScaledMOps / SchedModel->getLatencyFactor()
2265                           << "c\n");
2266       }
2267     }
2268     for (TargetSchedModel::ProcResIter
2269            PI = SchedModel->getWriteProcResBegin(SC),
2270            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2271       unsigned RCycle =
2272         countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
2273       if (RCycle > NextCycle)
2274         NextCycle = RCycle;
2275     }
2276     if (SU->hasReservedResource) {
2277       // For reserved resources, record the highest cycle using the resource.
2278       // For top-down scheduling, this is the cycle in which we schedule this
2279       // instruction plus the number of cycles the operation reserves the
2280       // resource. For bottom-up, it is simply the instruction's cycle.
2281       for (TargetSchedModel::ProcResIter
2282              PI = SchedModel->getWriteProcResBegin(SC),
2283              PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2284         unsigned PIdx = PI->ProcResourceIdx;
2285         if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
2286           unsigned ReservedUntil, InstanceIdx;
2287           std::tie(ReservedUntil, InstanceIdx) = getNextResourceCycle(PIdx, 0);
2288           if (isTop()) {
2289             ReservedCycles[InstanceIdx] =
2290                 std::max(ReservedUntil, NextCycle + PI->Cycles);
2291           } else
2292             ReservedCycles[InstanceIdx] = NextCycle;
2293         }
2294       }
2295     }
2296   }
2297   // Update ExpectedLatency and DependentLatency.
2298   unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
2299   unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
2300   if (SU->getDepth() > TopLatency) {
2301     TopLatency = SU->getDepth();
2302     LLVM_DEBUG(dbgs() << "  " << Available.getName() << " TopLatency SU("
2303                       << SU->NodeNum << ") " << TopLatency << "c\n");
2304   }
2305   if (SU->getHeight() > BotLatency) {
2306     BotLatency = SU->getHeight();
2307     LLVM_DEBUG(dbgs() << "  " << Available.getName() << " BotLatency SU("
2308                       << SU->NodeNum << ") " << BotLatency << "c\n");
2309   }
2310   // If we stall for any reason, bump the cycle.
2311   if (NextCycle > CurrCycle)
2312     bumpCycle(NextCycle);
2313   else
2314     // After updating ZoneCritResIdx and ExpectedLatency, check if we're
2315     // resource limited. If a stall occurred, bumpCycle does this.
2316     IsResourceLimited =
2317         checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(),
2318                            getScheduledLatency(), true);
2319 
2320   // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
2321   // resets CurrMOps. Loop to handle instructions with more MOps than can issue in
2322   // one cycle.  Since we commonly reach the max MOps here, opportunistically
2323   // bump the cycle to avoid uselessly checking everything in the readyQ.
2324   CurrMOps += IncMOps;
2325 
2326   // Bump the cycle count for issue group constraints.
2327   // This must be done after NextCycle has been adjusted for all other stalls.
2328   // Calling bumpCycle(X) will reduce CurrMOps by one issue group and set
2329   // currCycle to X.
2330   if ((isTop() &&  SchedModel->mustEndGroup(SU->getInstr())) ||
2331       (!isTop() && SchedModel->mustBeginGroup(SU->getInstr()))) {
2332     LLVM_DEBUG(dbgs() << "  Bump cycle to " << (isTop() ? "end" : "begin")
2333                       << " group\n");
2334     bumpCycle(++NextCycle);
2335   }
2336 
2337   while (CurrMOps >= SchedModel->getIssueWidth()) {
2338     LLVM_DEBUG(dbgs() << "  *** Max MOps " << CurrMOps << " at cycle "
2339                       << CurrCycle << '\n');
2340     bumpCycle(++NextCycle);
2341   }
2342   LLVM_DEBUG(dumpScheduledState());
2343 }
2344 
2345 /// Release pending ready nodes into the available queue. This makes them
2346 /// visible to heuristics.
2347 void SchedBoundary::releasePending() {
2348   // If the available queue is empty, it is safe to reset MinReadyCycle.
2349   if (Available.empty())
2350     MinReadyCycle = std::numeric_limits<unsigned>::max();
2351 
2352   // Check to see if any of the pending instructions are ready to issue.  If
2353   // so, add them to the available queue.
2354   for (unsigned I = 0, E = Pending.size(); I < E; ++I) {
2355     SUnit *SU = *(Pending.begin() + I);
2356     unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
2357 
2358     if (ReadyCycle < MinReadyCycle)
2359       MinReadyCycle = ReadyCycle;
2360 
2361     if (Available.size() >= ReadyListLimit)
2362       break;
2363 
2364     releaseNode(SU, ReadyCycle, true, I);
2365     if (E != Pending.size()) {
2366       --I;
2367       --E;
2368     }
2369   }
2370   CheckPending = false;
2371 }
2372 
2373 /// Remove SU from the ready set for this boundary.
2374 void SchedBoundary::removeReady(SUnit *SU) {
2375   if (Available.isInQueue(SU))
2376     Available.remove(Available.find(SU));
2377   else {
2378     assert(Pending.isInQueue(SU) && "bad ready count");
2379     Pending.remove(Pending.find(SU));
2380   }
2381 }
2382 
2383 /// If this queue only has one ready candidate, return it. As a side effect,
2384 /// defer any nodes that now hit a hazard, and advance the cycle until at least
2385 /// one node is ready. If multiple instructions are ready, return NULL.
2386 SUnit *SchedBoundary::pickOnlyChoice() {
2387   if (CheckPending)
2388     releasePending();
2389 
2390   if (CurrMOps > 0) {
2391     // Defer any ready instrs that now have a hazard.
2392     for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
2393       if (checkHazard(*I)) {
2394         Pending.push(*I);
2395         I = Available.remove(I);
2396         continue;
2397       }
2398       ++I;
2399     }
2400   }
2401   for (unsigned i = 0; Available.empty(); ++i) {
2402 //  FIXME: Re-enable assert once PR20057 is resolved.
2403 //    assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
2404 //           "permanent hazard");
2405     (void)i;
2406     bumpCycle(CurrCycle + 1);
2407     releasePending();
2408   }
2409 
2410   LLVM_DEBUG(Pending.dump());
2411   LLVM_DEBUG(Available.dump());
2412 
2413   if (Available.size() == 1)
2414     return *Available.begin();
2415   return nullptr;
2416 }
2417 
2418 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2419 // This is useful information to dump after bumpNode.
2420 // Note that the Queue contents are more useful before pickNodeFromQueue.
2421 LLVM_DUMP_METHOD void SchedBoundary::dumpScheduledState() const {
2422   unsigned ResFactor;
2423   unsigned ResCount;
2424   if (ZoneCritResIdx) {
2425     ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
2426     ResCount = getResourceCount(ZoneCritResIdx);
2427   } else {
2428     ResFactor = SchedModel->getMicroOpFactor();
2429     ResCount = RetiredMOps * ResFactor;
2430   }
2431   unsigned LFactor = SchedModel->getLatencyFactor();
2432   dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
2433          << "  Retired: " << RetiredMOps;
2434   dbgs() << "\n  Executed: " << getExecutedCount() / LFactor << "c";
2435   dbgs() << "\n  Critical: " << ResCount / LFactor << "c, "
2436          << ResCount / ResFactor << " "
2437          << SchedModel->getResourceName(ZoneCritResIdx)
2438          << "\n  ExpectedLatency: " << ExpectedLatency << "c\n"
2439          << (IsResourceLimited ? "  - Resource" : "  - Latency")
2440          << " limited.\n";
2441 }
2442 #endif
2443 
2444 //===----------------------------------------------------------------------===//
2445 // GenericScheduler - Generic implementation of MachineSchedStrategy.
2446 //===----------------------------------------------------------------------===//
2447 
2448 void GenericSchedulerBase::SchedCandidate::
2449 initResourceDelta(const ScheduleDAGMI *DAG,
2450                   const TargetSchedModel *SchedModel) {
2451   if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
2452     return;
2453 
2454   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2455   for (TargetSchedModel::ProcResIter
2456          PI = SchedModel->getWriteProcResBegin(SC),
2457          PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2458     if (PI->ProcResourceIdx == Policy.ReduceResIdx)
2459       ResDelta.CritResources += PI->Cycles;
2460     if (PI->ProcResourceIdx == Policy.DemandResIdx)
2461       ResDelta.DemandedResources += PI->Cycles;
2462   }
2463 }
2464 
2465 /// Compute remaining latency. We need this both to determine whether the
2466 /// overall schedule has become latency-limited and whether the instructions
2467 /// outside this zone are resource or latency limited.
2468 ///
2469 /// The "dependent" latency is updated incrementally during scheduling as the
2470 /// max height/depth of scheduled nodes minus the cycles since it was
2471 /// scheduled:
2472 ///   DLat = max (N.depth - (CurrCycle - N.ReadyCycle) for N in Zone
2473 ///
2474 /// The "independent" latency is the max ready queue depth:
2475 ///   ILat = max N.depth for N in Available|Pending
2476 ///
2477 /// RemainingLatency is the greater of independent and dependent latency.
2478 ///
2479 /// These computations are expensive, especially in DAGs with many edges, so
2480 /// only do them if necessary.
2481 static unsigned computeRemLatency(SchedBoundary &CurrZone) {
2482   unsigned RemLatency = CurrZone.getDependentLatency();
2483   RemLatency = std::max(RemLatency,
2484                         CurrZone.findMaxLatency(CurrZone.Available.elements()));
2485   RemLatency = std::max(RemLatency,
2486                         CurrZone.findMaxLatency(CurrZone.Pending.elements()));
2487   return RemLatency;
2488 }
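
// Numeric sketch of the formulas above (invented values): if the deepest
// scheduled node has depth 12 and became ready at cycle 7 with CurrCycle=10,
// the dependent latency is 12 - (10 - 7) = 9; if the deepest node still in
// Available or Pending has depth 11, RemLatency = max(9, 11) = 11.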
2489 
2490 /// Returns true if the current cycle plus remaining latency is greater than
2491 /// the critical path in the scheduling region.
2492 bool GenericSchedulerBase::shouldReduceLatency(const CandPolicy &Policy,
2493                                                SchedBoundary &CurrZone,
2494                                                bool ComputeRemLatency,
2495                                                unsigned &RemLatency) const {
2496   // The current cycle is already greater than the critical path, so we are
2497   // already latency limited and don't need to compute the remaining latency.
2498   if (CurrZone.getCurrCycle() > Rem.CriticalPath)
2499     return true;
2500 
2501   // If we haven't scheduled anything yet, then we aren't latency limited.
2502   if (CurrZone.getCurrCycle() == 0)
2503     return false;
2504 
2505   if (ComputeRemLatency)
2506     RemLatency = computeRemLatency(CurrZone);
2507 
2508   return RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath;
2509 }
2510 
2511 /// Set the CandPolicy for a scheduling zone given the current resources and
2512 /// latencies inside and outside the zone.
2513 void GenericSchedulerBase::setPolicy(CandPolicy &Policy, bool IsPostRA,
2514                                      SchedBoundary &CurrZone,
2515                                      SchedBoundary *OtherZone) {
2516   // Apply preemptive heuristics based on the total latency and resources
2517   // inside and outside this zone. Potential stalls should be considered before
2518   // following this policy.
2519 
2520   // Compute the critical resource outside the zone.
2521   unsigned OtherCritIdx = 0;
2522   unsigned OtherCount =
2523     OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;
2524 
2525   bool OtherResLimited = false;
2526   unsigned RemLatency = 0;
2527   bool RemLatencyComputed = false;
2528   if (SchedModel->hasInstrSchedModel() && OtherCount != 0) {
2529     RemLatency = computeRemLatency(CurrZone);
2530     RemLatencyComputed = true;
2531     OtherResLimited = checkResourceLimit(SchedModel->getLatencyFactor(),
2532                                          OtherCount, RemLatency, false);
2533   }
2534 
2535   // Schedule aggressively for latency in PostRA mode. We don't check for
2536   // acyclic latency during PostRA, and highly out-of-order processors will
2537   // skip PostRA scheduling.
2538   if (!OtherResLimited &&
2539       (IsPostRA || shouldReduceLatency(Policy, CurrZone, !RemLatencyComputed,
2540                                        RemLatency))) {
2541     Policy.ReduceLatency |= true;
2542     LLVM_DEBUG(dbgs() << "  " << CurrZone.Available.getName()
2543                       << " RemainingLatency " << RemLatency << " + "
2544                       << CurrZone.getCurrCycle() << "c > CritPath "
2545                       << Rem.CriticalPath << "\n");
2546   }
2547   // If the same resource is limiting inside and outside the zone, do nothing.
2548   if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
2549     return;
2550 
2551   LLVM_DEBUG(if (CurrZone.isResourceLimited()) {
2552     dbgs() << "  " << CurrZone.Available.getName() << " ResourceLimited: "
2553            << SchedModel->getResourceName(CurrZone.getZoneCritResIdx()) << "\n";
2554   } if (OtherResLimited) dbgs()
2555                  << "  RemainingLimit: "
2556                  << SchedModel->getResourceName(OtherCritIdx) << "\n";
2557              if (!CurrZone.isResourceLimited() && !OtherResLimited) dbgs()
2558              << "  Latency limited both directions.\n");
2559 
2560   if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
2561     Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();
2562 
2563   if (OtherResLimited)
2564     Policy.DemandResIdx = OtherCritIdx;
2565 }
2566 
2567 #ifndef NDEBUG
2568 const char *GenericSchedulerBase::getReasonStr(
2569   GenericSchedulerBase::CandReason Reason) {
2570   switch (Reason) {
2571   case NoCand:         return "NOCAND    ";
2572   case Only1:          return "ONLY1     ";
2573   case PhysReg:        return "PHYS-REG  ";
2574   case RegExcess:      return "REG-EXCESS";
2575   case RegCritical:    return "REG-CRIT  ";
2576   case Stall:          return "STALL     ";
2577   case Cluster:        return "CLUSTER   ";
2578   case Weak:           return "WEAK      ";
2579   case RegMax:         return "REG-MAX   ";
2580   case ResourceReduce: return "RES-REDUCE";
2581   case ResourceDemand: return "RES-DEMAND";
2582   case TopDepthReduce: return "TOP-DEPTH ";
2583   case TopPathReduce:  return "TOP-PATH  ";
2584   case BotHeightReduce:return "BOT-HEIGHT";
2585   case BotPathReduce:  return "BOT-PATH  ";
2586   case NextDefUse:     return "DEF-USE   ";
2587   case NodeOrder:      return "ORDER     ";
2588   }
2589   llvm_unreachable("Unknown reason!");
2590 }
2591 
2592 void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
2593   PressureChange P;
2594   unsigned ResIdx = 0;
2595   unsigned Latency = 0;
2596   switch (Cand.Reason) {
2597   default:
2598     break;
2599   case RegExcess:
2600     P = Cand.RPDelta.Excess;
2601     break;
2602   case RegCritical:
2603     P = Cand.RPDelta.CriticalMax;
2604     break;
2605   case RegMax:
2606     P = Cand.RPDelta.CurrentMax;
2607     break;
2608   case ResourceReduce:
2609     ResIdx = Cand.Policy.ReduceResIdx;
2610     break;
2611   case ResourceDemand:
2612     ResIdx = Cand.Policy.DemandResIdx;
2613     break;
2614   case TopDepthReduce:
2615     Latency = Cand.SU->getDepth();
2616     break;
2617   case TopPathReduce:
2618     Latency = Cand.SU->getHeight();
2619     break;
2620   case BotHeightReduce:
2621     Latency = Cand.SU->getHeight();
2622     break;
2623   case BotPathReduce:
2624     Latency = Cand.SU->getDepth();
2625     break;
2626   }
2627   dbgs() << "  Cand SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
2628   if (P.isValid())
2629     dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
2630            << ":" << P.getUnitInc() << " ";
2631   else
2632     dbgs() << "      ";
2633   if (ResIdx)
2634     dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
2635   else
2636     dbgs() << "         ";
2637   if (Latency)
2638     dbgs() << " " << Latency << " cycles ";
2639   else
2640     dbgs() << "          ";
2641   dbgs() << '\n';
2642 }
2643 #endif
2644 
2645 namespace llvm {
2646 /// Return true if this heuristic determines order.
2647 bool tryLess(int TryVal, int CandVal,
2648              GenericSchedulerBase::SchedCandidate &TryCand,
2649              GenericSchedulerBase::SchedCandidate &Cand,
2650              GenericSchedulerBase::CandReason Reason) {
2651   if (TryVal < CandVal) {
2652     TryCand.Reason = Reason;
2653     return true;
2654   }
2655   if (TryVal > CandVal) {
2656     if (Cand.Reason > Reason)
2657       Cand.Reason = Reason;
2658     return true;
2659   }
2660   return false;
2661 }
2662 
2663 bool tryGreater(int TryVal, int CandVal,
2664                 GenericSchedulerBase::SchedCandidate &TryCand,
2665                 GenericSchedulerBase::SchedCandidate &Cand,
2666                 GenericSchedulerBase::CandReason Reason) {
2667   if (TryVal > CandVal) {
2668     TryCand.Reason = Reason;
2669     return true;
2670   }
2671   if (TryVal < CandVal) {
2672     if (Cand.Reason > Reason)
2673       Cand.Reason = Reason;
2674     return true;
2675   }
2676   return false;
2677 }
2678 
2679 bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
2680                 GenericSchedulerBase::SchedCandidate &Cand,
2681                 SchedBoundary &Zone) {
2682   if (Zone.isTop()) {
2683     if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
2684       if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2685                   TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
2686         return true;
2687     }
2688     if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2689                    TryCand, Cand, GenericSchedulerBase::TopPathReduce))
2690       return true;
2691   } else {
2692     if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
2693       if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2694                   TryCand, Cand, GenericSchedulerBase::BotHeightReduce))
2695         return true;
2696     }
2697     if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2698                    TryCand, Cand, GenericSchedulerBase::BotPathReduce))
2699       return true;
2700   }
2701   return false;
2702 }
2703 } // end namespace llvm
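
// Usage sketch (hedged illustration, not a verbatim excerpt): a strategy's
// tie-breaking routine chains these helpers so the first decisive one wins:
//
//   if (tryGreater(TryCand.SU->isScheduleHigh, Cand.SU->isScheduleHigh,
//                  TryCand, Cand, GenericSchedulerBase::NodeOrder))
//     return;
//   if (tryLatency(TryCand, Cand, Zone))
//     return;
//
// On TryVal > CandVal, tryGreater tags TryCand.Reason and TryCand wins; on
// TryVal < CandVal it strengthens Cand.Reason (lower enum values are
// stronger) before Cand wins; on a tie it returns false and the next
// heuristic decides.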
2704 
2705 static void tracePick(GenericSchedulerBase::CandReason Reason, bool IsTop) {
2706   LLVM_DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
2707                     << GenericSchedulerBase::getReasonStr(Reason) << '\n');
2708 }
2709 
2710 static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand) {
2711   tracePick(Cand.Reason, Cand.AtTop);
2712 }
2713 
2714 void GenericScheduler::initialize(ScheduleDAGMI *dag) {
2715   assert(dag->hasVRegLiveness() &&
2716          "(PreRA)GenericScheduler needs vreg liveness");
2717   DAG = static_cast<ScheduleDAGMILive*>(dag);
2718   SchedModel = DAG->getSchedModel();
2719   TRI = DAG->TRI;
2720 
2721   Rem.init(DAG, SchedModel);
2722   Top.init(DAG, SchedModel, &Rem);
2723   Bot.init(DAG, SchedModel, &Rem);
2724 
2725   // Initialize resource counts.
2726 
2727   // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
2728   // are disabled, then these HazardRecs will be disabled.
2729   const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
2730   if (!Top.HazardRec) {
2731     Top.HazardRec =
2732         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
2733             Itin, DAG);
2734   }
2735   if (!Bot.HazardRec) {
2736     Bot.HazardRec =
2737         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
2738             Itin, DAG);
2739   }
2740   TopCand.SU = nullptr;
2741   BotCand.SU = nullptr;
2742 }
2743 
2744 /// Initialize the per-region scheduling policy.
2745 void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
2746                                   MachineBasicBlock::iterator End,
2747                                   unsigned NumRegionInstrs) {
2748   const MachineFunction &MF = *Begin->getMF();
2749   const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();
2750 
2751   // Avoid setting up the register pressure tracker for small regions to save
2752   // compile time. As a rough heuristic, only track pressure when the number of
2753   // schedulable instructions exceeds half the integer register file.
2754   RegionPolicy.ShouldTrackPressure = true;
2755   for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
2756     MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
2757     if (TLI->isTypeLegal(LegalIntVT)) {
2758       unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
2759         TLI->getRegClassFor(LegalIntVT));
2760       RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
2761     }
2762   }
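  // Worked example (hypothetical numbers): if the widest legal integer type
  // maps to a register class with 32 allocatable registers, NIntRegs / 2 == 16,
  // so pressure tracking is enabled only for regions of 17+ instructions.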
2763 
2764   // For generic targets, we default to bottom-up, because it's simpler and more
2765   // compile-time optimizations have been implemented in that direction.
2766   RegionPolicy.OnlyBottomUp = true;
2767 
2768   // Allow the subtarget to override default policy.
2769   MF.getSubtarget().overrideSchedPolicy(RegionPolicy, NumRegionInstrs);
2770 
2771   // After subtarget overrides, apply command line options.
2772   if (!EnableRegPressure) {
2773     RegionPolicy.ShouldTrackPressure = false;
2774     RegionPolicy.ShouldTrackLaneMasks = false;
2775   }
2776 
2777   // -misched-topdown/bottomup can force or unforce the scheduling direction;
2778   // e.g. -misched-bottomup=false allows scheduling in both directions.
2779   assert((!ForceTopDown || !ForceBottomUp) &&
2780          "-misched-topdown incompatible with -misched-bottomup");
2781   if (ForceBottomUp.getNumOccurrences() > 0) {
2782     RegionPolicy.OnlyBottomUp = ForceBottomUp;
2783     if (RegionPolicy.OnlyBottomUp)
2784       RegionPolicy.OnlyTopDown = false;
2785   }
2786   if (ForceTopDown.getNumOccurrences() > 0) {
2787     RegionPolicy.OnlyTopDown = ForceTopDown;
2788     if (RegionPolicy.OnlyTopDown)
2789       RegionPolicy.OnlyBottomUp = false;
2790   }
2791 }
2792 
2793 void GenericScheduler::dumpPolicy() const {
2794   // Cannot completely remove virtual function even in release mode.
2795 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2796   dbgs() << "GenericScheduler RegionPolicy: "
2797          << " ShouldTrackPressure=" << RegionPolicy.ShouldTrackPressure
2798          << " OnlyTopDown=" << RegionPolicy.OnlyTopDown
2799          << " OnlyBottomUp=" << RegionPolicy.OnlyBottomUp
2800          << "\n";
2801 #endif
2802 }
2803 
2804 /// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
2805 /// critical path by more cycles than it takes to drain the instruction buffer.
2806 /// We estimate an upper bound on in-flight instructions as:
2807 ///
2808 /// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
2809 /// InFlightIterations = AcyclicPath / CyclesPerIteration
2810 /// InFlightResources = InFlightIterations * LoopResources
2811 ///
2812 /// TODO: Check execution resources in addition to IssueCount.
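///
/// Worked example (hypothetical counts, all model factors 1): with
/// CyclicCritPath = 4, CriticalPath = 12, RemIssueCount = 6 and
/// MicroOpBufferSize = 8: IterCount = max(4, 6) = 6, AcyclicCount = 12,
/// InFlightCount = ceil(12 * 6 / 6) = 12 > BufferLimit = 8, so the region is
/// flagged as acyclic-latency limited.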
2813 void GenericScheduler::checkAcyclicLatency() {
2814   if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
2815     return;
2816 
2817   // Scaled number of cycles per loop iteration.
2818   unsigned IterCount =
2819     std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
2820              Rem.RemIssueCount);
2821   // Scaled acyclic critical path.
2822   unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
2823   // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
2824   unsigned InFlightCount =
2825     (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
2826   unsigned BufferLimit =
2827     SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();
2828 
2829   Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;
2830 
2831   LLVM_DEBUG(
2832       dbgs() << "IssueCycles="
2833              << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
2834              << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
2835              << "c NumIters=" << (AcyclicCount + IterCount - 1) / IterCount
2836              << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
2837              << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
2838       if (Rem.IsAcyclicLatencyLimited) dbgs() << "  ACYCLIC LATENCY LIMIT\n");
2839 }
2840 
2841 void GenericScheduler::registerRoots() {
2842   Rem.CriticalPath = DAG->ExitSU.getDepth();
2843 
2844   // Some roots may not feed into ExitSU. Check all of them just in case.
2845   for (const SUnit *SU : Bot.Available) {
2846     if (SU->getDepth() > Rem.CriticalPath)
2847       Rem.CriticalPath = SU->getDepth();
2848   }
2849   LLVM_DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
2850   if (DumpCriticalPathLength) {
2851     errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n";
2852   }
2853 
2854   if (EnableCyclicPath && SchedModel->getMicroOpBufferSize() > 0) {
2855     Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
2856     checkAcyclicLatency();
2857   }
2858 }
2859 
2860 namespace llvm {
2861 bool tryPressure(const PressureChange &TryP,
2862                  const PressureChange &CandP,
2863                  GenericSchedulerBase::SchedCandidate &TryCand,
2864                  GenericSchedulerBase::SchedCandidate &Cand,
2865                  GenericSchedulerBase::CandReason Reason,
2866                  const TargetRegisterInfo *TRI,
2867                  const MachineFunction &MF) {
2868   // If one candidate decreases pressure and the other increases it, go with
2869   // the one that decreases. Invalid candidates have UnitInc==0.
2870   if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
2871                  Reason)) {
2872     return true;
2873   }
2874   // Do not compare the magnitude of pressure changes between top and bottom
2875   // boundary.
2876   if (Cand.AtTop != TryCand.AtTop)
2877     return false;
2878 
2879   // If both candidates affect the same set in the same boundary, go with the
2880   // smallest increase.
2881   unsigned TryPSet = TryP.getPSetOrMax();
2882   unsigned CandPSet = CandP.getPSetOrMax();
2883   if (TryPSet == CandPSet) {
2884     return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
2885                    Reason);
2886   }
2887 
2888   int TryRank = TryP.isValid() ? TRI->getRegPressureSetScore(MF, TryPSet) :
2889                                  std::numeric_limits<int>::max();
2890 
2891   int CandRank = CandP.isValid() ? TRI->getRegPressureSetScore(MF, CandPSet) :
2892                                    std::numeric_limits<int>::max();
2893 
2894   // If the candidates are decreasing pressure, reverse priority.
2895   if (TryP.getUnitInc() < 0)
2896     std::swap(TryRank, CandRank);
2897   return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
2898 }
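// Illustration (hypothetical deltas): if TryP increases a pressure set by one
// unit while CandP decreases one, the decrease wins outright. If both affect
// the same set, the smaller increase wins. Across different sets, the
// candidate whose set ranks higher under TRI->getRegPressureSetScore() wins
// (an invalid delta ranks highest), and the ranking is reversed when both
// candidates are decreasing pressure.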
2899 
2900 unsigned getWeakLeft(const SUnit *SU, bool isTop) {
2901   return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
2902 }
2903 
2904 /// Minimize physical register live ranges. Regalloc wants them adjacent to
2905 /// their physreg def/use.
2906 ///
2907 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
2908 /// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
2909 /// with the operation that produces or consumes the physreg. We'll do this when
2910 /// regalloc has support for parallel copies.
2911 int biasPhysReg(const SUnit *SU, bool isTop) {
2912   const MachineInstr *MI = SU->getInstr();
2913 
2914   if (MI->isCopy()) {
2915     unsigned ScheduledOper = isTop ? 1 : 0;
2916     unsigned UnscheduledOper = isTop ? 0 : 1;
2917     // If we have already scheduled the physreg producer/consumer, immediately
2918     // schedule the copy.
2919     if (Register::isPhysicalRegister(MI->getOperand(ScheduledOper).getReg()))
2920       return 1;
2921     // If the physreg is at the boundary, defer it. Otherwise schedule it
2922     // immediately to free the dependent. We can hoist the copy later.
2923     bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
2924     if (Register::isPhysicalRegister(MI->getOperand(UnscheduledOper).getReg()))
2925       return AtBoundary ? -1 : 1;
2926   }
2927 
2928   if (MI->isMoveImmediate()) {
2929     // If we have a move immediate and all successors have been assigned, bias
2930     // towards scheduling this later. Make sure all register defs are to
2931     // physical registers.
2932     bool DoBias = true;
2933     for (const MachineOperand &Op : MI->defs()) {
2934       if (Op.isReg() && !Register::isPhysicalRegister(Op.getReg())) {
2935         DoBias = false;
2936         break;
2937       }
2938     }
2939 
2940     if (DoBias)
2941       return isTop ? -1 : 1;
2942   }
2943 
2944   return 0;
2945 }
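// Illustration (bottom-up, hypothetical copies): for '$physreg = COPY %x' the
// already-scheduled side is the physreg def, so the copy is scheduled
// immediately (+1), keeping it next to its scheduled physreg consumer. For
// '%x = COPY $physreg' the unscheduled side is the physreg use: the copy is
// deferred (-1) while it sits at the region boundary and scheduled eagerly
// (+1) otherwise.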
2946 } // end namespace llvm
2947 
2948 void GenericScheduler::initCandidate(SchedCandidate &Cand, SUnit *SU,
2949                                      bool AtTop,
2950                                      const RegPressureTracker &RPTracker,
2951                                      RegPressureTracker &TempTracker) {
2952   Cand.SU = SU;
2953   Cand.AtTop = AtTop;
2954   if (DAG->isTrackingPressure()) {
2955     if (AtTop) {
2956       TempTracker.getMaxDownwardPressureDelta(
2957         Cand.SU->getInstr(),
2958         Cand.RPDelta,
2959         DAG->getRegionCriticalPSets(),
2960         DAG->getRegPressure().MaxSetPressure);
2961     } else {
2962       if (VerifyScheduling) {
2963         TempTracker.getMaxUpwardPressureDelta(
2964           Cand.SU->getInstr(),
2965           &DAG->getPressureDiff(Cand.SU),
2966           Cand.RPDelta,
2967           DAG->getRegionCriticalPSets(),
2968           DAG->getRegPressure().MaxSetPressure);
2969       } else {
2970         RPTracker.getUpwardPressureDelta(
2971           Cand.SU->getInstr(),
2972           DAG->getPressureDiff(Cand.SU),
2973           Cand.RPDelta,
2974           DAG->getRegionCriticalPSets(),
2975           DAG->getRegPressure().MaxSetPressure);
2976       }
2977     }
2978   }
2979   LLVM_DEBUG(if (Cand.RPDelta.Excess.isValid()) dbgs()
2980              << "  Try  SU(" << Cand.SU->NodeNum << ") "
2981              << TRI->getRegPressureSetName(Cand.RPDelta.Excess.getPSet()) << ":"
2982              << Cand.RPDelta.Excess.getUnitInc() << "\n");
2983 }
2984 
2985 /// Apply a set of heuristics to a new candidate. Heuristics are currently
2986 /// hierarchical. This may be more efficient than a graduated cost model because
2987 /// we don't need to evaluate all aspects of the model for each node in the
2988 /// queue. But it's really done to make the heuristics easier to debug and
2989 /// statistically analyze.
2990 ///
2991 /// \param Cand provides the policy and current best candidate.
2992 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
2993 /// \param Zone describes the scheduled zone that we are extending, or nullptr
2994 ///             if Cand is from a different zone than TryCand.
2995 void GenericScheduler::tryCandidate(SchedCandidate &Cand,
2996                                     SchedCandidate &TryCand,
2997                                     SchedBoundary *Zone) const {
2998   // Initialize the candidate if needed.
2999   if (!Cand.isValid()) {
3000     TryCand.Reason = NodeOrder;
3001     return;
3002   }
3003 
3004   // Bias PhysReg defs and copies toward their uses and defs, respectively.
3005   if (tryGreater(biasPhysReg(TryCand.SU, TryCand.AtTop),
3006                  biasPhysReg(Cand.SU, Cand.AtTop), TryCand, Cand, PhysReg))
3007     return;
3008 
3009   // Avoid exceeding the target's limit.
3010   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
3011                                                Cand.RPDelta.Excess,
3012                                                TryCand, Cand, RegExcess, TRI,
3013                                                DAG->MF))
3014     return;
3015 
3016   // Avoid increasing the max critical pressure in the scheduled region.
3017   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
3018                                                Cand.RPDelta.CriticalMax,
3019                                                TryCand, Cand, RegCritical, TRI,
3020                                                DAG->MF))
3021     return;
3022 
3023   // We only compare a subset of features when comparing nodes between the
3024   // Top and Bottom boundaries. Some properties are simply incomparable; in
3025   // many other instances we should only override the other boundary if
3026   // something is a clearly good pick on one boundary. Skip heuristics that
3027   // are more "tie-breaking" in nature.
3028   bool SameBoundary = Zone != nullptr;
3029   if (SameBoundary) {
3030     // For loops that are acyclic path limited, aggressively schedule for
3031     // latency. Within a single cycle, whenever CurrMOps > 0, allow normal
3032     // heuristics to take precedence.
3033     if (Rem.IsAcyclicLatencyLimited && !Zone->getCurrMOps() &&
3034         tryLatency(TryCand, Cand, *Zone))
3035       return;
3036 
3037     // Prioritize instructions that read unbuffered resources by stall cycles.
3038     if (tryLess(Zone->getLatencyStallCycles(TryCand.SU),
3039                 Zone->getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
3040       return;
3041   }
3042 
3043   // Keep clustered nodes together to encourage downstream peephole
3044   // optimizations which may reduce resource requirements.
3045   //
3046   // This is a best effort to set things up for a post-RA pass. Optimizations
3047   // like generating loads of multiple registers should ideally be done within
3048   // the scheduler pass by combining the loads during DAG postprocessing.
3049   const SUnit *CandNextClusterSU =
3050     Cand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
3051   const SUnit *TryCandNextClusterSU =
3052     TryCand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
3053   if (tryGreater(TryCand.SU == TryCandNextClusterSU,
3054                  Cand.SU == CandNextClusterSU,
3055                  TryCand, Cand, Cluster))
3056     return;
3057 
3058   if (SameBoundary) {
3059     // Weak edges are for clustering and other constraints.
3060     if (tryLess(getWeakLeft(TryCand.SU, TryCand.AtTop),
3061                 getWeakLeft(Cand.SU, Cand.AtTop),
3062                 TryCand, Cand, Weak))
3063       return;
3064   }
3065 
3066   // Avoid increasing the max pressure of the entire region.
3067   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
3068                                                Cand.RPDelta.CurrentMax,
3069                                                TryCand, Cand, RegMax, TRI,
3070                                                DAG->MF))
3071     return;
3072 
3073   if (SameBoundary) {
3074     // Avoid critical resource consumption and balance the schedule.
3075     TryCand.initResourceDelta(DAG, SchedModel);
3076     if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
3077                 TryCand, Cand, ResourceReduce))
3078       return;
3079     if (tryGreater(TryCand.ResDelta.DemandedResources,
3080                    Cand.ResDelta.DemandedResources,
3081                    TryCand, Cand, ResourceDemand))
3082       return;
3083 
3084     // Avoid serializing long latency dependence chains.
3085     // For acyclic path limited loops, latency was already checked above.
3086     if (!RegionPolicy.DisableLatencyHeuristic && TryCand.Policy.ReduceLatency &&
3087         !Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, *Zone))
3088       return;
3089 
3090     // Fall through to original instruction order.
3091     if ((Zone->isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
3092         || (!Zone->isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
3093       TryCand.Reason = NodeOrder;
3094     }
3095   }
3096 }
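// In summary, the pre-RA hierarchy above is: physreg bias, RegExcess,
// RegCritical, acyclic latency, stalls, cluster, weak edges, RegMax, critical
// resources, demanded resources, latency, and finally original node order.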
3097 
3098 /// Pick the best candidate from the queue.
3099 ///
3100 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
3101 /// DAG building. To adjust for the current scheduling location we need to
3102 /// maintain the number of vreg uses remaining to be top-scheduled.
3103 void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
3104                                          const CandPolicy &ZonePolicy,
3105                                          const RegPressureTracker &RPTracker,
3106                                          SchedCandidate &Cand) {
3107   // getMaxPressureDelta temporarily modifies the tracker.
3108   RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
3109 
3110   ReadyQueue &Q = Zone.Available;
3111   for (SUnit *SU : Q) {
3112 
3113     SchedCandidate TryCand(ZonePolicy);
3114     initCandidate(TryCand, SU, Zone.isTop(), RPTracker, TempTracker);
3115     // Pass SchedBoundary only when comparing nodes from the same boundary.
3116     SchedBoundary *ZoneArg = Cand.AtTop == TryCand.AtTop ? &Zone : nullptr;
3117     tryCandidate(Cand, TryCand, ZoneArg);
3118     if (TryCand.Reason != NoCand) {
3119       // Initialize resource delta if needed in case future heuristics query it.
3120       if (TryCand.ResDelta == SchedResourceDelta())
3121         TryCand.initResourceDelta(DAG, SchedModel);
3122       Cand.setBest(TryCand);
3123       LLVM_DEBUG(traceCandidate(Cand));
3124     }
3125   }
3126 }
3127 
3128 /// Pick the best candidate node from either the top or bottom queue.
3129 SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
3130   // Schedule as far as possible in the direction of no choice. This is most
3131   // efficient, but also provides the best heuristics for CriticalPSets.
3132   if (SUnit *SU = Bot.pickOnlyChoice()) {
3133     IsTopNode = false;
3134     tracePick(Only1, false);
3135     return SU;
3136   }
3137   if (SUnit *SU = Top.pickOnlyChoice()) {
3138     IsTopNode = true;
3139     tracePick(Only1, true);
3140     return SU;
3141   }
3142   // Set the bottom-up policy based on the state of the current bottom zone and
3143   // the instructions outside the zone, including the top zone.
3144   CandPolicy BotPolicy;
3145   setPolicy(BotPolicy, /*IsPostRA=*/false, Bot, &Top);
3146   // Set the top-down policy based on the state of the current top zone and
3147   // the instructions outside the zone, including the bottom zone.
3148   CandPolicy TopPolicy;
3149   setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot);
3150 
3151   // See if BotCand is still valid (because we previously scheduled from Top).
3152   LLVM_DEBUG(dbgs() << "Picking from Bot:\n");
3153   if (!BotCand.isValid() || BotCand.SU->isScheduled ||
3154       BotCand.Policy != BotPolicy) {
3155     BotCand.reset(CandPolicy());
3156     pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand);
3157     assert(BotCand.Reason != NoCand && "failed to find the first candidate");
3158   } else {
3159     LLVM_DEBUG(traceCandidate(BotCand));
3160 #ifndef NDEBUG
3161     if (VerifyScheduling) {
3162       SchedCandidate TCand;
3163       TCand.reset(CandPolicy());
3164       pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), TCand);
3165       assert(TCand.SU == BotCand.SU &&
3166              "Last pick result should correspond to re-picking right now");
3167     }
3168 #endif
3169   }
3170 
3171   // Check if the top Q has a better candidate.
3172   LLVM_DEBUG(dbgs() << "Picking from Top:\n");
3173   if (!TopCand.isValid() || TopCand.SU->isScheduled ||
3174       TopCand.Policy != TopPolicy) {
3175     TopCand.reset(CandPolicy());
3176     pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand);
3177     assert(TopCand.Reason != NoCand && "failed to find the first candidate");
3178   } else {
3179     LLVM_DEBUG(traceCandidate(TopCand));
3180 #ifndef NDEBUG
3181     if (VerifyScheduling) {
3182       SchedCandidate TCand;
3183       TCand.reset(CandPolicy());
3184       pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TCand);
3185       assert(TCand.SU == TopCand.SU &&
3186              "Last pick result should correspond to re-picking right now");
3187     }
3188 #endif
3189   }
3190 
3191   // Pick best from BotCand and TopCand.
3192   assert(BotCand.isValid());
3193   assert(TopCand.isValid());
3194   SchedCandidate Cand = BotCand;
3195   TopCand.Reason = NoCand;
3196   tryCandidate(Cand, TopCand, nullptr);
3197   if (TopCand.Reason != NoCand) {
3198     Cand.setBest(TopCand);
3199     LLVM_DEBUG(traceCandidate(Cand));
3200   }
3201 
3202   IsTopNode = Cand.AtTop;
3203   tracePick(Cand);
3204   return Cand.SU;
3205 }
3206 
3207 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
3208 SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
3209   if (DAG->top() == DAG->bottom()) {
3210     assert(Top.Available.empty() && Top.Pending.empty() &&
3211            Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
3212     return nullptr;
3213   }
3214   SUnit *SU;
3215   do {
3216     if (RegionPolicy.OnlyTopDown) {
3217       SU = Top.pickOnlyChoice();
3218       if (!SU) {
3219         CandPolicy NoPolicy;
3220         TopCand.reset(NoPolicy);
3221         pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand);
3222         assert(TopCand.Reason != NoCand && "failed to find a candidate");
3223         tracePick(TopCand);
3224         SU = TopCand.SU;
3225       }
3226       IsTopNode = true;
3227     } else if (RegionPolicy.OnlyBottomUp) {
3228       SU = Bot.pickOnlyChoice();
3229       if (!SU) {
3230         CandPolicy NoPolicy;
3231         BotCand.reset(NoPolicy);
3232         pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand);
3233         assert(BotCand.Reason != NoCand && "failed to find a candidate");
3234         tracePick(BotCand);
3235         SU = BotCand.SU;
3236       }
3237       IsTopNode = false;
3238     } else {
3239       SU = pickNodeBidirectional(IsTopNode);
3240     }
3241   } while (SU->isScheduled);
3242 
3243   if (SU->isTopReady())
3244     Top.removeReady(SU);
3245   if (SU->isBottomReady())
3246     Bot.removeReady(SU);
3247 
3248   LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
3249                     << *SU->getInstr());
3250   return SU;
3251 }
3252 
3253 void GenericScheduler::reschedulePhysReg(SUnit *SU, bool isTop) {
3254   MachineBasicBlock::iterator InsertPos = SU->getInstr();
3255   if (!isTop)
3256     ++InsertPos;
3257   SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;
3258 
3259   // Find already scheduled copies with a single physreg dependence and move
3260   // them just above the scheduled instruction.
3261   for (SDep &Dep : Deps) {
3262     if (Dep.getKind() != SDep::Data ||
3263         !Register::isPhysicalRegister(Dep.getReg()))
3264       continue;
3265     SUnit *DepSU = Dep.getSUnit();
3266     if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
3267       continue;
3268     MachineInstr *Copy = DepSU->getInstr();
3269     if (!Copy->isCopy() && !Copy->isMoveImmediate())
3270       continue;
3271     LLVM_DEBUG(dbgs() << "  Rescheduling physreg copy ";
3272                DAG->dumpNode(*Dep.getSUnit()));
3273     DAG->moveInstruction(Copy, InsertPos);
3274   }
3275 }
3276 
3277 /// Update the scheduler's state after scheduling a node. This is the same node
3278 /// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
3279 /// update its state based on the current cycle before MachineSchedStrategy
3280 /// does.
3281 ///
3282 /// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
3283 /// them here. See comments in biasPhysReg.
3284 void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3285   if (IsTopNode) {
3286     SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3287     Top.bumpNode(SU);
3288     if (SU->hasPhysRegUses)
3289       reschedulePhysReg(SU, true);
3290   } else {
3291     SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
3292     Bot.bumpNode(SU);
3293     if (SU->hasPhysRegDefs)
3294       reschedulePhysReg(SU, false);
3295   }
3296 }
3297 
3298 /// Create the standard converging machine scheduler. This will be used as the
3299 /// default scheduler if the target does not set a default.
3300 ScheduleDAGMILive *llvm::createGenericSchedLive(MachineSchedContext *C) {
3301   ScheduleDAGMILive *DAG =
3302       new ScheduleDAGMILive(C, std::make_unique<GenericScheduler>(C));
3303   // Register DAG post-processors.
3304   //
3305   // FIXME: extend the mutation API to allow earlier mutations to instantiate
3306   // data and pass it to later mutations. Have a single mutation that gathers
3307   // the interesting nodes in one pass.
3308   DAG->addMutation(createCopyConstrainDAGMutation(DAG->TII, DAG->TRI));
3309   return DAG;
3310 }
3311 
3312 static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
3313   return createGenericSchedLive(C);
3314 }
3315 
3316 static MachineSchedRegistry
3317 GenericSchedRegistry("converge", "Standard converging scheduler.",
3318                      createConvergingSched);
3319 
3320 //===----------------------------------------------------------------------===//
3321 // PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
3322 //===----------------------------------------------------------------------===//
3323 
3324 void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
3325   DAG = Dag;
3326   SchedModel = DAG->getSchedModel();
3327   TRI = DAG->TRI;
3328 
3329   Rem.init(DAG, SchedModel);
3330   Top.init(DAG, SchedModel, &Rem);
3331   BotRoots.clear();
3332 
3333   // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
3334   // or are disabled, then these HazardRecs will be disabled.
3335   const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
3336   if (!Top.HazardRec) {
3337     Top.HazardRec =
3338         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
3339             Itin, DAG);
3340   }
3341 }
3342 
3343 void PostGenericScheduler::registerRoots() {
3344   Rem.CriticalPath = DAG->ExitSU.getDepth();
3345 
3346   // Some roots may not feed into ExitSU. Check all of them just in case.
3347   for (const SUnit *SU : BotRoots) {
3348     if (SU->getDepth() > Rem.CriticalPath)
3349       Rem.CriticalPath = SU->getDepth();
3350   }
3351   LLVM_DEBUG(dbgs() << "Critical Path: (PGS-RR) " << Rem.CriticalPath << '\n');
3352   if (DumpCriticalPathLength) {
3353     errs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << " \n";
3354   }
3355 }
3356 
3357 /// Apply a set of heuristics to a new candidate for PostRA scheduling.
3358 ///
3359 /// \param Cand provides the policy and current best candidate.
3360 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
3361 void PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
3362                                         SchedCandidate &TryCand) {
3363   // Initialize the candidate if needed.
3364   if (!Cand.isValid()) {
3365     TryCand.Reason = NodeOrder;
3366     return;
3367   }
3368 
3369   // Prioritize instructions that read unbuffered resources by stall cycles.
3370   if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
3371               Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
3372     return;
3373 
3374   // Keep clustered nodes together.
3375   if (tryGreater(TryCand.SU == DAG->getNextClusterSucc(),
3376                  Cand.SU == DAG->getNextClusterSucc(),
3377                  TryCand, Cand, Cluster))
3378     return;
3379 
3380   // Avoid critical resource consumption and balance the schedule.
3381   if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
3382               TryCand, Cand, ResourceReduce))
3383     return;
3384   if (tryGreater(TryCand.ResDelta.DemandedResources,
3385                  Cand.ResDelta.DemandedResources,
3386                  TryCand, Cand, ResourceDemand))
3387     return;
3388 
3389   // Avoid serializing long latency dependence chains.
3390   if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
3391     return;
3392   }
3393 
3394   // Fall through to original instruction order.
3395   if (TryCand.SU->NodeNum < Cand.SU->NodeNum)
3396     TryCand.Reason = NodeOrder;
3397 }
3398 
3399 void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
3400   ReadyQueue &Q = Top.Available;
3401   for (SUnit *SU : Q) {
3402     SchedCandidate TryCand(Cand.Policy);
3403     TryCand.SU = SU;
3404     TryCand.AtTop = true;
3405     TryCand.initResourceDelta(DAG, SchedModel);
3406     tryCandidate(Cand, TryCand);
3407     if (TryCand.Reason != NoCand) {
3408       Cand.setBest(TryCand);
3409       LLVM_DEBUG(traceCandidate(Cand));
3410     }
3411   }
3412 }
3413 
3414 /// Pick the next node to schedule.
3415 SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
3416   if (DAG->top() == DAG->bottom()) {
3417     assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
3418     return nullptr;
3419   }
3420   SUnit *SU;
3421   do {
3422     SU = Top.pickOnlyChoice();
3423     if (SU) {
3424       tracePick(Only1, true);
3425     } else {
3426       CandPolicy NoPolicy;
3427       SchedCandidate TopCand(NoPolicy);
3428       // Set the top-down policy based on the state of the current top zone and
3429       // the instructions outside the zone, including the bottom zone.
3430       setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
3431       pickNodeFromQueue(TopCand);
3432       assert(TopCand.Reason != NoCand && "failed to find a candidate");
3433       tracePick(TopCand);
3434       SU = TopCand.SU;
3435     }
3436   } while (SU->isScheduled);
3437 
3438   IsTopNode = true;
3439   Top.removeReady(SU);
3440 
3441   LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
3442                     << *SU->getInstr());
3443   return SU;
3444 }
3445 
3446 /// Called after ScheduleDAGMI has scheduled an instruction and updated
3447 /// scheduled/remaining flags in the DAG nodes.
3448 void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3449   SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3450   Top.bumpNode(SU);
3451 }
3452 
3453 ScheduleDAGMI *llvm::createGenericSchedPostRA(MachineSchedContext *C) {
3454   return new ScheduleDAGMI(C, std::make_unique<PostGenericScheduler>(C),
3455                            /*RemoveKillFlags=*/true);
3456 }
3457 
3458 //===----------------------------------------------------------------------===//
3459 // ILP Scheduler. Currently for experimental analysis of heuristics.
3460 //===----------------------------------------------------------------------===//
3461 
3462 namespace {
3463 
3464 /// Order nodes by the ILP metric.
3465 struct ILPOrder {
3466   const SchedDFSResult *DFSResult = nullptr;
3467   const BitVector *ScheduledTrees = nullptr;
3468   bool MaximizeILP;
3469 
3470   ILPOrder(bool MaxILP) : MaximizeILP(MaxILP) {}
3471 
3472   /// Apply a less-than relation on node priority.
3473   ///
3474   /// (Return true if A comes after B in the Q.)
3475   bool operator()(const SUnit *A, const SUnit *B) const {
3476     unsigned SchedTreeA = DFSResult->getSubtreeID(A);
3477     unsigned SchedTreeB = DFSResult->getSubtreeID(B);
3478     if (SchedTreeA != SchedTreeB) {
3479       // Unscheduled trees have lower priority.
3480       if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
3481         return ScheduledTrees->test(SchedTreeB);
3482 
3483       // Trees with shallower connections have lower priority.
3484       if (DFSResult->getSubtreeLevel(SchedTreeA)
3485           != DFSResult->getSubtreeLevel(SchedTreeB)) {
3486         return DFSResult->getSubtreeLevel(SchedTreeA)
3487           < DFSResult->getSubtreeLevel(SchedTreeB);
3488       }
3489     }
3490     if (MaximizeILP)
3491       return DFSResult->getILP(A) < DFSResult->getILP(B);
3492     else
3493       return DFSResult->getILP(A) > DFSResult->getILP(B);
3494   }
3495 };
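// Ordering sketch (hypothetical nodes): a node from a subtree that has not
// started scheduling sorts below one from a subtree that has, and shallower
// subtree connection levels sort lower; when those tie, MaximizeILP pops the
// node with the larger getILP() value first, otherwise the smaller.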
3496 
3497 /// Schedule based on the ILP metric.
3498 class ILPScheduler : public MachineSchedStrategy {
3499   ScheduleDAGMILive *DAG = nullptr;
3500   ILPOrder Cmp;
3501 
3502   std::vector<SUnit*> ReadyQ;
3503 
3504 public:
3505   ILPScheduler(bool MaximizeILP) : Cmp(MaximizeILP) {}
3506 
3507   void initialize(ScheduleDAGMI *dag) override {
3508     assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
3509     DAG = static_cast<ScheduleDAGMILive*>(dag);
3510     DAG->computeDFSResult();
3511     Cmp.DFSResult = DAG->getDFSResult();
3512     Cmp.ScheduledTrees = &DAG->getScheduledTrees();
3513     ReadyQ.clear();
3514   }
3515 
3516   void registerRoots() override {
3517     // Restore the heap in ReadyQ with the updated DFS results.
3518     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3519   }
3520 
3521   /// Implement MachineSchedStrategy interface.
3522   /// -----------------------------------------
3523 
3524   /// Callback to select the highest priority node from the ready Q.
3525   SUnit *pickNode(bool &IsTopNode) override {
3526     if (ReadyQ.empty()) return nullptr;
3527     std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3528     SUnit *SU = ReadyQ.back();
3529     ReadyQ.pop_back();
3530     IsTopNode = false;
3531     LLVM_DEBUG(dbgs() << "Pick node "
3532                       << "SU(" << SU->NodeNum << ") "
3533                       << " ILP: " << DAG->getDFSResult()->getILP(SU)
3534                       << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU)
3535                       << " @"
3536                       << DAG->getDFSResult()->getSubtreeLevel(
3537                              DAG->getDFSResult()->getSubtreeID(SU))
3538                       << '\n'
3539                       << "Scheduling " << *SU->getInstr());
3540     return SU;
3541   }
3542 
3543   /// Scheduler callback to notify that a new subtree is scheduled.
3544   void scheduleTree(unsigned SubtreeID) override {
3545     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3546   }
3547 
3548   /// Callback after a node is scheduled. Mark a newly scheduled tree, notify
3549   /// DFSResults, and resort the priority Q.
3550   void schedNode(SUnit *SU, bool IsTopNode) override {
3551     assert(!IsTopNode && "SchedDFSResult needs bottom-up");
3552   }
3553 
3554   void releaseTopNode(SUnit *) override { /*only called for top roots*/ }
3555 
3556   void releaseBottomNode(SUnit *SU) override {
3557     ReadyQ.push_back(SU);
3558     std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3559   }
3560 };
3561 
3562 } // end anonymous namespace
3563 
3564 static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
3565   return new ScheduleDAGMILive(C, std::make_unique<ILPScheduler>(true));
3566 }
3567 static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
3568   return new ScheduleDAGMILive(C, std::make_unique<ILPScheduler>(false));
3569 }
3570 
3571 static MachineSchedRegistry ILPMaxRegistry(
3572   "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
3573 static MachineSchedRegistry ILPMinRegistry(
3574   "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);
3575 
3576 //===----------------------------------------------------------------------===//
3577 // Machine Instruction Shuffler for Correctness Testing
3578 //===----------------------------------------------------------------------===//
3579 
3580 #ifndef NDEBUG
3581 namespace {
3582 
3583 /// Apply a less-than relation on the node order, which corresponds to the
3584 /// instruction order prior to scheduling. IsReverse implements greater-than.
3585 template<bool IsReverse>
3586 struct SUnitOrder {
3587   bool operator()(SUnit *A, SUnit *B) const {
3588     if (IsReverse)
3589       return A->NodeNum > B->NodeNum;
3590     else
3591       return A->NodeNum < B->NodeNum;
3592   }
3593 };
3594 
3595 /// Reorder instructions as much as possible.
3596 class InstructionShuffler : public MachineSchedStrategy {
3597   bool IsAlternating;
3598   bool IsTopDown;
3599 
3600   // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
3601   // gives nodes with a higher number higher priority, causing the latest
3602   // instructions to be scheduled first.
3603   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false>>
3604     TopQ;
3605 
3606   // When scheduling bottom-up, use greater-than as the queue priority.
3607   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true>>
3608     BottomQ;
3609 
3610 public:
3611   InstructionShuffler(bool alternate, bool topdown)
3612     : IsAlternating(alternate), IsTopDown(topdown) {}
3613 
3614   void initialize(ScheduleDAGMI*) override {
3615     TopQ.clear();
3616     BottomQ.clear();
3617   }
3618 
3619   /// Implement MachineSchedStrategy interface.
3620   /// -----------------------------------------
3621 
3622   SUnit *pickNode(bool &IsTopNode) override {
3623     SUnit *SU;
3624     if (IsTopDown) {
3625       do {
3626         if (TopQ.empty()) return nullptr;
3627         SU = TopQ.top();
3628         TopQ.pop();
3629       } while (SU->isScheduled);
3630       IsTopNode = true;
3631     } else {
3632       do {
3633         if (BottomQ.empty()) return nullptr;
3634         SU = BottomQ.top();
3635         BottomQ.pop();
3636       } while (SU->isScheduled);
3637       IsTopNode = false;
3638     }
3639     if (IsAlternating)
3640       IsTopDown = !IsTopDown;
3641     return SU;
3642   }
3643 
3644   void schedNode(SUnit *SU, bool IsTopNode) override {}
3645 
3646   void releaseTopNode(SUnit *SU) override {
3647     TopQ.push(SU);
3648   }
3649   void releaseBottomNode(SUnit *SU) override {
3650     BottomQ.push(SU);
3651   }
3652 };
3653 
3654 } // end anonymous namespace
3655 
3656 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
3657   bool Alternate = !ForceTopDown && !ForceBottomUp;
3658   bool TopDown = !ForceBottomUp;
3659   assert((TopDown || !ForceTopDown) &&
3660          "-misched-topdown incompatible with -misched-bottomup");
3661   return new ScheduleDAGMILive(
3662       C, std::make_unique<InstructionShuffler>(Alternate, TopDown));
3663 }
3664 
3665 static MachineSchedRegistry ShufflerRegistry(
3666   "shuffle", "Shuffle machine instructions alternating directions",
3667   createInstructionShuffler);
3668 #endif // !NDEBUG
3669 
3670 //===----------------------------------------------------------------------===//
3671 // GraphWriter support for ScheduleDAGMILive.
3672 //===----------------------------------------------------------------------===//
3673 
3674 #ifndef NDEBUG
3675 namespace llvm {
3676 
3677 template<> struct GraphTraits<
3678   ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};
3679 
3680 template<>
3681 struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
3682   DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
3683 
3684   static std::string getGraphName(const ScheduleDAG *G) {
3685     return G->MF.getName();
3686   }
3687 
3688   static bool renderGraphFromBottomUp() {
3689     return true;
3690   }
3691 
3692   static bool isNodeHidden(const SUnit *Node) {
3693     if (ViewMISchedCutoff == 0)
3694       return false;
3695     return (Node->Preds.size() > ViewMISchedCutoff
3696          || Node->Succs.size() > ViewMISchedCutoff);
3697   }
3698 
3699   /// If you want to override the dot attributes printed for a particular
3700   /// edge, override this method.
3701   static std::string getEdgeAttributes(const SUnit *Node,
3702                                        SUnitIterator EI,
3703                                        const ScheduleDAG *Graph) {
3704     if (EI.isArtificialDep())
3705       return "color=cyan,style=dashed";
3706     if (EI.isCtrlDep())
3707       return "color=blue,style=dashed";
3708     return "";
3709   }
3710 
3711   static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
3712     std::string Str;
3713     raw_string_ostream SS(Str);
3714     const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
3715     const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
3716       static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
3717     SS << "SU:" << SU->NodeNum;
3718     if (DFS)
3719       SS << " I:" << DFS->getNumInstrs(SU);
3720     return SS.str();
3721   }
3722 
3723   static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
3724     return G->getGraphNodeLabel(SU);
3725   }
3726 
3727   static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
3728     std::string Str("shape=Mrecord");
3729     const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
3730     const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
3731       static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
3732     if (DFS) {
3733       Str += ",style=filled,fillcolor=\"#";
3734       Str += DOT::getColorString(DFS->getSubtreeID(N));
3735       Str += '"';
3736     }
3737     return Str;
3738   }
3739 };
3740 
3741 } // end namespace llvm
3742 #endif // NDEBUG
3743 
3744 /// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
3745 /// rendered using 'dot'.
3746 void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
3747 #ifndef NDEBUG
3748   ViewGraph(this, Name, false, Title);
3749 #else
3750   errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
3751          << "systems with Graphviz or gv!\n";
3752 #endif  // NDEBUG
3753 }
3754 
3755 /// Out-of-line implementation with no arguments is handy for gdb.
3756 void ScheduleDAGMI::viewGraph() {
3757   viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
3758 }
3759