//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include <queue>

using namespace llvm;

#define DEBUG_TYPE "misched"

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
                       cl::desc("Print critical path length to stdout"));
}

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

/// In some situations a few uninteresting nodes depend on nearly all other
/// nodes in the graph; provide a cutoff to hide them.
static cl::opt<unsigned> ViewMISchedCutoff("view-misched-cutoff", cl::Hidden,
  cl::desc("Hide nodes with more predecessors/successors than the cutoff"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));

static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
  cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
  cl::desc("Only schedule this MBB#"));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
  cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(true));

static cl::opt<bool> EnableLoadCluster("misched-cluster", cl::Hidden,
  cl::desc("Enable load clustering."), cl::init(true));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}
void ScheduleDAGMutation::anchor() {}

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
    MF(nullptr), MLI(nullptr), MDT(nullptr), PassConfig(nullptr), AA(nullptr),
    LIS(nullptr) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// Base class for a machine scheduler class that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}

  void print(raw_ostream &O, const Module* = nullptr) const override;

protected:
  void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags);
};

/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createMachineScheduler();
};

/// PostMachineScheduler runs shortly before code emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
  PostMachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createPostMachineScheduler();
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "machine-scheduler",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "machine-scheduler",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineSchedulerBase(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

char PostMachineScheduler::ID = 0;

char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;

INITIALIZE_PASS(PostMachineScheduler, "postmisched",
                "PostRA Machine Instruction Scheduler", false, false)

PostMachineScheduler::PostMachineScheduler()
: MachineSchedulerBase(ID) {
  initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return nullptr;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);

static cl::opt<bool> EnableMachineSched(
    "enable-misched",
    cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
    cl::Hidden);

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C);
static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C);

/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return const_cast<MachineInstr*>(
    &*priorNonDebug(MachineBasicBlock::const_iterator(I), Beg));
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  // Cast the return value to nonconst MachineInstr, then cast to an
  // instr_iterator, which does not check for null, finally return a
  // bundle_iterator.
  return MachineBasicBlock::instr_iterator(
    const_cast<MachineInstr*>(
      &*nextIfDebug(MachineBasicBlock::const_iterator(I), End)));
}

/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor != useDefaultMachineSched)
    return Ctor(this);

  // Get the default scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedLive(this);
}

/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (EnableMachineSched.getNumOccurrences()) {
    if (!EnableMachineSched)
      return false;
  } else if (!mf.getSubtarget().enableMachineScheduler())
    return false;

  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler, false);

  DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipOptnoneFunction(*mf.getFunction()))
    return false;

  if (!mf.getSubtarget().enablePostRAScheduler()) {
    DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
    return false;
  }
  DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  PassConfig = &getAnalysis<TargetPassConfig>();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  scheduleRegions(*Scheduler, true);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}

/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need the isCall check to
/// enforce the boundary, but there would be no benefit to postRA scheduling
/// across calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
                            MachineBasicBlock *MBB,
                            MachineFunction *MF,
                            const TargetInstrInfo *TII) {
  return MI->isCall() || TII->isSchedulingBoundary(MI, MBB, *MF);
}

/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler,
                                           bool FixKillFlags) {
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(&*MBB);

#ifndef NDEBUG
    if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
      continue;
    if (SchedOnlyBlock.getNumOccurrences()
        && (int)SchedOnlyBlock != MBB->getNumber())
      continue;
#endif

    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    //
    // MBB::size() uses instr_iterator to count. Here we need a bundle to count
    // as a single instruction.
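    //
    // For illustration only (hypothetical block): a call is a scheduling
    // boundary, so a block of the form
    //
    //   %a = ...        \  region 2 (visited second)
    //   %b = ...        /
    //   CALL @f         <- boundary, excluded from both regions' DAGs
    //   %c = ...        \  region 1 (visited first; regions are visited
    //   %d = ...        /  bottom-up within the block)
    //
    // is split into two regions around the call.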
    unsigned RemainingInstrs = std::distance(MBB->begin(), MBB->end());
    for (MachineBasicBlock::iterator RegionEnd = MBB->end();
         RegionEnd != MBB->begin(); RegionEnd = Scheduler.begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end() ||
          isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII)) {
        --RegionEnd;
        // Count the boundary instruction.
        --RemainingInstrs;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      unsigned NumRegionInstrs = 0;
      MachineBasicBlock::iterator I = RegionEnd;
      for (; I != MBB->begin(); --I, --RemainingInstrs) {
        if (isSchedBoundary(&*std::prev(I), &*MBB, MF, TII))
          break;
        if (!I->isDebugValue())
          ++NumRegionInstrs;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == std::prev(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n  From: " << *I << "    To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " RegionInstrs: " << NumRegionInstrs
            << " Remaining: " << RemainingInstrs << "\n");
      if (DumpCriticalPathLength) {
        errs() << MF->getName();
        errs() << ":BB# " << MBB->getNumber();
        errs() << " " << MBB->getName() << " \n";
      }

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler.begin();
    }
    assert(RemainingInstrs == 0 && "Instruction count mismatch!");
    Scheduler.finishBlock();
    // FIXME: Ideally, no further passes should rely on kill flags. However,
    // thumb2 size reduction is currently an exception, so the PostMIScheduler
    // needs to do this.
    if (FixKillFlags)
      Scheduler.fixupKills(&*MBB);
  }
  Scheduler.finalizeSchedule();
}

void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

LLVM_DUMP_METHOD
void ReadyQueue::dump() {
  dbgs() << "Queue " << Name << ": ";
  for (unsigned i = 0, e = Queue.size(); i < e; ++i)
    dbgs() << Queue[i]->NodeNum << " ";
  dbgs() << "\n";
}

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
// virtual registers.
//===----------------------------------------------------------------------===//

// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() {
}

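/// Returns true if an edge making \p SuccSU depend on \p PredSU can be added
/// without creating a cycle.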
bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
    SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();
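  // In effect: SuccSU->TopReadyCycle =
  //   max(SuccSU->TopReadyCycle, SU->TopReadyCycle + SuccEdge->getLatency()).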

  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
    PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();

  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                     MachineBasicBlock::iterator begin,
                                     MachineBasicBlock::iterator end,
                                     unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  SchedImpl->initPolicy(begin, end, regioninstrs);
}

/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
  MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals
  if (LIS)
    LIS->handleMove(MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}

/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
  DEBUG(SchedImpl->dumpPolicy());

  // Build the DAG.
  buildSchedGraph(AA);

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    }
    else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
    }
    // Notify the scheduling strategy before updating the DAG.
    // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
    // runs, it can then use the accurate ReadyCycle time to determine whether
    // newly released nodes can move to the readyQ.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
    Mutations[i]->apply(this);
  }
}

void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                      SmallVectorImpl<SUnit*> &BotRoots) {
  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    SUnit *SU = &(*I);
    assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU->biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!I->NumPredsLeft)
      TopRoots.push_back(SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!I->NumSuccsLeft)
      BotRoots.push_back(SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and set up scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = nullptr;
  NextClusterPred = nullptr;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
    SchedImpl->releaseTopNode(*I);
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

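  // Walk the recorded (DbgValue, preceding instruction) pairs in reverse
  // order, re-splicing each DBG_VALUE immediately after the instruction it
  // originally followed.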
  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == std::prev(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);

  SUPressureDiffs.clear();

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
  ShouldTrackLaneMasks = SchedImpl->shouldTrackLaneMasks();

  if (ShouldTrackLaneMasks) {
    if (!ShouldTrackPressure)
      report_fatal_error("ShouldTrackLaneMasks requires ShouldTrackPressure");
    // Dead subregister defs have no users and therefore no dependencies,
    // moving them around may cause liveintervals to degrade into multiple
    // components. Change independent components to have their own vreg to avoid
    // this.
    if (!DisconnectedComponentsRenamed)
      LIS->renameDisconnectedComponents();
  }
}

// Set up the register pressure trackers for the top and bottom scheduled
// regions.
void ScheduleDAGMILive::initRegPressure() {
  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin,
                    ShouldTrackLaneMasks, false);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                    ShouldTrackLaneMasks, false);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    DEBUG(dbgs() << "Live Thru: ";
          dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // For each live out vreg reduce the pressure change associated with other
  // uses of the same vreg below the live-out reaching def.
  updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd) {
    SmallVector<RegisterMaskPair, 8> LiveUses;
    BotRPTracker.recede(&LiveUses);
    updatePressureDiffs(LiveUses);
  }

  DEBUG(
    dbgs() << "Top Pressure:\n";
    dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
    dbgs() << "Bottom Pressure:\n";
    dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
  );

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      DEBUG(dbgs() << TRI->getRegPressureSetName(i)
            << " Limit " << Limit
            << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureChange(i));
    }
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].getPSet()) << " ";
        dbgs() << "\n");
}

void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
                        const std::vector<unsigned> &NewMaxPressure) {
  const PressureDiff &PDiff = getPressureDiff(SU);
  unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
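  // Both PDiff and RegionCriticalPSets are sorted by PSet ID, so the two
  // lists can be merged in a single forward walk.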
  for (PressureDiff::const_iterator I = PDiff.begin(), E = PDiff.end();
       I != E; ++I) {
    if (!I->isValid())
      break;
    unsigned ID = I->getPSet();
    while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
      ++CritIdx;
    if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
      if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
          && NewMaxPressure[ID] <= INT16_MAX)
        RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
    }
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
    if (NewMaxPressure[ID] >= Limit - 2) {
      DEBUG(dbgs() << "  " << TRI->getRegPressureSetName(ID) << ": "
            << NewMaxPressure[ID]
            << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ") << Limit
            << "(+ " << BotRPTracker.getLiveThru()[ID] << " livethru)\n");
    }
  }
}

/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(
    ArrayRef<RegisterMaskPair> LiveUses) {
  for (const RegisterMaskPair &P : LiveUses) {
    unsigned Reg = P.RegUnit;
    /// FIXME: Currently assuming single-use physregs.
    if (!TRI->isVirtualRegister(Reg))
      continue;

    if (ShouldTrackLaneMasks) {
      // If the register has just become live then other uses won't change
      // this fact anymore => decrement pressure.
      // If the register has just become dead then other uses make it come
      // back to life => increment pressure.
      bool Decrement = P.LaneMask != 0;

      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit &SU = *V2SU.SU;
        if (SU.isScheduled || &SU == &ExitSU)
          continue;

        PressureDiff &PDiff = getPressureDiff(&SU);
        PDiff.addPressureChange(Reg, Decrement, &MRI);
        DEBUG(
          dbgs() << "  UpdateRegP: SU(" << SU.NodeNum << ") "
                 << PrintReg(Reg, TRI) << ':' << PrintLaneMask(P.LaneMask)
                 << ' ' << *SU.getInstr();
          dbgs() << "              to ";
          PDiff.dump(*TRI);
        );
      }
    } else {
      assert(P.LaneMask != 0);
      DEBUG(dbgs() << "  LiveReg: " << PrintVRegOrUnit(Reg, TRI) << "\n");
      // This may be called before CurrentBottom has been initialized. However,
      // BotRPTracker must have a valid position. We want the value live into the
      // instruction or live out of the block, so ask for the previous
      // instruction's live-out.
      const LiveInterval &LI = LIS->getInterval(Reg);
      VNInfo *VNI;
      MachineBasicBlock::const_iterator I =
        nextIfDebug(BotRPTracker.getPos(), BB->end());
      if (I == BB->end())
        VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
      else {
        LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(I));
        VNI = LRQ.valueIn();
      }
      // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
      assert(VNI && "No live value at use.");
      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit *SU = V2SU.SU;
        // If this use comes before the reaching def, it cannot be a last use,
        // so decrease its pressure change.
        if (!SU->isScheduled && SU != &ExitSU) {
          LiveQueryResult LRQ
            = LI.Query(LIS->getInstructionIndex(SU->getInstr()));
          if (LRQ.valueIn() == VNI) {
            PressureDiff &PDiff = getPressureDiff(SU);
            PDiff.addPressureChange(Reg, true, &MRI);
            DEBUG(
              dbgs() << "  UpdateRegP: SU(" << SU->NodeNum << ") "
                     << *SU->getInstr();
              dbgs() << "              to ";
              PDiff.dump(*TRI);
            );
          }
        }
      }
    }
  }
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMILive then it will want to override this virtual method in order
/// to update any specialized state.
void ScheduleDAGMILive::schedule() {
  DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
  DEBUG(SchedImpl->dumpPolicy());
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(
    for (const SUnit &SU : SUnits) {
      SU.dumpAll(this);
      if (ShouldTrackPressure) {
        dbgs() << "  Pressure Diff      : ";
        getPressureDiff(&SU).dump(*TRI);
      }
      dbgs() << '\n';
    }
  );
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  if (ShouldTrackPressure) {
    assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
    TopRPTracker.setPos(CurrentTop);
  }

  bool IsTopNode = false;
  while (true) {
    DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    if (DFSResult) {
      unsigned SubtreeID = DFSResult->getSubtreeID(SU);
      if (!ScheduledTrees.test(SubtreeID)) {
        ScheduledTrees.set(SubtreeID);
        DFSResult->scheduleTree(SubtreeID);
        SchedImpl->scheduleTree(SubtreeID);
      }
    }

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Build the DAG and set up three register pressure trackers.
void ScheduleDAGMILive::buildDAGWithRegPressure() {
  if (!ShouldTrackPressure) {
    RPTracker.reset();
    RegionCriticalPSets.clear();
    buildSchedGraph(AA);
    return;
  }

  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 ShouldTrackLaneMasks, /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker, &SUPressureDiffs, LIS, ShouldTrackLaneMasks);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

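/// Compute (or recompute) the DFS subtree structure for the current DAG and
/// clear the record of already-scheduled subtrees.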
void ScheduleDAGMILive::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single block loops. To handle loops that
/// span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes. For example, consider
/// the following instruction sequence where each instruction has unit latency
/// and defines an eponymous virtual register:
///
/// a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
///
/// This could be relevant to PostRA scheduling, but is currently implemented
/// assuming LiveIntervals.
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
  // This only applies to single block loops.
  if (!BB->isSuccessor(BB))
    return 0;

  unsigned MaxCyclicLatency = 0;
  // Visit each live out vreg def to find def/use pairs that cross iterations.
  for (const RegisterMaskPair &P : RPTracker.getPressure().LiveOutRegs) {
    unsigned Reg = P.RegUnit;
    if (!TRI->isVirtualRegister(Reg))
      continue;
    const LiveInterval &LI = LIS->getInterval(Reg);
    const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    if (!DefVNI)
      continue;

    MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
    const SUnit *DefSU = getSUnit(DefMI);
    if (!DefSU)
      continue;

    unsigned LiveOutHeight = DefSU->getHeight();
    unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
    // Visit all local users of the vreg def.
    for (const VReg2SUnit &V2SU
         : make_range(VRegUses.find(Reg), VRegUses.end())) {
      SUnit *SU = V2SU.SU;
      if (SU == &ExitSU)
        continue;

      // Only consider uses of the phi.
      LiveQueryResult LRQ =
        LI.Query(LIS->getInstructionIndex(SU->getInstr()));
      if (!LRQ.valueIn()->isPHIDef())
        continue;

      // Assume that a path spanning two iterations is a cycle, which could
      // overestimate in strange cases. This allows cyclic latency to be
      // estimated as the minimum slack of the vreg's depth or height.
      unsigned CyclicLatency = 0;
      if (LiveOutDepth > SU->getDepth())
        CyclicLatency = LiveOutDepth - SU->getDepth();

      unsigned LiveInHeight = SU->getHeight() + DefSU->Latency;
      if (LiveInHeight > LiveOutHeight) {
        if (LiveInHeight - LiveOutHeight < CyclicLatency)
          CyclicLatency = LiveInHeight - LiveOutHeight;
      }
      else
        CyclicLatency = 0;

      DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
            << SU->NodeNum << ") = " << CyclicLatency << "c\n");
      if (CyclicLatency > MaxCyclicLatency)
        MaxCyclicLatency = CyclicLatency;
    }
  }
  DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
  return MaxCyclicLatency;
}

/// Move an instruction and update register pressure.
void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    if (ShouldTrackPressure) {
      // Update top scheduled pressure.
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      TopRPTracker.advance(RegOpers);
      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
      DEBUG(
        dbgs() << "Top Pressure:\n";
        dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
      );

      updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
    }
  }
  else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
    }
    if (ShouldTrackPressure) {
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      BotRPTracker.recedeSkipDebugValues();
      SmallVector<RegisterMaskPair, 8> LiveUses;
      BotRPTracker.recede(RegOpers, &LiveUses);
      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
      DEBUG(
        dbgs() << "Bottom Pressure:\n";
        dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
      );

      updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
      updatePressureDiffs(LiveUses);
    }
  }
}

//===----------------------------------------------------------------------===//
// LoadClusterMutation - DAG post-processing to cluster loads.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between neighboring
/// loads.
class LoadClusterMutation : public ScheduleDAGMutation {
  struct LoadInfo {
    SUnit *SU;
    unsigned BaseReg;
    unsigned Offset;
    LoadInfo(SUnit *su, unsigned reg, unsigned ofs)
      : SU(su), BaseReg(reg), Offset(ofs) {}

    bool operator<(const LoadInfo &RHS) const {
      return std::tie(BaseReg, Offset) < std::tie(RHS.BaseReg, RHS.Offset);
    }
  };

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
public:
  LoadClusterMutation(const TargetInstrInfo *tii,
                      const TargetRegisterInfo *tri)
    : TII(tii), TRI(tri) {}

  void apply(ScheduleDAGMI *DAG) override;
protected:
  void clusterNeighboringLoads(ArrayRef<SUnit*> Loads, ScheduleDAGMI *DAG);
};
} // anonymous

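// For illustration only (hypothetical operands): given loads with
// (BaseReg, Offset) records (%base, 0), (%base, 8), (%base, 16), the sort
// below places them adjacently, and shouldClusterLoads decides how long the
// cluster chain SU(a)->SU(b)->SU(c) may grow.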
1376 void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
1377                                                   ScheduleDAGMI *DAG) {
1378   SmallVector<LoadClusterMutation::LoadInfo,32> LoadRecords;
1379   for (unsigned Idx = 0, End = Loads.size(); Idx != End; ++Idx) {
1380     SUnit *SU = Loads[Idx];
1381     unsigned BaseReg;
1382     unsigned Offset;
1383     if (TII->getMemOpBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
1384       LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset));
1385   }
1386   if (LoadRecords.size() < 2)
1387     return;
1388   std::sort(LoadRecords.begin(), LoadRecords.end());
1389   unsigned ClusterLength = 1;
1390   for (unsigned Idx = 0, End = LoadRecords.size(); Idx < (End - 1); ++Idx) {
1391     if (LoadRecords[Idx].BaseReg != LoadRecords[Idx+1].BaseReg) {
1392       ClusterLength = 1;
1393       continue;
1394     }
1395 
1396     SUnit *SUa = LoadRecords[Idx].SU;
1397     SUnit *SUb = LoadRecords[Idx+1].SU;
1398     if (TII->shouldClusterLoads(SUa->getInstr(), SUb->getInstr(), ClusterLength)
1399         && DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
1400 
1401       DEBUG(dbgs() << "Cluster loads SU(" << SUa->NodeNum << ") - SU("
1402             << SUb->NodeNum << ")\n");
1403       // Copy successor edges from SUa to SUb. Interleaving computation
1404       // dependent on SUa can prevent load combining due to register reuse.
1405       // Predecessor edges do not need to be copied from SUb to SUa since nearby
1406       // loads should have effectively the same inputs.
1407       for (SUnit::const_succ_iterator
1408              SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
1409         if (SI->getSUnit() == SUb)
1410           continue;
1411         DEBUG(dbgs() << "  Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
1412         DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
1413       }
1414       ++ClusterLength;
1415     }
1416     else
1417       ClusterLength = 1;
1418   }
1419 }
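// Illustrative sketch of the pass above, with hypothetical units and operands.
// Sorting by (BaseReg, Offset) lines up candidate neighbors; the target hook
// then decides whether each adjacent pair is worth a cluster edge:
//
//   SU(7): %vreg2 = LOAD %vreg1, 0   \  same base, ascending offsets:
//   SU(4): %vreg3 = LOAD %vreg1, 8   /  try SU(7)->SU(4), ClusterLength=2
//   SU(9): %vreg5 = LOAD %vreg6, 4      new base register: length resets to 1
//
// A target's shouldClusterLoads() typically caps ClusterLength at the number
// of loads its load-combining or paired-load patterns can absorb.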
1420 
1421 /// \brief Callback from DAG postProcessing to create cluster edges for loads.
1422 void LoadClusterMutation::apply(ScheduleDAGMI *DAG) {
1423   // Map DAG NodeNum to store chain ID.
1424   DenseMap<unsigned, unsigned> StoreChainIDs;
1425   // Map each store chain to a set of dependent loads.
1426   SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
1427   for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
1428     SUnit *SU = &DAG->SUnits[Idx];
1429     if (!SU->getInstr()->mayLoad())
1430       continue;
1431     unsigned ChainPredID = DAG->SUnits.size();
1432     for (SUnit::const_pred_iterator
1433            PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
1434       if (PI->isCtrl()) {
1435         ChainPredID = PI->getSUnit()->NodeNum;
1436         break;
1437       }
1438     }
1439     // Check if this chain-like pred has been seen
1440     // before. ChainPredID==DAG->SUnits.size() for loads at the top of the schedule.
1441     unsigned NumChains = StoreChainDependents.size();
1442     std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
1443       StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
1444     if (Result.second)
1445       StoreChainDependents.resize(NumChains + 1);
1446     StoreChainDependents[Result.first->second].push_back(SU);
1447   }
1448   // Iterate over the store chains.
1449   for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
1450     clusterNeighboringLoads(StoreChainDependents[Idx], DAG);
1451 }
1452 
1453 //===----------------------------------------------------------------------===//
1454 // MacroFusion - DAG post-processing to encourage fusion of macro ops.
1455 //===----------------------------------------------------------------------===//
1456 
1457 namespace {
1458 /// \brief Post-process the DAG to create cluster edges between instructions
1459 /// that may be fused by the processor into a single operation.
1460 class MacroFusion : public ScheduleDAGMutation {
1461   const TargetInstrInfo &TII;
1462   const TargetRegisterInfo &TRI;
1463 public:
1464   MacroFusion(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI)
1465     : TII(TII), TRI(TRI) {}
1466 
1467   void apply(ScheduleDAGMI *DAG) override;
1468 };
1469 } // anonymous namespace
1470 
1471 /// Returns true if \p MI reads a register written by \p Other.
1472 static bool HasDataDep(const TargetRegisterInfo &TRI, const MachineInstr &MI,
1473                        const MachineInstr &Other) {
1474   for (const MachineOperand &MO : MI.uses()) {
1475     if (!MO.isReg() || !MO.readsReg())
1476       continue;
1477 
1478     unsigned Reg = MO.getReg();
1479     if (Other.modifiesRegister(Reg, &TRI))
1480       return true;
1481   }
1482   return false;
1483 }
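// For example (hypothetical operands), HasDataDep returns true for a
// compare-and-branch pair because the branch reads the flags the compare
// defines:
//
//   Other: CMP32ri %vreg0, 42, implicit-def %EFLAGS
//   MI:    JE_1 <BB#2>, implicit %EFLAGS
//
// This is the data dependence MacroFusion::apply() looks for below.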
1484 
1485 /// \brief Callback from DAG postProcessing to create cluster edges to encourage
1486 /// fused operations.
1487 void MacroFusion::apply(ScheduleDAGMI *DAG) {
1488   // For now, assume targets can only fuse with the branch.
1489   SUnit &ExitSU = DAG->ExitSU;
1490   MachineInstr *Branch = ExitSU.getInstr();
1491   if (!Branch)
1492     return;
1493 
1494   for (SUnit &SU : DAG->SUnits) {
1495     // SUnits with successors can't be scheduled in front of the ExitSU.
1496     if (!SU.Succs.empty())
1497       continue;
1498     // We only care if the node writes to a register that the branch reads.
1499     MachineInstr *Pred = SU.getInstr();
1500     if (!HasDataDep(TRI, *Branch, *Pred))
1501       continue;
1502 
1503     if (!TII.shouldScheduleAdjacent(Pred, Branch))
1504       continue;
1505 
1506     // Create a single weak edge from SU to ExitSU. The only effect is to cause
1507     // bottom-up scheduling to heavily prioritize the clustered SU.  There is no
1508     // need to copy predecessor edges from ExitSU to SU, since top-down
1509     // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
1510     // of SU, we could create an artificial edge from the deepest root, but it
1511     // hasn't been needed yet.
1512     bool Success = DAG->addEdge(&ExitSU, SDep(&SU, SDep::Cluster));
1513     (void)Success;
1514     assert(Success && "No DAG nodes should be reachable from ExitSU");
1515 
1516     DEBUG(dbgs() << "Macro Fuse SU(" << SU.NodeNum << ")\n");
1517     break;
1518   }
1519 }
1520 
1521 //===----------------------------------------------------------------------===//
1522 // CopyConstrain - DAG post-processing to encourage copy elimination.
1523 //===----------------------------------------------------------------------===//
1524 
1525 namespace {
1526 /// \brief Post-process the DAG to create weak edges from all uses of a copy to
1527 /// the one use that defines the copy's source vreg, most likely an induction
1528 /// variable increment.
1529 class CopyConstrain : public ScheduleDAGMutation {
1530   // Transient state.
1531   SlotIndex RegionBeginIdx;
1532   // RegionEndIdx is the slot index of the last non-debug instruction in the
1533   // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
1534   SlotIndex RegionEndIdx;
1535 public:
1536   CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}
1537 
1538   void apply(ScheduleDAGMI *DAG) override;
1539 
1540 protected:
1541   void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
1542 };
1543 } // anonymous namespace
1544 
1545 /// constrainLocalCopy handles two possibilities:
1546 /// 1) Local src:
1547 /// I0:     = dst
1548 /// I1: src = ...
1549 /// I2:     = dst
1550 /// I3: dst = src (copy)
1551 /// (create pred->succ edges I0->I1, I2->I1)
1552 ///
1553 /// 2) Local copy:
1554 /// I0: dst = src (copy)
1555 /// I1:     = dst
1556 /// I2: src = ...
1557 /// I3:     = dst
1558 /// (create pred->succ edges I1->I2, I3->I2)
1559 ///
1560 /// Although the MachineScheduler is currently constrained to single blocks,
1561 /// this algorithm should handle extended blocks. An EBB is a set of
1562 /// contiguously numbered blocks such that the previous block in the EBB is
1563 /// always the single predecessor.
1564 void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
1565   LiveIntervals *LIS = DAG->getLIS();
1566   MachineInstr *Copy = CopySU->getInstr();
1567 
1568   // Check for pure vreg copies.
1569   unsigned SrcReg = Copy->getOperand(1).getReg();
1570   if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
1571     return;
1572 
1573   unsigned DstReg = Copy->getOperand(0).getReg();
1574   if (!TargetRegisterInfo::isVirtualRegister(DstReg))
1575     return;
1576 
1577   // Check if either the dest or source is local. If it's live across a back
1578   // edge, it's not local. Note that if both vregs are live across the back
1579   // edge, we cannot successfully constrain the copy without cyclic scheduling.
1580   // If both the copy's source and dest are local live intervals, then we
1581   // should treat the dest as the global for the purpose of adding
1582   // constraints. This adds edges from source's other uses to the copy.
1583   unsigned LocalReg = SrcReg;
1584   unsigned GlobalReg = DstReg;
1585   LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
1586   if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
1587     LocalReg = DstReg;
1588     GlobalReg = SrcReg;
1589     LocalLI = &LIS->getInterval(LocalReg);
1590     if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
1591       return;
1592   }
1593   LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);
1594 
1595   // Find the global segment after the start of the local LI.
1596   LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
1597   // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
1598   // local live range. We could create edges from other global uses to the local
1599   // start, but the coalescer should have already eliminated these cases, so
1600   // don't bother dealing with it.
1601   if (GlobalSegment == GlobalLI->end())
1602     return;
1603 
1604   // If GlobalSegment is killed at the LocalLI->start, the call to find()
1605   // returned the next global segment. But if GlobalSegment overlaps with
1606   // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
1607   // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
1608   if (GlobalSegment->contains(LocalLI->beginIndex()))
1609     ++GlobalSegment;
1610 
1611   if (GlobalSegment == GlobalLI->end())
1612     return;
1613 
1614   // Check if GlobalLI contains a hole in the vicinity of LocalLI.
1615   if (GlobalSegment != GlobalLI->begin()) {
1616     // Two address defs have no hole.
1617     if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
1618                                GlobalSegment->start)) {
1619       return;
1620     }
1621     // If the prior global segment may be defined by the same two-address
1622     // instruction that also defines LocalLI, then can't make a hole here.
1623     if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
1624                                LocalLI->beginIndex())) {
1625       return;
1626     }
1627     // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
1628     // it would be a disconnected component in the live range.
1629     assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
1630            "Disconnected LRG within the scheduling region.");
1631   }
1632   MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
1633   if (!GlobalDef)
1634     return;
1635 
1636   SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
1637   if (!GlobalSU)
1638     return;
1639 
1640   // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
1641   // constraining the uses of the last local def to precede GlobalDef.
1642   SmallVector<SUnit*,8> LocalUses;
1643   const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
1644   MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
1645   SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
1646   for (SUnit::const_succ_iterator
1647          I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
1648        I != E; ++I) {
1649     if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
1650       continue;
1651     if (I->getSUnit() == GlobalSU)
1652       continue;
1653     if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
1654       return;
1655     LocalUses.push_back(I->getSUnit());
1656   }
1657   // Open the top of the GlobalLI hole by constraining any earlier global uses
1658   // to precede the start of LocalLI.
1659   SmallVector<SUnit*,8> GlobalUses;
1660   MachineInstr *FirstLocalDef =
1661     LIS->getInstructionFromIndex(LocalLI->beginIndex());
1662   SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
1663   for (SUnit::const_pred_iterator
1664          I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
1665     if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
1666       continue;
1667     if (I->getSUnit() == FirstLocalSU)
1668       continue;
1669     if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
1670       return;
1671     GlobalUses.push_back(I->getSUnit());
1672   }
1673   DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
1674   // Add the weak edges.
1675   for (SmallVectorImpl<SUnit*>::const_iterator
1676          I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
1677     DEBUG(dbgs() << "  Local use SU(" << (*I)->NodeNum << ") -> SU("
1678           << GlobalSU->NodeNum << ")\n");
1679     DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
1680   }
1681   for (SmallVectorImpl<SUnit*>::const_iterator
1682          I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
1683     DEBUG(dbgs() << "  Global use SU(" << (*I)->NodeNum << ") -> SU("
1684           << FirstLocalSU->NodeNum << ")\n");
1685     DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
1686   }
1687 }
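// Relating the code above to the header comment: the LocalUses edges open the
// bottom of the GlobalLI hole (uses of the last local def are constrained to
// precede GlobalDef), and the GlobalUses edges open the top (earlier global
// uses are constrained to precede the first local def). Together these are
// the pred->succ edges sketched in cases 1) and 2) above.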
1688 
1689 /// \brief Callback from DAG postProcessing to create weak edges to encourage
1690 /// copy elimination.
1691 void CopyConstrain::apply(ScheduleDAGMI *DAG) {
1692   assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");
1693 
1694   MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
1695   if (FirstPos == DAG->end())
1696     return;
1697   RegionBeginIdx = DAG->getLIS()->getInstructionIndex(&*FirstPos);
1698   RegionEndIdx = DAG->getLIS()->getInstructionIndex(
1699     &*priorNonDebug(DAG->end(), DAG->begin()));
1700 
1701   for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
1702     SUnit *SU = &DAG->SUnits[Idx];
1703     if (!SU->getInstr()->isCopy())
1704       continue;
1705 
1706     constrainLocalCopy(SU, static_cast<ScheduleDAGMILive*>(DAG));
1707   }
1708 }
1709 
1710 //===----------------------------------------------------------------------===//
1711 // MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
1712 // and possibly other custom schedulers.
1713 //===----------------------------------------------------------------------===//
1714 
1715 static const unsigned InvalidCycle = ~0U;
1716 
1717 SchedBoundary::~SchedBoundary() { delete HazardRec; }
1718 
1719 void SchedBoundary::reset() {
1720   // A new HazardRec is created for each DAG and owned by SchedBoundary.
1721   // Destroying and reconstructing it is very expensive though. So keep
1722   // invalid, placeholder HazardRecs.
1723   if (HazardRec && HazardRec->isEnabled()) {
1724     delete HazardRec;
1725     HazardRec = nullptr;
1726   }
1727   Available.clear();
1728   Pending.clear();
1729   CheckPending = false;
1730   NextSUs.clear();
1731   CurrCycle = 0;
1732   CurrMOps = 0;
1733   MinReadyCycle = UINT_MAX;
1734   ExpectedLatency = 0;
1735   DependentLatency = 0;
1736   RetiredMOps = 0;
1737   MaxExecutedResCount = 0;
1738   ZoneCritResIdx = 0;
1739   IsResourceLimited = false;
1740   ReservedCycles.clear();
1741 #ifndef NDEBUG
1742   // Track the maximum number of stall cycles that could arise either from the
1743   // latency of a DAG edge or the number of cycles that a processor resource is
1744   // reserved (SchedBoundary::ReservedCycles).
1745   MaxObservedStall = 0;
1746 #endif
1747   // Reserve a zero-count for invalid CritResIdx.
1748   ExecutedResCounts.resize(1);
1749   assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
1750 }
1751 
1752 void SchedRemainder::
1753 init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
1754   reset();
1755   if (!SchedModel->hasInstrSchedModel())
1756     return;
1757   RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
1758   for (std::vector<SUnit>::iterator
1759          I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
1760     const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
1761     RemIssueCount += SchedModel->getNumMicroOps(I->getInstr(), SC)
1762       * SchedModel->getMicroOpFactor();
1763     for (TargetSchedModel::ProcResIter
1764            PI = SchedModel->getWriteProcResBegin(SC),
1765            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1766       unsigned PIdx = PI->ProcResourceIdx;
1767       unsigned Factor = SchedModel->getResourceFactor(PIdx);
1768       RemainingCounts[PIdx] += (Factor * PI->Cycles);
1769     }
1770   }
1771 }
1772 
1773 void SchedBoundary::
1774 init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
1775   reset();
1776   DAG = dag;
1777   SchedModel = smodel;
1778   Rem = rem;
1779   if (SchedModel->hasInstrSchedModel()) {
1780     ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds());
1781     ReservedCycles.resize(SchedModel->getNumProcResourceKinds(), InvalidCycle);
1782   }
1783 }
1784 
1785 /// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
1786 /// these "soft stalls" differently than the hard stall cycles based on CPU
1787 /// resources and computed by checkHazard(). A fully in-order model
1788 /// (MicroOpBufferSize==0) will not make use of this since instructions are not
1789 /// available for scheduling until they are ready. However, a weaker in-order
1790 /// model may use this for heuristics. For example, if a processor has in-order
1791 /// behavior when reading certain resources, this may come into play.
1792 unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
1793   if (!SU->isUnbuffered)
1794     return 0;
1795 
1796   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
1797   if (ReadyCycle > CurrCycle)
1798     return ReadyCycle - CurrCycle;
1799   return 0;
1800 }
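// Example with illustrative numbers: an unbuffered SU that becomes ready at
// cycle 7 while CurrCycle is 5 reports a 2-cycle soft stall; once the zone
// reaches cycle 7 the same query returns 0 and the node competes normally.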
1801 
1802 /// Compute the next cycle at which the given processor resource can be
1803 /// scheduled.
1804 unsigned SchedBoundary::
1805 getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
1806   unsigned NextUnreserved = ReservedCycles[PIdx];
1807   // If this resource has never been used, always return cycle zero.
1808   if (NextUnreserved == InvalidCycle)
1809     return 0;
1810   // For bottom-up scheduling add the cycles needed for the current operation.
1811   if (!isTop())
1812     NextUnreserved += Cycles;
1813   return NextUnreserved;
1814 }
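// Example with illustrative numbers: suppose ReservedCycles[PIdx] is 10.
// Top-down, the operation can next be placed at cycle 10. Bottom-up, an
// operation occupying the resource for Cycles=2 is placed at 10 + 2 = 12,
// so that its occupancy ends where the existing reservation begins.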
1815 
1816 /// Does this SU have a hazard within the current instruction group.
1817 ///
1818 /// The scheduler supports two modes of hazard recognition. The first is the
1819 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
1820 /// supports highly complicated in-order reservation tables
1821 /// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
1822 ///
1823 /// The second is a streamlined mechanism that checks for hazards based on
1824 /// simple counters that the scheduler itself maintains. It explicitly checks
1825 /// for instruction dispatch limitations, including the number of micro-ops that
1826 /// can dispatch per cycle.
1827 ///
1828 /// TODO: Also check whether the SU must start a new group.
1829 bool SchedBoundary::checkHazard(SUnit *SU) {
1830   if (HazardRec->isEnabled()
1831       && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
1832     return true;
1833   }
1834   unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
1835   if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
1836     DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
1837           << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
1838     return true;
1839   }
1840   if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
1841     const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1842     for (TargetSchedModel::ProcResIter
1843            PI = SchedModel->getWriteProcResBegin(SC),
1844            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1845       unsigned NRCycle = getNextResourceCycle(PI->ProcResourceIdx, PI->Cycles);
1846       if (NRCycle > CurrCycle) {
1847 #ifndef NDEBUG
1848         MaxObservedStall = std::max(PI->Cycles, MaxObservedStall);
1849 #endif
1850         DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") "
1851               << SchedModel->getResourceName(PI->ProcResourceIdx)
1852               << "=" << NRCycle << "c\n");
1853         return true;
1854       }
1855     }
1856   }
1857   return false;
1858 }
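// Example with illustrative numbers: with IssueWidth=4 and CurrMOps=3, a
// candidate expanding to 2 micro-ops fails the dispatch check (3 + 2 > 4) and
// is treated as hazarded until bumpCycle() starts a new instruction group.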
1859 
1860 // Find the unscheduled node in ReadySUs with the highest latency.
1861 unsigned SchedBoundary::
1862 findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
1863   SUnit *LateSU = nullptr;
1864   unsigned RemLatency = 0;
1865   for (ArrayRef<SUnit*>::iterator I = ReadySUs.begin(), E = ReadySUs.end();
1866        I != E; ++I) {
1867     unsigned L = getUnscheduledLatency(*I);
1868     if (L > RemLatency) {
1869       RemLatency = L;
1870       LateSU = *I;
1871     }
1872   }
1873   if (LateSU) {
1874     DEBUG(dbgs() << Available.getName() << " RemLatency SU("
1875           << LateSU->NodeNum << ") " << RemLatency << "c\n");
1876   }
1877   return RemLatency;
1878 }
1879 
1880 // Count resources in this zone and the remaining unscheduled
1881 // instructions. Return the max count, scaled. Set OtherCritIdx to the critical
1882 // resource index, or zero if the zone is issue limited.
1883 unsigned SchedBoundary::
1884 getOtherResourceCount(unsigned &OtherCritIdx) {
1885   OtherCritIdx = 0;
1886   if (!SchedModel->hasInstrSchedModel())
1887     return 0;
1888 
1889   unsigned OtherCritCount = Rem->RemIssueCount
1890     + (RetiredMOps * SchedModel->getMicroOpFactor());
1891   DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
1892         << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
1893   for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
1894        PIdx != PEnd; ++PIdx) {
1895     unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
1896     if (OtherCount > OtherCritCount) {
1897       OtherCritCount = OtherCount;
1898       OtherCritIdx = PIdx;
1899     }
1900   }
1901   if (OtherCritIdx) {
1902     DEBUG(dbgs() << "  " << Available.getName() << " + Remain CritRes: "
1903           << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
1904           << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
1905   }
1906   return OtherCritCount;
1907 }
1908 
1909 void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) {
1910   assert(SU->getInstr() && "Scheduled SUnit must have instr");
1911 
1912 #ifndef NDEBUG
1913   // ReadyCycle was bumped up to the CurrCycle when this node was
1914   // scheduled, but CurrCycle may have been eagerly advanced immediately after
1915   // scheduling, so may now be greater than ReadyCycle.
1916   if (ReadyCycle > CurrCycle)
1917     MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
1918 #endif
1919 
1920   if (ReadyCycle < MinReadyCycle)
1921     MinReadyCycle = ReadyCycle;
1922 
1923   // Check for interlocks first. For the purpose of other heuristics, an
1924   // instruction that cannot issue appears as if it's not in the ReadyQueue.
1925   bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
1926   if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU))
1927     Pending.push(SU);
1928   else
1929     Available.push(SU);
1930 
1931   // Record this node as an immediate dependent of the scheduled node.
1932   NextSUs.insert(SU);
1933 }
1934 
1935 void SchedBoundary::releaseTopNode(SUnit *SU) {
1936   if (SU->isScheduled)
1937     return;
1938 
1939   releaseNode(SU, SU->TopReadyCycle);
1940 }
1941 
1942 void SchedBoundary::releaseBottomNode(SUnit *SU) {
1943   if (SU->isScheduled)
1944     return;
1945 
1946   releaseNode(SU, SU->BotReadyCycle);
1947 }
1948 
1949 /// Move the boundary of scheduled code by one cycle.
1950 void SchedBoundary::bumpCycle(unsigned NextCycle) {
1951   if (SchedModel->getMicroOpBufferSize() == 0) {
1952     assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
1953     if (MinReadyCycle > NextCycle)
1954       NextCycle = MinReadyCycle;
1955   }
1956   // Update the current micro-ops, which will issue in the next cycle.
1957   unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
1958   CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;
1959 
1960   // Decrement DependentLatency based on the next cycle.
1961   if ((NextCycle - CurrCycle) > DependentLatency)
1962     DependentLatency = 0;
1963   else
1964     DependentLatency -= (NextCycle - CurrCycle);
1965 
1966   if (!HazardRec->isEnabled()) {
1967     // Bypass HazardRec virtual calls.
1968     CurrCycle = NextCycle;
1969   }
1970   else {
1971     // Bypass getHazardType calls in case of long latency.
1972     for (; CurrCycle != NextCycle; ++CurrCycle) {
1973       if (isTop())
1974         HazardRec->AdvanceCycle();
1975       else
1976         HazardRec->RecedeCycle();
1977     }
1978   }
1979   CheckPending = true;
1980   unsigned LFactor = SchedModel->getLatencyFactor();
1981   IsResourceLimited =
1982     (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
1983     > (int)LFactor;
1984 
1985   DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n');
1986 }
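// Example with illustrative numbers: with IssueWidth=2, bumping from cycle 3
// to cycle 5 frees DecMOps = 2 * (5 - 3) = 4 issue slots, so CurrMOps=3 drops
// to 0, and DependentLatency shrinks by the same 2 cycles, saturating at 0.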
1987 
1988 void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
1989   ExecutedResCounts[PIdx] += Count;
1990   if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
1991     MaxExecutedResCount = ExecutedResCounts[PIdx];
1992 }
1993 
1994 /// Add the given processor resource to this scheduled zone.
1995 ///
1996 /// \param Cycles indicates the number of consecutive (non-pipelined) cycles
1997 /// during which this resource is consumed.
1998 ///
1999 /// \return the next cycle at which the instruction may execute without
2000 /// oversubscribing resources.
2001 unsigned SchedBoundary::
2002 countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
2003   unsigned Factor = SchedModel->getResourceFactor(PIdx);
2004   unsigned Count = Factor * Cycles;
2005   DEBUG(dbgs() << "  " << SchedModel->getResourceName(PIdx)
2006         << " +" << Cycles << "x" << Factor << "u\n");
2007 
2008   // Update Executed resources counts.
2009   incExecutedResources(PIdx, Count);
2010   assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
2011   Rem->RemainingCounts[PIdx] -= Count;
2012 
2013   // Check if this resource exceeds the current critical resource. If so, it
2014   // becomes the critical resource.
2015   if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
2016     ZoneCritResIdx = PIdx;
2017     DEBUG(dbgs() << "  *** Critical resource "
2018           << SchedModel->getResourceName(PIdx) << ": "
2019           << getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
2020   }
2021   // For reserved resources, record the highest cycle using the resource.
2022   unsigned NextAvailable = getNextResourceCycle(PIdx, Cycles);
2023   if (NextAvailable > CurrCycle) {
2024     DEBUG(dbgs() << "  Resource conflict: "
2025           << SchedModel->getProcResource(PIdx)->Name << " reserved until @"
2026           << NextAvailable << "\n");
2027   }
2028   return NextAvailable;
2029 }
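// Example with illustrative numbers: if PIdx has resource factor 3 and the
// write occupies it for Cycles=2, Count = 3 * 2 = 6 scaled units are charged
// to this zone and debited from Rem->RemainingCounts[PIdx]; if that lifts
// PIdx above the current critical count, it becomes the zone's critical
// resource.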
2030 
2031 /// Move the boundary of scheduled code by one SUnit.
2032 void SchedBoundary::bumpNode(SUnit *SU) {
2033   // Update the reservation table.
2034   if (HazardRec->isEnabled()) {
2035     if (!isTop() && SU->isCall) {
2036       // Calls are scheduled with their preceding instructions. For bottom-up
2037       // scheduling, clear the pipeline state before emitting.
2038       HazardRec->Reset();
2039     }
2040     HazardRec->EmitInstruction(SU);
2041   }
2042   // checkHazard should prevent scheduling multiple instructions per cycle that
2043   // exceed the issue width.
2044   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2045   unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
2046   assert(
2047       (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
2048       "Cannot schedule this instruction's MicroOps in the current cycle.");
2049 
2050   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
2051   DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");
2052 
2053   unsigned NextCycle = CurrCycle;
2054   switch (SchedModel->getMicroOpBufferSize()) {
2055   case 0:
2056     assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
2057     break;
2058   case 1:
2059     if (ReadyCycle > NextCycle) {
2060       NextCycle = ReadyCycle;
2061       DEBUG(dbgs() << "  *** Stall until: " << ReadyCycle << "\n");
2062     }
2063     break;
2064   default:
2065     // We don't currently model the OOO reorder buffer, so consider all
2066     // scheduled MOps to be "retired". We do loosely model in-order resource
2067     // latency. If this instruction uses an in-order resource, account for any
2068     // likely stall cycles.
2069     if (SU->isUnbuffered && ReadyCycle > NextCycle)
2070       NextCycle = ReadyCycle;
2071     break;
2072   }
2073   RetiredMOps += IncMOps;
2074 
2075   // Update resource counts and critical resource.
2076   if (SchedModel->hasInstrSchedModel()) {
2077     unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
2078     assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
2079     Rem->RemIssueCount -= DecRemIssue;
2080     if (ZoneCritResIdx) {
2081       // Scale scheduled micro-ops for comparing with the critical resource.
2082       unsigned ScaledMOps =
2083         RetiredMOps * SchedModel->getMicroOpFactor();
2084 
2085       // If scaled micro-ops are now more than the previous critical resource by
2086       // a full cycle, then micro-ops issue becomes critical.
2087       if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
2088           >= (int)SchedModel->getLatencyFactor()) {
2089         ZoneCritResIdx = 0;
2090         DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
2091               << ScaledMOps / SchedModel->getLatencyFactor() << "c\n");
2092       }
2093     }
2094     for (TargetSchedModel::ProcResIter
2095            PI = SchedModel->getWriteProcResBegin(SC),
2096            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2097       unsigned RCycle =
2098         countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
2099       if (RCycle > NextCycle)
2100         NextCycle = RCycle;
2101     }
2102     if (SU->hasReservedResource) {
2103       // For reserved resources, record the highest cycle using the resource.
2104       // For top-down scheduling, this is the cycle in which we schedule this
2105       // instruction plus the number of cycles the operation reserves the
2106       // resource. For bottom-up it is simply the instruction's cycle.
2107       for (TargetSchedModel::ProcResIter
2108              PI = SchedModel->getWriteProcResBegin(SC),
2109              PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2110         unsigned PIdx = PI->ProcResourceIdx;
2111         if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
2112           if (isTop()) {
2113             ReservedCycles[PIdx] =
2114               std::max(getNextResourceCycle(PIdx, 0), NextCycle + PI->Cycles);
2115           }
2116           else
2117             ReservedCycles[PIdx] = NextCycle;
2118         }
2119       }
2120     }
2121   }
2122   // Update ExpectedLatency and DependentLatency.
2123   unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
2124   unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
2125   if (SU->getDepth() > TopLatency) {
2126     TopLatency = SU->getDepth();
2127     DEBUG(dbgs() << "  " << Available.getName()
2128           << " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n");
2129   }
2130   if (SU->getHeight() > BotLatency) {
2131     BotLatency = SU->getHeight();
2132     DEBUG(dbgs() << "  " << Available.getName()
2133           << " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n");
2134   }
2135   // If we stall for any reason, bump the cycle.
2136   if (NextCycle > CurrCycle) {
2137     bumpCycle(NextCycle);
2138   }
2139   else {
2140     // After updating ZoneCritResIdx and ExpectedLatency, check if we're
2141     // resource limited. If a stall occurred, bumpCycle does this.
2142     unsigned LFactor = SchedModel->getLatencyFactor();
2143     IsResourceLimited =
2144       (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
2145       > (int)LFactor;
2146   }
2147   // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
2148   // resets CurrMOps. Loop to handle instructions with more MOps than can issue in
2149   // one cycle.  Since we commonly reach the max MOps here, opportunistically
2150   // bump the cycle to avoid uselessly checking everything in the readyQ.
2151   CurrMOps += IncMOps;
2152   while (CurrMOps >= SchedModel->getIssueWidth()) {
2153     DEBUG(dbgs() << "  *** Max MOps " << CurrMOps
2154           << " at cycle " << CurrCycle << '\n');
2155     bumpCycle(++NextCycle);
2156   }
2157   DEBUG(dumpScheduledState());
2158 }
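// Example with illustrative numbers: with IssueWidth=2, scheduling a
// 3-micro-op instruction leaves CurrMOps=3 >= 2, so the loop above bumps the
// cycle once (retiring 2 slots, leaving CurrMOps=1) instead of re-testing
// every node in the ready queue against a full group.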
2159 
2160 /// Release pending ready nodes into the available queue. This makes them
2161 /// visible to heuristics.
2162 void SchedBoundary::releasePending() {
2163   // If the available queue is empty, it is safe to reset MinReadyCycle.
2164   if (Available.empty())
2165     MinReadyCycle = UINT_MAX;
2166 
2167   // Check to see if any of the pending instructions are ready to issue.  If
2168   // so, add them to the available queue.
2169   bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
2170   for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
2171     SUnit *SU = *(Pending.begin()+i);
2172     unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
2173 
2174     if (ReadyCycle < MinReadyCycle)
2175       MinReadyCycle = ReadyCycle;
2176 
2177     if (!IsBuffered && ReadyCycle > CurrCycle)
2178       continue;
2179 
2180     if (checkHazard(SU))
2181       continue;
2182 
2183     Available.push(SU);
2184     Pending.remove(Pending.begin()+i);
2185     --i; --e;
2186   }
2187   DEBUG(if (!Pending.empty()) Pending.dump());
2188   CheckPending = false;
2189 }
2190 
2191 /// Remove SU from the ready set for this boundary.
2192 void SchedBoundary::removeReady(SUnit *SU) {
2193   if (Available.isInQueue(SU))
2194     Available.remove(Available.find(SU));
2195   else {
2196     assert(Pending.isInQueue(SU) && "bad ready count");
2197     Pending.remove(Pending.find(SU));
2198   }
2199 }
2200 
2201 /// If this queue only has one ready candidate, return it. As a side effect,
2202 /// defer any nodes that now hit a hazard, and advance the cycle until at least
2203 /// one node is ready. If multiple instructions are ready, return nullptr.
2204 SUnit *SchedBoundary::pickOnlyChoice() {
2205   if (CheckPending)
2206     releasePending();
2207 
2208   if (CurrMOps > 0) {
2209     // Defer any ready instrs that now have a hazard.
2210     for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
2211       if (checkHazard(*I)) {
2212         Pending.push(*I);
2213         I = Available.remove(I);
2214         continue;
2215       }
2216       ++I;
2217     }
2218   }
2219   for (unsigned i = 0; Available.empty(); ++i) {
2220 //  FIXME: Re-enable assert once PR20057 is resolved.
2221 //    assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
2222 //           "permanent hazard");
2223     (void)i;
2224     bumpCycle(CurrCycle + 1);
2225     releasePending();
2226   }
2227   if (Available.size() == 1)
2228     return *Available.begin();
2229   return nullptr;
2230 }
2231 
2232 #ifndef NDEBUG
2233 // This is useful information to dump after bumpNode.
2234 // Note that the Queue contents are more useful before pickNodeFromQueue.
2235 void SchedBoundary::dumpScheduledState() {
2236   unsigned ResFactor;
2237   unsigned ResCount;
2238   if (ZoneCritResIdx) {
2239     ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
2240     ResCount = getResourceCount(ZoneCritResIdx);
2241   }
2242   else {
2243     ResFactor = SchedModel->getMicroOpFactor();
2244     ResCount = RetiredMOps * SchedModel->getMicroOpFactor();
2245   }
2246   unsigned LFactor = SchedModel->getLatencyFactor();
2247   dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
2248          << "  Retired: " << RetiredMOps;
2249   dbgs() << "\n  Executed: " << getExecutedCount() / LFactor << "c";
2250   dbgs() << "\n  Critical: " << ResCount / LFactor << "c, "
2251          << ResCount / ResFactor << " "
2252          << SchedModel->getResourceName(ZoneCritResIdx)
2253          << "\n  ExpectedLatency: " << ExpectedLatency << "c\n"
2254          << (IsResourceLimited ? "  - Resource" : "  - Latency")
2255          << " limited.\n";
2256 }
2257 #endif
2258 
2259 //===----------------------------------------------------------------------===//
2260 // GenericScheduler - Generic implementation of MachineSchedStrategy.
2261 //===----------------------------------------------------------------------===//
2262 
2263 void GenericSchedulerBase::SchedCandidate::
2264 initResourceDelta(const ScheduleDAGMI *DAG,
2265                   const TargetSchedModel *SchedModel) {
2266   if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
2267     return;
2268 
2269   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2270   for (TargetSchedModel::ProcResIter
2271          PI = SchedModel->getWriteProcResBegin(SC),
2272          PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2273     if (PI->ProcResourceIdx == Policy.ReduceResIdx)
2274       ResDelta.CritResources += PI->Cycles;
2275     if (PI->ProcResourceIdx == Policy.DemandResIdx)
2276       ResDelta.DemandedResources += PI->Cycles;
2277   }
2278 }
2279 
2280 /// Set the CandPolicy for a scheduling zone given the current resources and
2281 /// latencies inside and outside the zone.
2282 void GenericSchedulerBase::setPolicy(CandPolicy &Policy,
2283                                      bool IsPostRA,
2284                                      SchedBoundary &CurrZone,
2285                                      SchedBoundary *OtherZone) {
2286   // Apply preemptive heuristics based on the total latency and resources
2287   // inside and outside this zone. Potential stalls should be considered before
2288   // following this policy.
2289 
2290   // Compute remaining latency. We need this both to determine whether the
2291   // overall schedule has become latency-limited and whether the instructions
2292   // outside this zone are resource or latency limited.
2293   //
2294   // The "dependent" latency is updated incrementally during scheduling as the
2295   // max height/depth of scheduled nodes minus the cycles since it was
2296   // scheduled:
2297   //   DLat = max (N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
2298   //
2299   // The "independent" latency is the max ready queue depth:
2300   //   ILat = max N.depth for N in Available|Pending
2301   //
2302   // RemainingLatency is the greater of independent and dependent latency.
2303   unsigned RemLatency = CurrZone.getDependentLatency();
2304   RemLatency = std::max(RemLatency,
2305                         CurrZone.findMaxLatency(CurrZone.Available.elements()));
2306   RemLatency = std::max(RemLatency,
2307                         CurrZone.findMaxLatency(CurrZone.Pending.elements()));
2308 
2309   // Compute the critical resource outside the zone.
2310   unsigned OtherCritIdx = 0;
2311   unsigned OtherCount =
2312     OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;
2313 
2314   bool OtherResLimited = false;
2315   if (SchedModel->hasInstrSchedModel()) {
2316     unsigned LFactor = SchedModel->getLatencyFactor();
2317     OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
2318   }
2319   // Schedule aggressively for latency in PostRA mode. We don't check for
2320   // acyclic latency during PostRA, and highly out-of-order processors will
2321   // skip PostRA scheduling.
2322   if (!OtherResLimited) {
2323     if (IsPostRA || (RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath)) {
2324       Policy.ReduceLatency |= true;
2325       DEBUG(dbgs() << "  " << CurrZone.Available.getName()
2326             << " RemainingLatency " << RemLatency << " + "
2327             << CurrZone.getCurrCycle() << "c > CritPath "
2328             << Rem.CriticalPath << "\n");
2329     }
2330   }
2331   // If the same resource is limiting inside and outside the zone, do nothing.
2332   if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
2333     return;
2334 
2335   DEBUG(
2336     if (CurrZone.isResourceLimited()) {
2337       dbgs() << "  " << CurrZone.Available.getName() << " ResourceLimited: "
2338              << SchedModel->getResourceName(CurrZone.getZoneCritResIdx())
2339              << "\n";
2340     }
2341     if (OtherResLimited)
2342       dbgs() << "  RemainingLimit: "
2343              << SchedModel->getResourceName(OtherCritIdx) << "\n";
2344     if (!CurrZone.isResourceLimited() && !OtherResLimited)
2345       dbgs() << "  Latency limited both directions.\n");
2346 
2347   if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
2348     Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();
2349 
2350   if (OtherResLimited)
2351     Policy.DemandResIdx = OtherCritIdx;
2352 }
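// Example with illustrative numbers: with RemLatency=12c, CurrCycle=6c, and
// Rem.CriticalPath=15c, 12 + 6 > 15 so the zone sets ReduceLatency. Had some
// resource's scaled count outside the zone exceeded RemLatency by more than a
// full cycle instead, the policy would demand that resource via DemandResIdx
// rather than chase latency.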
2353 
2354 #ifndef NDEBUG
2355 const char *GenericSchedulerBase::getReasonStr(
2356   GenericSchedulerBase::CandReason Reason) {
2357   switch (Reason) {
2358   case NoCand:         return "NOCAND    ";
2359   case PhysRegCopy:    return "PREG-COPY ";
2360   case RegExcess:      return "REG-EXCESS";
2361   case RegCritical:    return "REG-CRIT  ";
2362   case Stall:          return "STALL     ";
2363   case Cluster:        return "CLUSTER   ";
2364   case Weak:           return "WEAK      ";
2365   case RegMax:         return "REG-MAX   ";
2366   case ResourceReduce: return "RES-REDUCE";
2367   case ResourceDemand: return "RES-DEMAND";
2368   case TopDepthReduce: return "TOP-DEPTH ";
2369   case TopPathReduce:  return "TOP-PATH  ";
2370   case BotHeightReduce:return "BOT-HEIGHT";
2371   case BotPathReduce:  return "BOT-PATH  ";
2372   case NextDefUse:     return "DEF-USE   ";
2373   case NodeOrder:      return "ORDER     ";
2374   }
2375   llvm_unreachable("Unknown reason!");
2376 }
2377 
2378 void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
2379   PressureChange P;
2380   unsigned ResIdx = 0;
2381   unsigned Latency = 0;
2382   switch (Cand.Reason) {
2383   default:
2384     break;
2385   case RegExcess:
2386     P = Cand.RPDelta.Excess;
2387     break;
2388   case RegCritical:
2389     P = Cand.RPDelta.CriticalMax;
2390     break;
2391   case RegMax:
2392     P = Cand.RPDelta.CurrentMax;
2393     break;
2394   case ResourceReduce:
2395     ResIdx = Cand.Policy.ReduceResIdx;
2396     break;
2397   case ResourceDemand:
2398     ResIdx = Cand.Policy.DemandResIdx;
2399     break;
2400   case TopDepthReduce:
2401     Latency = Cand.SU->getDepth();
2402     break;
2403   case TopPathReduce:
2404     Latency = Cand.SU->getHeight();
2405     break;
2406   case BotHeightReduce:
2407     Latency = Cand.SU->getHeight();
2408     break;
2409   case BotPathReduce:
2410     Latency = Cand.SU->getDepth();
2411     break;
2412   }
2413   dbgs() << "  Cand SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
2414   if (P.isValid())
2415     dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
2416            << ":" << P.getUnitInc() << " ";
2417   else
2418     dbgs() << "      ";
2419   if (ResIdx)
2420     dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
2421   else
2422     dbgs() << "         ";
2423   if (Latency)
2424     dbgs() << " " << Latency << " cycles ";
2425   else
2426     dbgs() << "          ";
2427   dbgs() << '\n';
2428 }
2429 #endif
2430 
2431 /// Return true if this heuristic determines order.
2432 static bool tryLess(int TryVal, int CandVal,
2433                     GenericSchedulerBase::SchedCandidate &TryCand,
2434                     GenericSchedulerBase::SchedCandidate &Cand,
2435                     GenericSchedulerBase::CandReason Reason) {
2436   if (TryVal < CandVal) {
2437     TryCand.Reason = Reason;
2438     return true;
2439   }
2440   if (TryVal > CandVal) {
2441     if (Cand.Reason > Reason)
2442       Cand.Reason = Reason;
2443     return true;
2444   }
2445   Cand.setRepeat(Reason);
2446   return false;
2447 }
2448 
2449 static bool tryGreater(int TryVal, int CandVal,
2450                        GenericSchedulerBase::SchedCandidate &TryCand,
2451                        GenericSchedulerBase::SchedCandidate &Cand,
2452                        GenericSchedulerBase::CandReason Reason) {
2453   if (TryVal > CandVal) {
2454     TryCand.Reason = Reason;
2455     return true;
2456   }
2457   if (TryVal < CandVal) {
2458     if (Cand.Reason > Reason)
2459       Cand.Reason = Reason;
2460     return true;
2461   }
2462   Cand.setRepeat(Reason);
2463   return false;
2464 }
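// Illustrative note: these helpers implement the hierarchical comparison used
// by tryCandidate() below. A return of true ends the comparison at this
// heuristic: if TryCand.Reason was set, the new candidate wins; otherwise the
// incumbent stands (with its reason possibly downgraded). A return of false
// means a tie, deferring to the next, weaker heuristic.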
2465 
2466 static bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
2467                        GenericSchedulerBase::SchedCandidate &Cand,
2468                        SchedBoundary &Zone) {
2469   if (Zone.isTop()) {
2470     if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
2471       if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2472                   TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
2473         return true;
2474     }
2475     if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2476                    TryCand, Cand, GenericSchedulerBase::TopPathReduce))
2477       return true;
2478   }
2479   else {
2480     if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
2481       if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2482                   TryCand, Cand, GenericSchedulerBase::BotHeightReduce))
2483         return true;
2484     }
2485     if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2486                    TryCand, Cand, GenericSchedulerBase::BotPathReduce))
2487       return true;
2488   }
2489   return false;
2490 }
2491 
2492 static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand,
2493                       bool IsTop) {
2494   DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
2495         << GenericSchedulerBase::getReasonStr(Cand.Reason) << '\n');
2496 }
2497 
2498 void GenericScheduler::initialize(ScheduleDAGMI *dag) {
2499   assert(dag->hasVRegLiveness() &&
2500          "(PreRA)GenericScheduler needs vreg liveness");
2501   DAG = static_cast<ScheduleDAGMILive*>(dag);
2502   SchedModel = DAG->getSchedModel();
2503   TRI = DAG->TRI;
2504 
2505   Rem.init(DAG, SchedModel);
2506   Top.init(DAG, SchedModel, &Rem);
2507   Bot.init(DAG, SchedModel, &Rem);
2508 
2509   // Initialize resource counts.
2510 
2511   // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
2512   // are disabled, then these HazardRecs will be disabled.
2513   const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
2514   if (!Top.HazardRec) {
2515     Top.HazardRec =
2516         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
2517             Itin, DAG);
2518   }
2519   if (!Bot.HazardRec) {
2520     Bot.HazardRec =
2521         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
2522             Itin, DAG);
2523   }
2524 }
2525 
2526 /// Initialize the per-region scheduling policy.
2527 void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
2528                                   MachineBasicBlock::iterator End,
2529                                   unsigned NumRegionInstrs) {
2530   const MachineFunction &MF = *Begin->getParent()->getParent();
2531   const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();
2532 
2533   // Avoid setting up the register pressure tracker for small regions to save
2534   // compile time. As a rough heuristic, only track pressure when the number of
2535   // schedulable instructions exceeds half the integer register file.
2536   RegionPolicy.ShouldTrackPressure = true;
2537   for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
2538     MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
2539     if (TLI->isTypeLegal(LegalIntVT)) {
2540       unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
2541         TLI->getRegClassFor(LegalIntVT));
2542       RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
2543     }
2544   }
2545 
2546   // For generic targets, we default to bottom-up, because it's simpler and more
2547   // compile-time optimizations have been implemented in that direction.
2548   RegionPolicy.OnlyBottomUp = true;
2549 
2550   // Allow the subtarget to override default policy.
2551   MF.getSubtarget().overrideSchedPolicy(RegionPolicy, Begin, End,
2552                                         NumRegionInstrs);
2553 
2554   // After subtarget overrides, apply command line options.
2555   if (!EnableRegPressure)
2556     RegionPolicy.ShouldTrackPressure = false;
2557 
2558   // Check -misched-topdown/bottomup can force or unforce scheduling direction.
2559   // e.g. -misched-bottomup=false allows scheduling in both directions.
2560   assert((!ForceTopDown || !ForceBottomUp) &&
2561          "-misched-topdown incompatible with -misched-bottomup");
2562   if (ForceBottomUp.getNumOccurrences() > 0) {
2563     RegionPolicy.OnlyBottomUp = ForceBottomUp;
2564     if (RegionPolicy.OnlyBottomUp)
2565       RegionPolicy.OnlyTopDown = false;
2566   }
2567   if (ForceTopDown.getNumOccurrences() > 0) {
2568     RegionPolicy.OnlyTopDown = ForceTopDown;
2569     if (RegionPolicy.OnlyTopDown)
2570       RegionPolicy.OnlyBottomUp = false;
2571   }
2572 }
2573 
2574 void GenericScheduler::dumpPolicy() {
2575   dbgs() << "GenericScheduler RegionPolicy: "
2576          << " ShouldTrackPressure=" << RegionPolicy.ShouldTrackPressure
2577          << " OnlyTopDown=" << RegionPolicy.OnlyTopDown
2578          << " OnlyBottomUp=" << RegionPolicy.OnlyBottomUp
2579          << "\n";
2580 }
2581 
2582 /// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
2583 /// critical path by more cycles than it takes to drain the instruction buffer.
2584 /// We estimate an upper bound on in-flight instructions as:
2585 ///
2586 /// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
2587 /// InFlightIterations = AcyclicPath / CyclesPerIteration
2588 /// InFlightResources = InFlightIterations * LoopResources
2589 ///
2590 /// TODO: Check execution resources in addition to IssueCount.
2591 void GenericScheduler::checkAcyclicLatency() {
2592   if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
2593     return;
2594 
2595   // Scaled number of cycles per loop iteration.
2596   unsigned IterCount =
2597     std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
2598              Rem.RemIssueCount);
2599   // Scaled acyclic critical path.
2600   unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
2601   // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
2602   unsigned InFlightCount =
2603     (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
2604   unsigned BufferLimit =
2605     SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();
2606 
2607   Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;
2608 
2609   DEBUG(dbgs() << "IssueCycles="
2610         << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
2611         << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
2612         << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
2613         << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
2614         << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
2615         if (Rem.IsAcyclicLatencyLimited)
2616           dbgs() << "  ACYCLIC LATENCY LIMIT\n");
2617 }
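// Worked example (illustrative numbers; LatencyFactor and MicroOpFactor both
// 1): CyclicCritPath=10c, CriticalPath=40c, RemIssueCount=20 micro-ops.
// IterCount = max(10, 20) = 20; InFlightCount = (40*20 + 19) / 20 = 40
// micro-ops in flight. With a 32-entry micro-op buffer, 40 > 32, so the loop
// is flagged acyclic-latency limited and scheduled aggressively for latency.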
2618 
2619 void GenericScheduler::registerRoots() {
2620   Rem.CriticalPath = DAG->ExitSU.getDepth();
2621 
2622   // Some roots may not feed into ExitSU. Check all of them just in case.
2623   for (std::vector<SUnit*>::const_iterator
2624          I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
2625     if ((*I)->getDepth() > Rem.CriticalPath)
2626       Rem.CriticalPath = (*I)->getDepth();
2627   }
2628   DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
2629   if (DumpCriticalPathLength) {
2630     errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n";
2631   }
2632 
2633   if (EnableCyclicPath) {
2634     Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
2635     checkAcyclicLatency();
2636   }
2637 }
2638 
2639 static bool tryPressure(const PressureChange &TryP,
2640                         const PressureChange &CandP,
2641                         GenericSchedulerBase::SchedCandidate &TryCand,
2642                         GenericSchedulerBase::SchedCandidate &Cand,
2643                         GenericSchedulerBase::CandReason Reason,
2644                         const TargetRegisterInfo *TRI,
2645                         const MachineFunction &MF) {
2646   unsigned TryPSet = TryP.getPSetOrMax();
2647   unsigned CandPSet = CandP.getPSetOrMax();
2648   // If both candidates affect the same set, go with the smallest increase.
2649   if (TryPSet == CandPSet) {
2650     return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
2651                    Reason);
2652   }
2653   // If one candidate decreases and the other increases, go with it.
2654   // Invalid candidates have UnitInc==0.
2655   if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
2656                  Reason)) {
2657     return true;
2658   }
2659 
2660   int TryRank = TryP.isValid() ? TRI->getRegPressureSetScore(MF, TryPSet) :
2661                                  std::numeric_limits<int>::max();
2662 
2663   int CandRank = CandP.isValid() ? TRI->getRegPressureSetScore(MF, CandPSet) :
2664                                    std::numeric_limits<int>::max();
2665 
2666   // If the candidates are decreasing pressure, reverse priority.
2667   if (TryP.getUnitInc() < 0)
2668     std::swap(TryRank, CandRank);
2669   return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
2670 }
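// Example: if TryCand raises pressure in the same set by +1 and Cand by +3,
// tryLess() prefers TryCand. If they touch different sets and only TryCand's
// change is a decrease, the decrease wins outright; otherwise the target's
// per-set scores break the tie, inverted when both changes are decreases.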
2671 
2672 static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
2673   return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
2674 }
2675 
2676 /// Minimize physical register live ranges. Regalloc wants them adjacent to
2677 /// their physreg def/use.
2678 ///
2679 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
2680 /// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
2681 /// with the operation that produces or consumes the physreg. We'll do this when
2682 /// regalloc has support for parallel copies.
2683 static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
2684   const MachineInstr *MI = SU->getInstr();
2685   if (!MI->isCopy())
2686     return 0;
2687 
2688   unsigned ScheduledOper = isTop ? 1 : 0;
2689   unsigned UnscheduledOper = isTop ? 0 : 1;
2690   // If we have already scheduled the physreg producer/consumer, immediately
2691   // schedule the copy.
2692   if (TargetRegisterInfo::isPhysicalRegister(
2693         MI->getOperand(ScheduledOper).getReg()))
2694     return 1;
2695   // If the physreg is at the boundary, defer it. Otherwise schedule it
2696   // immediately to free the dependent. We can hoist the copy later.
2697   bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
2698   if (TargetRegisterInfo::isPhysicalRegister(
2699         MI->getOperand(UnscheduledOper).getReg()))
2700     return AtBoundary ? -1 : 1;
2701   return 0;
2702 }
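// Example (illustrative, bottom-up): for "%EAX = COPY %vreg5" the physreg def
// is operand 0, the already-scheduled side, so the copy is biased (+1) to
// issue at once, next to the EAX reader scheduled below it. For
// "%vreg1 = COPY %EAX" the physreg sits on the unscheduled side: the copy is
// deferred (-1) at the region boundary and otherwise pulled in (+1) to free
// its dependent.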
2703 
2704 /// Apply a set of heuristics to a new candidate. Heuristics are currently
2705 /// hierarchical. This may be more efficient than a graduated cost model because
2706 /// we don't need to evaluate all aspects of the model for each node in the
2707 /// queue. But it's really done to make the heuristics easier to debug and
2708 /// statistically analyze.
2709 ///
2710 /// \param Cand provides the policy and current best candidate.
2711 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
2712 /// \param Zone describes the scheduled zone that we are extending.
2713 /// \param RPTracker describes reg pressure within the scheduled zone.
2714 /// \param TempTracker is a scratch pressure tracker to reuse in queries.
2715 void GenericScheduler::tryCandidate(SchedCandidate &Cand,
2716                                     SchedCandidate &TryCand,
2717                                     SchedBoundary &Zone,
2718                                     const RegPressureTracker &RPTracker,
2719                                     RegPressureTracker &TempTracker) {
2720 
2721   if (DAG->isTrackingPressure()) {
2722     // Always initialize TryCand's RPDelta.
2723     if (Zone.isTop()) {
2724       TempTracker.getMaxDownwardPressureDelta(
2725         TryCand.SU->getInstr(),
2726         TryCand.RPDelta,
2727         DAG->getRegionCriticalPSets(),
2728         DAG->getRegPressure().MaxSetPressure);
2729     }
2730     else {
2731       if (VerifyScheduling) {
2732         TempTracker.getMaxUpwardPressureDelta(
2733           TryCand.SU->getInstr(),
2734           &DAG->getPressureDiff(TryCand.SU),
2735           TryCand.RPDelta,
2736           DAG->getRegionCriticalPSets(),
2737           DAG->getRegPressure().MaxSetPressure);
2738       }
2739       else {
2740         RPTracker.getUpwardPressureDelta(
2741           TryCand.SU->getInstr(),
2742           DAG->getPressureDiff(TryCand.SU),
2743           TryCand.RPDelta,
2744           DAG->getRegionCriticalPSets(),
2745           DAG->getRegPressure().MaxSetPressure);
2746       }
2747     }
2748   }
2749   DEBUG(if (TryCand.RPDelta.Excess.isValid())
2750           dbgs() << "  Try  SU(" << TryCand.SU->NodeNum << ") "
2751                  << TRI->getRegPressureSetName(TryCand.RPDelta.Excess.getPSet())
2752                  << ":" << TryCand.RPDelta.Excess.getUnitInc() << "\n");
2753 
2754   // Initialize the candidate if needed.
2755   if (!Cand.isValid()) {
2756     TryCand.Reason = NodeOrder;
2757     return;
2758   }
2759 
2760   if (tryGreater(biasPhysRegCopy(TryCand.SU, Zone.isTop()),
2761                  biasPhysRegCopy(Cand.SU, Zone.isTop()),
2762                  TryCand, Cand, PhysRegCopy))
2763     return;
2764 
2765   // Avoid exceeding the target's limit.
2766   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
2767                                                Cand.RPDelta.Excess,
2768                                                TryCand, Cand, RegExcess, TRI,
2769                                                DAG->MF))
2770     return;
2771 
2772   // Avoid increasing the max critical pressure in the scheduled region.
2773   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
2774                                                Cand.RPDelta.CriticalMax,
2775                                                TryCand, Cand, RegCritical, TRI,
2776                                                DAG->MF))
2777     return;
2778 
2779   // For loops that are acyclic path limited, aggressively schedule for latency.
2780   // This can result in very long dependence chains scheduled in sequence, so
2781   // once every cycle (when CurrMOps == 0), switch to normal heuristics.
2782   if (Rem.IsAcyclicLatencyLimited && !Zone.getCurrMOps()
2783       && tryLatency(TryCand, Cand, Zone))
2784     return;
2785 
2786   // Prioritize instructions that read unbuffered resources by stall cycles.
2787   if (tryLess(Zone.getLatencyStallCycles(TryCand.SU),
2788               Zone.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
2789     return;
2790 
2791   // Keep clustered nodes together to encourage downstream peephole
2792   // optimizations which may reduce resource requirements.
2793   //
2794   // This is a best effort to set things up for a post-RA pass. Optimizations
2795   // like generating loads of multiple registers should ideally be done within
2796   // the scheduler pass by combining the loads during DAG postprocessing.
2797   const SUnit *NextClusterSU =
2798     Zone.isTop() ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
2799   if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU,
2800                  TryCand, Cand, Cluster))
2801     return;
2802 
2803   // Weak edges are for clustering and other constraints.
2804   if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()),
2805               getWeakLeft(Cand.SU, Zone.isTop()),
2806               TryCand, Cand, Weak)) {
2807     return;
2808   }
2809   // Avoid increasing the max pressure of the entire region.
2810   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
2811                                                Cand.RPDelta.CurrentMax,
2812                                                TryCand, Cand, RegMax, TRI,
2813                                                DAG->MF))
2814     return;
2815 
2816   // Avoid critical resource consumption and balance the schedule.
2817   TryCand.initResourceDelta(DAG, SchedModel);
2818   if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
2819               TryCand, Cand, ResourceReduce))
2820     return;
2821   if (tryGreater(TryCand.ResDelta.DemandedResources,
2822                  Cand.ResDelta.DemandedResources,
2823                  TryCand, Cand, ResourceDemand))
2824     return;
2825 
2826   // Avoid serializing long latency dependence chains.
2827   // For acyclic path limited loops, latency was already checked above.
2828   if (!RegionPolicy.DisableLatencyHeuristic && Cand.Policy.ReduceLatency &&
2829       !Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, Zone)) {
2830     return;
2831   }
2832 
2833   // Prefer immediate defs/users of the last scheduled instruction. This is a
2834   // local pressure avoidance strategy that also makes the machine code
2835   // readable.
2836   if (tryGreater(Zone.isNextSU(TryCand.SU), Zone.isNextSU(Cand.SU),
2837                  TryCand, Cand, NextDefUse))
2838     return;
2839 
2840   // Fall through to original instruction order.
2841   if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
2842       || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
2843     TryCand.Reason = NodeOrder;
2844   }
2845 }
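// For reference, the hierarchy evaluated above, from most to least important:
// PhysRegCopy, RegExcess, RegCritical, acyclic latency, Stall, Cluster, Weak,
// RegMax, ResourceReduce, ResourceDemand, latency, NextDefUse, NodeOrder.
// Each tryLess/tryGreater call returns as soon as one criterion expresses a
// strict preference, so everything below it serves purely as a tie-breaker.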
2846 
2847 /// Pick the best candidate from the queue.
2848 ///
2849 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
2850 /// DAG building. To adjust for the current scheduling location we need to
2851 /// maintain the number of vreg uses remaining to be top-scheduled.
2852 void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
2853                                          const RegPressureTracker &RPTracker,
2854                                          SchedCandidate &Cand) {
2855   ReadyQueue &Q = Zone.Available;
2856 
2857   DEBUG(Q.dump());
2858 
2859   // getMaxPressureDelta temporarily modifies the tracker.
2860   RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
2861 
2862   for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
2863 
2864     SchedCandidate TryCand(Cand.Policy);
2865     TryCand.SU = *I;
2866     tryCandidate(Cand, TryCand, Zone, RPTracker, TempTracker);
2867     if (TryCand.Reason != NoCand) {
2868       // Initialize resource delta if needed in case future heuristics query it.
2869       if (TryCand.ResDelta == SchedResourceDelta())
2870         TryCand.initResourceDelta(DAG, SchedModel);
2871       Cand.setBest(TryCand);
2872       DEBUG(traceCandidate(Cand));
2873     }
2874   }
2875 }
2876 
2877 /// Pick the best candidate node from either the top or bottom queue.
2878 SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
2879   // Schedule as far as possible in the direction of no choice. This is most
2880   // efficient, but also provides the best heuristics for CriticalPSets.
2881   if (SUnit *SU = Bot.pickOnlyChoice()) {
2882     IsTopNode = false;
2883     DEBUG(dbgs() << "Pick Bot ONLY1\n");
2884     return SU;
2885   }
2886   if (SUnit *SU = Top.pickOnlyChoice()) {
2887     IsTopNode = true;
2888     DEBUG(dbgs() << "Pick Top ONLY1\n");
2889     return SU;
2890   }
2891   CandPolicy NoPolicy;
2892   SchedCandidate BotCand(NoPolicy);
2893   SchedCandidate TopCand(NoPolicy);
2894   // Set the bottom-up policy based on the state of the current bottom zone and
2895   // the instructions outside the zone, including the top zone.
2896   setPolicy(BotCand.Policy, /*IsPostRA=*/false, Bot, &Top);
2897   // Set the top-down policy based on the state of the current top zone and
2898   // the instructions outside the zone, including the bottom zone.
2899   setPolicy(TopCand.Policy, /*IsPostRA=*/false, Top, &Bot);
2900 
2901   // Prefer bottom scheduling when heuristics are silent.
2902   pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
2903   assert(BotCand.Reason != NoCand && "failed to find the first candidate");
2904 
2905   // If either Q has a single candidate that provides the least increase in
2906   // Excess pressure, we can immediately schedule from that Q.
2907   //
2908   // RegionCriticalPSets summarizes the pressure within the scheduled region and
2909   // affects picking from either Q. If scheduling in one direction must
2910   // increase pressure for one of the excess PSets, then schedule in that
2911   // direction first to provide more freedom in the other direction.
2912   if ((BotCand.Reason == RegExcess && !BotCand.isRepeat(RegExcess))
2913       || (BotCand.Reason == RegCritical
2914           && !BotCand.isRepeat(RegCritical)))
2915   {
2916     IsTopNode = false;
2917     tracePick(BotCand, IsTopNode);
2918     return BotCand.SU;
2919   }
2920   // Check if the top Q has a better candidate.
2921   pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2922   assert(TopCand.Reason != NoCand && "failed to find the first candidate");
2923 
2924   // Choose the queue with the most important (lowest enum) reason.
2925   if (TopCand.Reason < BotCand.Reason) {
2926     IsTopNode = true;
2927     tracePick(TopCand, IsTopNode);
2928     return TopCand.SU;
2929   }
2930   // Otherwise prefer the bottom candidate, in node order if all else failed.
2931   IsTopNode = false;
2932   tracePick(BotCand, IsTopNode);
2933   return BotCand.SU;
2934 }
2935 
2936 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
2937 SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
2938   if (DAG->top() == DAG->bottom()) {
2939     assert(Top.Available.empty() && Top.Pending.empty() &&
2940            Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
2941     return nullptr;
2942   }
2943   SUnit *SU;
2944   do {
2945     if (RegionPolicy.OnlyTopDown) {
2946       SU = Top.pickOnlyChoice();
2947       if (!SU) {
2948         CandPolicy NoPolicy;
2949         SchedCandidate TopCand(NoPolicy);
2950         pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2951         assert(TopCand.Reason != NoCand && "failed to find a candidate");
2952         tracePick(TopCand, true);
2953         SU = TopCand.SU;
2954       }
2955       IsTopNode = true;
2956     }
2957     else if (RegionPolicy.OnlyBottomUp) {
2958       SU = Bot.pickOnlyChoice();
2959       if (!SU) {
2960         CandPolicy NoPolicy;
2961         SchedCandidate BotCand(NoPolicy);
2962         pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
2963         assert(BotCand.Reason != NoCand && "failed to find a candidate");
2964         tracePick(BotCand, false);
2965         SU = BotCand.SU;
2966       }
2967       IsTopNode = false;
2968     }
2969     else {
2970       SU = pickNodeBidirectional(IsTopNode);
2971     }
2972   } while (SU->isScheduled);
2973 
2974   if (SU->isTopReady())
2975     Top.removeReady(SU);
2976   if (SU->isBottomReady())
2977     Bot.removeReady(SU);
2978 
2979   DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
2980   return SU;
2981 }
2982 
2983 void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
2984 
2985   MachineBasicBlock::iterator InsertPos = SU->getInstr();
2986   if (!isTop)
2987     ++InsertPos;
2988   SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;
2989 
2990   // Find already scheduled copies with a single physreg dependence and move
2991   // them just above the scheduled instruction.
2992   for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end();
2993        I != E; ++I) {
2994     if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg()))
2995       continue;
2996     SUnit *DepSU = I->getSUnit();
2997     if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
2998       continue;
2999     MachineInstr *Copy = DepSU->getInstr();
3000     if (!Copy->isCopy())
3001       continue;
3002     DEBUG(dbgs() << "  Rescheduling physreg copy ";
3003           I->getSUnit()->dump(DAG));
3004     DAG->moveInstruction(Copy, InsertPos);
3005   }
3006 }
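// Illustrative effect (hypothetical MIR): scheduling top-down, when an
// instruction that reads %EAX is placed, an already scheduled
// "%EAX = COPY %vregN" whose only data successor is that instruction gets
// moved to sit immediately above it, shrinking the %EAX live range.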
3007 
3008 /// Update the scheduler's state after scheduling a node. This is the same node
3009 /// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
3010 /// update its state based on the current cycle before MachineSchedStrategy
3011 /// does.
3012 ///
3013 /// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
3014 /// them here. See comments in biasPhysRegCopy.
3015 void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3016   if (IsTopNode) {
3017     SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3018     Top.bumpNode(SU);
3019     if (SU->hasPhysRegUses)
3020       reschedulePhysRegCopies(SU, true);
3021   }
3022   else {
3023     SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
3024     Bot.bumpNode(SU);
3025     if (SU->hasPhysRegDefs)
3026       reschedulePhysRegCopies(SU, false);
3027   }
3028 }
3029 
3030 /// Create the standard converging machine scheduler. This will be used as the
3031 /// default scheduler if the target does not set a default.
3032 static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C) {
3033   ScheduleDAGMILive *DAG = new ScheduleDAGMILive(C, make_unique<GenericScheduler>(C));
3034   // Register DAG post-processors.
3035   //
3036   // FIXME: extend the mutation API to allow earlier mutations to instantiate
3037   // data and pass it to later mutations. Have a single mutation that gathers
3038   // the interesting nodes in one pass.
3039   DAG->addMutation(make_unique<CopyConstrain>(DAG->TII, DAG->TRI));
3040   if (EnableLoadCluster && DAG->TII->enableClusterLoads())
3041     DAG->addMutation(make_unique<LoadClusterMutation>(DAG->TII, DAG->TRI));
3042   if (EnableMacroFusion)
3043     DAG->addMutation(make_unique<MacroFusion>(*DAG->TII, *DAG->TRI));
3044   return DAG;
3045 }
3046 
3047 static MachineSchedRegistry
3048 GenericSchedRegistry("converge", "Standard converging scheduler.",
3049                      createGenericSchedLive);
3050 
3051 //===----------------------------------------------------------------------===//
3052 // PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
3053 //===----------------------------------------------------------------------===//
3054 
3055 void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
3056   DAG = Dag;
3057   SchedModel = DAG->getSchedModel();
3058   TRI = DAG->TRI;
3059 
3060   Rem.init(DAG, SchedModel);
3061   Top.init(DAG, SchedModel, &Rem);
3062   BotRoots.clear();
3063 
3064   // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
3065   // or are disabled, then these HazardRecs will be disabled.
3066   const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
3067   if (!Top.HazardRec) {
3068     Top.HazardRec =
3069         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
3070             Itin, DAG);
3071   }
3072 }
3073 
3074 
3075 void PostGenericScheduler::registerRoots() {
3076   Rem.CriticalPath = DAG->ExitSU.getDepth();
3077 
3078   // Some roots may not feed into ExitSU. Check all of them just in case.
3079   for (SmallVectorImpl<SUnit*>::const_iterator
3080          I = BotRoots.begin(), E = BotRoots.end(); I != E; ++I) {
3081     if ((*I)->getDepth() > Rem.CriticalPath)
3082       Rem.CriticalPath = (*I)->getDepth();
3083   }
3084   DEBUG(dbgs() << "Critical Path: (PGS-RR) " << Rem.CriticalPath << '\n');
3085   if (DumpCriticalPathLength) {
3086     errs() << "Critical Path(PGS-RR): " << Rem.CriticalPath << "\n";
3087   }
3088 }
3089 
3090 /// Apply a set of heuristics to a new candidate for PostRA scheduling.
3091 ///
3092 /// \param Cand provides the policy and current best candidate.
3093 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
3094 void PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
3095                                         SchedCandidate &TryCand) {
3096 
3097   // Initialize the candidate if needed.
3098   if (!Cand.isValid()) {
3099     TryCand.Reason = NodeOrder;
3100     return;
3101   }
3102 
3103   // Prioritize instructions that read unbuffered resources by stall cycles.
3104   if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
3105               Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
3106     return;
3107 
3108   // Avoid critical resource consumption and balance the schedule.
3109   if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
3110               TryCand, Cand, ResourceReduce))
3111     return;
3112   if (tryGreater(TryCand.ResDelta.DemandedResources,
3113                  Cand.ResDelta.DemandedResources,
3114                  TryCand, Cand, ResourceDemand))
3115     return;
3116 
3117   // Avoid serializing long latency dependence chains.
3118   if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
3119     return;
3120   }
3121 
3122   // Fall through to original instruction order.
3123   if (TryCand.SU->NodeNum < Cand.SU->NodeNum)
3124     TryCand.Reason = NodeOrder;
3125 }
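// This post-RA hierarchy is a register-pressure-free subset of the one in
// GenericScheduler::tryCandidate above: Stall, ResourceReduce, ResourceDemand,
// latency, then original instruction order (NodeOrder) as the final
// tie-breaker.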
3126 
3127 void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
3128   ReadyQueue &Q = Top.Available;
3129 
3130   DEBUG(Q.dump());
3131 
3132   for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
3133     SchedCandidate TryCand(Cand.Policy);
3134     TryCand.SU = *I;
3135     TryCand.initResourceDelta(DAG, SchedModel);
3136     tryCandidate(Cand, TryCand);
3137     if (TryCand.Reason != NoCand) {
3138       Cand.setBest(TryCand);
3139       DEBUG(traceCandidate(Cand));
3140     }
3141   }
3142 }
3143 
3144 /// Pick the next node to schedule.
3145 SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
3146   if (DAG->top() == DAG->bottom()) {
3147     assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
3148     return nullptr;
3149   }
3150   SUnit *SU;
3151   do {
3152     SU = Top.pickOnlyChoice();
3153     if (!SU) {
3154       CandPolicy NoPolicy;
3155       SchedCandidate TopCand(NoPolicy);
3156       // Set the top-down policy based on the state of the current top zone and
3157       // the instructions outside the zone, including the bottom zone.
3158       setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
3159       pickNodeFromQueue(TopCand);
3160       assert(TopCand.Reason != NoCand && "failed to find a candidate");
3161       tracePick(TopCand, true);
3162       SU = TopCand.SU;
3163     }
3164   } while (SU->isScheduled);
3165 
3166   IsTopNode = true;
3167   Top.removeReady(SU);
3168 
3169   DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
3170   return SU;
3171 }
3172 
3173 /// Called after ScheduleDAGMI has scheduled an instruction and updated
3174 /// scheduled/remaining flags in the DAG nodes.
3175 void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3176   SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3177   Top.bumpNode(SU);
3178 }
3179 
3180 /// Create a generic scheduler with no vreg liveness or DAG mutation passes.
3181 static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C) {
3182   return new ScheduleDAGMI(C, make_unique<PostGenericScheduler>(C), /*IsPostRA=*/true);
3183 }
3184 
3185 //===----------------------------------------------------------------------===//
3186 // ILP Scheduler. Currently for experimental analysis of heuristics.
3187 //===----------------------------------------------------------------------===//
3188 
3189 namespace {
3190 /// \brief Order nodes by the ILP metric.
3191 struct ILPOrder {
3192   const SchedDFSResult *DFSResult;
3193   const BitVector *ScheduledTrees;
3194   bool MaximizeILP;
3195 
3196   ILPOrder(bool MaxILP)
3197     : DFSResult(nullptr), ScheduledTrees(nullptr), MaximizeILP(MaxILP) {}
3198 
3199   /// \brief Apply a less-than relation on node priority.
3200   ///
3201   /// (Return true if A comes after B in the Q.)
3202   bool operator()(const SUnit *A, const SUnit *B) const {
3203     unsigned SchedTreeA = DFSResult->getSubtreeID(A);
3204     unsigned SchedTreeB = DFSResult->getSubtreeID(B);
3205     if (SchedTreeA != SchedTreeB) {
3206       // Unscheduled trees have lower priority.
3207       if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
3208         return ScheduledTrees->test(SchedTreeB);
3209 
3210       // Trees with shallower connections have lower priority.
3211       if (DFSResult->getSubtreeLevel(SchedTreeA)
3212           != DFSResult->getSubtreeLevel(SchedTreeB)) {
3213         return DFSResult->getSubtreeLevel(SchedTreeA)
3214           < DFSResult->getSubtreeLevel(SchedTreeB);
3215       }
3216     }
3217     if (MaximizeILP)
3218       return DFSResult->getILP(A) < DFSResult->getILP(B);
3219     else
3220       return DFSResult->getILP(A) > DFSResult->getILP(B);
3221   }
3222 };
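// Example of the ordering (ILP values hypothetical): with MaximizeILP set and
// two nodes of the same subtree where getILP(A) == 3 and getILP(B) == 5, the
// comparator returns true ("A comes after B"), so the heap operations in
// ILPScheduler below surface B, the higher-ILP node, first.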
3223 
3224 /// \brief Schedule based on the ILP metric.
3225 class ILPScheduler : public MachineSchedStrategy {
3226   ScheduleDAGMILive *DAG;
3227   ILPOrder Cmp;
3228 
3229   std::vector<SUnit*> ReadyQ;
3230 public:
3231   ILPScheduler(bool MaximizeILP): DAG(nullptr), Cmp(MaximizeILP) {}
3232 
3233   void initialize(ScheduleDAGMI *dag) override {
3234     assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
3235     DAG = static_cast<ScheduleDAGMILive*>(dag);
3236     DAG->computeDFSResult();
3237     Cmp.DFSResult = DAG->getDFSResult();
3238     Cmp.ScheduledTrees = &DAG->getScheduledTrees();
3239     ReadyQ.clear();
3240   }
3241 
3242   void registerRoots() override {
3243     // Restore the heap in ReadyQ with the updated DFS results.
3244     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3245   }
3246 
3247   /// Implement MachineSchedStrategy interface.
3248   /// -----------------------------------------
3249 
3250   /// Callback to select the highest priority node from the ready Q.
3251   SUnit *pickNode(bool &IsTopNode) override {
3252     if (ReadyQ.empty()) return nullptr;
3253     std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3254     SUnit *SU = ReadyQ.back();
3255     ReadyQ.pop_back();
3256     IsTopNode = false;
3257     DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") "
3258           << " ILP: " << DAG->getDFSResult()->getILP(SU)
3259           << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @"
3260           << DAG->getDFSResult()->getSubtreeLevel(
3261             DAG->getDFSResult()->getSubtreeID(SU)) << '\n'
3262           << "Scheduling " << *SU->getInstr());
3263     return SU;
3264   }
3265 
3266   /// \brief Scheduler callback to notify that a new subtree is scheduled.
3267   void scheduleTree(unsigned SubtreeID) override {
3268     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3269   }
3270 
3271   /// Callback after a node is scheduled. Subtree marking and the heap resort
3272   /// are handled by the scheduleTree() callback above.
3273   void schedNode(SUnit *SU, bool IsTopNode) override {
3274     assert(!IsTopNode && "SchedDFSResult needs bottom-up");
3275   }
3276 
3277   void releaseTopNode(SUnit *) override { /*only called for top roots*/ }
3278 
3279   void releaseBottomNode(SUnit *SU) override {
3280     ReadyQ.push_back(SU);
3281     std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3282   }
3283 };
3284 } // namespace
3285 
3286 static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
3287   return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(true));
3288 }
3289 static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
3290   return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(false));
3291 }
3292 static MachineSchedRegistry ILPMaxRegistry(
3293   "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
3294 static MachineSchedRegistry ILPMinRegistry(
3295   "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);
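// Either strategy can be selected from the command line via this registry,
// e.g. "llc -misched=ilpmax" or "llc -misched=ilpmin" (usage sketch, assuming
// the standard -misched=<name> strategy selection flag).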
3296 
3297 //===----------------------------------------------------------------------===//
3298 // Machine Instruction Shuffler for Correctness Testing
3299 //===----------------------------------------------------------------------===//
3300 
3301 #ifndef NDEBUG
3302 namespace {
3303 /// Apply a less-than relation on the node order, which corresponds to the
3304 /// instruction order prior to scheduling. IsReverse implements greater-than.
3305 template<bool IsReverse>
3306 struct SUnitOrder {
3307   bool operator()(SUnit *A, SUnit *B) const {
3308     if (IsReverse)
3309       return A->NodeNum > B->NodeNum;
3310     else
3311       return A->NodeNum < B->NodeNum;
3312   }
3313 };
3314 
3315 /// Reorder instructions as much as possible.
3316 class InstructionShuffler : public MachineSchedStrategy {
3317   bool IsAlternating;
3318   bool IsTopDown;
3319 
3320   // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
3321   // gives nodes with a higher number higher priority, causing the latest
3322   // instructions to be scheduled first.
3323   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
3324     TopQ;
3325   // When scheduling bottom-up, use greater-than as the queue priority.
3326   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
3327     BottomQ;
3328 public:
3329   InstructionShuffler(bool alternate, bool topdown)
3330     : IsAlternating(alternate), IsTopDown(topdown) {}
3331 
3332   void initialize(ScheduleDAGMI*) override {
3333     TopQ.clear();
3334     BottomQ.clear();
3335   }
3336 
3337   /// Implement MachineSchedStrategy interface.
3338   /// -----------------------------------------
3339 
3340   SUnit *pickNode(bool &IsTopNode) override {
3341     SUnit *SU;
3342     if (IsTopDown) {
3343       do {
3344         if (TopQ.empty()) return nullptr;
3345         SU = TopQ.top();
3346         TopQ.pop();
3347       } while (SU->isScheduled);
3348       IsTopNode = true;
3349     }
3350     else {
3351       do {
3352         if (BottomQ.empty()) return nullptr;
3353         SU = BottomQ.top();
3354         BottomQ.pop();
3355       } while (SU->isScheduled);
3356       IsTopNode = false;
3357     }
3358     if (IsAlternating)
3359       IsTopDown = !IsTopDown;
3360     return SU;
3361   }
3362 
3363   void schedNode(SUnit *SU, bool IsTopNode) override {}
3364 
3365   void releaseTopNode(SUnit *SU) override {
3366     TopQ.push(SU);
3367   }
3368   void releaseBottomNode(SUnit *SU) override {
3369     BottomQ.push(SU);
3370   }
3371 };
3372 } // namespace
3373 
3374 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
3375   bool Alternate = !ForceTopDown && !ForceBottomUp;
3376   bool TopDown = !ForceBottomUp;
3377   assert((TopDown || !ForceTopDown) &&
3378          "-misched-topdown incompatible with -misched-bottomup");
3379   return new ScheduleDAGMILive(C, make_unique<InstructionShuffler>(Alternate, TopDown));
3380 }
3381 static MachineSchedRegistry ShufflerRegistry(
3382   "shuffle", "Shuffle machine instructions alternating directions",
3383   createInstructionShuffler);
3384 #endif // !NDEBUG
3385 
3386 //===----------------------------------------------------------------------===//
3387 // GraphWriter support for ScheduleDAGMILive.
3388 //===----------------------------------------------------------------------===//
3389 
3390 #ifndef NDEBUG
3391 namespace llvm {
3392 
3393 template<> struct GraphTraits<
3394   ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};
3395 
3396 template<>
3397 struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
3398 
3399   DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
3400 
3401   static std::string getGraphName(const ScheduleDAG *G) {
3402     return G->MF.getName();
3403   }
3404 
3405   static bool renderGraphFromBottomUp() {
3406     return true;
3407   }
3408 
3409   static bool isNodeHidden(const SUnit *Node) {
3410     if (ViewMISchedCutoff == 0)
3411       return false;
3412     return (Node->Preds.size() > ViewMISchedCutoff
3413          || Node->Succs.size() > ViewMISchedCutoff);
3414   }
3415 
3416   /// If you want to override the dot attributes printed for a particular
3417   /// edge, override this method.
3418   static std::string getEdgeAttributes(const SUnit *Node,
3419                                        SUnitIterator EI,
3420                                        const ScheduleDAG *Graph) {
3421     if (EI.isArtificialDep())
3422       return "color=cyan,style=dashed";
3423     if (EI.isCtrlDep())
3424       return "color=blue,style=dashed";
3425     return "";
3426   }
3427 
3428   static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
3429     std::string Str;
3430     raw_string_ostream SS(Str);
3431     const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
3432     const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
3433       static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
3434     SS << "SU:" << SU->NodeNum;
3435     if (DFS)
3436       SS << " I:" << DFS->getNumInstrs(SU);
3437     return SS.str();
3438   }
3439   static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
3440     return G->getGraphNodeLabel(SU);
3441   }
3442 
3443   static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
3444     std::string Str("shape=Mrecord");
3445     const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
3446     const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
3447       static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
3448     if (DFS) {
3449       Str += ",style=filled,fillcolor=\"#";
3450       Str += DOT::getColorString(DFS->getSubtreeID(N));
3451       Str += '"';
3452     }
3453     return Str;
3454   }
3455 };
3456 } // namespace llvm
3457 #endif // NDEBUG
3458 
3459 /// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
3460 /// rendered using 'dot'.
3461 ///
3462 void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
3463 #ifndef NDEBUG
3464   ViewGraph(this, Name, false, Title);
3465 #else
3466   errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
3467          << "systems with Graphviz or gv!\n";
3468 #endif  // NDEBUG
3469 }
3470 
3471 /// Out-of-line implementation with no arguments is handy for gdb.
3472 void ScheduleDAGMI::viewGraph() {
3473   viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
3474 }
3475