//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

#define DEBUG_TYPE "misched"

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
                       cl::desc("Print critical path length to stdout"));
}
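
// Usage sketch for the flags above (hedged; the policy-initialization code of
// the scheduling strategy is the authoritative consumer): passing
// -misched-topdown or -misched-bottomup to llc forces a single scheduling
// direction, and forcing both at once is not supported.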

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

/// In some situations a few uninteresting nodes depend on nearly all other
/// nodes in the graph; provide a cutoff to hide them.
static cl::opt<unsigned> ViewMISchedCutoff("view-misched-cutoff", cl::Hidden,
  cl::desc("Hide nodes with more predecessors/successors than the cutoff"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));

static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
  cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
  cl::desc("Only schedule this MBB#"));
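
// A hedged debugging recipe built from the options above: combine
// -misched-only-func or -misched-only-block with -misched-cutoff=N to bisect
// a suspect schedule down to a single region and instruction count.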
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
  cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(true));

static cl::opt<bool> EnableMemOpCluster("misched-cluster", cl::Hidden,
                                        cl::desc("Enable memop clustering."),
                                        cl::init(true));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}
void ScheduleDAGMutation::anchor() {}

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
    MF(nullptr), MLI(nullptr), MDT(nullptr),
    PassConfig(nullptr), AA(nullptr), LIS(nullptr) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// Base class for a machine scheduler class that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}

  void print(raw_ostream &O, const Module* = nullptr) const override;

protected:
  void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags);
};

/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createMachineScheduler();
};

/// PostMachineScheduler runs shortly before code emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
  PostMachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createPostMachineScheduler();
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "machine-scheduler",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "machine-scheduler",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineSchedulerBase(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

char PostMachineScheduler::ID = 0;

char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;

INITIALIZE_PASS(PostMachineScheduler, "postmisched",
                "PostRA Machine Instruction Scheduler", false, false)

PostMachineScheduler::PostMachineScheduler()
: MachineSchedulerBase(ID) {
  initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return nullptr;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);

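// A minimal sketch of how an out-of-tree strategy would hook into this
// registry; MySchedStrategy and createMySched are hypothetical names, not
// defined in this file:
//
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, llvm::make_unique<MySchedStrategy>(C));
//   }
//   static MachineSchedRegistry
//   MySchedRegistry("my-sched", "My custom scheduler.", createMySched);
//
// The strategy then becomes selectable with -misched=my-sched.
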
static cl::opt<bool> EnableMachineSched(
    "enable-misched",
    cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePostRAMachineSched(
    "enable-post-misched",
    cl::desc("Enable the post-ra machine instruction scheduling pass."),
    cl::init(true), cl::Hidden);

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C);
static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C);

/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return const_cast<MachineInstr*>(
    &*priorNonDebug(MachineBasicBlock::const_iterator(I), Beg));
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  // Cast the return value to nonconst MachineInstr, then cast to an
  // instr_iterator, which does not check for null, finally return a
  // bundle_iterator.
  return MachineBasicBlock::instr_iterator(
    const_cast<MachineInstr*>(
      &*nextIfDebug(MachineBasicBlock::const_iterator(I), End)));
}

/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor != useDefaultMachineSched)
    return Ctor(this);

  // Get the default scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedLive(this);
}
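
// Targets commonly supply their default by overriding the TargetPassConfig
// hook consulted above. A hedged sketch (MyTargetPassConfig and the mutation
// class are hypothetical):
//
//   ScheduleDAGInstrs *
//   MyTargetPassConfig::createMachineScheduler(MachineSchedContext *C) const {
//     ScheduleDAGMILive *DAG =
//         new ScheduleDAGMILive(C, llvm::make_unique<GenericScheduler>(C));
//     DAG->addMutation(llvm::make_unique<MyTargetDAGMutation>());
//     return DAG;
//   }
//
// The PostRA hook below, createPostMachineScheduler, follows the same pattern.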

/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(*mf.getFunction()))
    return false;

  if (EnableMachineSched.getNumOccurrences()) {
    if (!EnableMachineSched)
      return false;
  } else if (!mf.getSubtarget().enableMachineScheduler())
    return false;

  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler, false);

  DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(*mf.getFunction()))
    return false;

  if (EnablePostRAMachineSched.getNumOccurrences()) {
    if (!EnablePostRAMachineSched)
      return false;
  } else if (!mf.getSubtarget().enablePostRAScheduler()) {
    DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
    return false;
  }
  DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  PassConfig = &getAnalysis<TargetPassConfig>();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  scheduleRegions(*Scheduler, true);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}

/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need isCall to enforce
/// the boundary, but there would be no benefit to postRA scheduling across
/// calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
                            MachineBasicBlock *MBB,
                            MachineFunction *MF,
                            const TargetInstrInfo *TII) {
  return MI->isCall() || TII->isSchedulingBoundary(MI, MBB, *MF);
}

/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler,
                                           bool FixKillFlags) {
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(&*MBB);

#ifndef NDEBUG
    if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
      continue;
    if (SchedOnlyBlock.getNumOccurrences()
        && (int)SchedOnlyBlock != MBB->getNumber())
      continue;
#endif

    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    //
    // MBB::size() uses instr_iterator to count. Here we need a bundle to count
    // as a single instruction.
    unsigned RemainingInstrs = std::distance(MBB->begin(), MBB->end());
    for (MachineBasicBlock::iterator RegionEnd = MBB->end();
         RegionEnd != MBB->begin(); RegionEnd = Scheduler.begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end() ||
          isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII)) {
        --RegionEnd;
        // Count the boundary instruction.
        --RemainingInstrs;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      unsigned NumRegionInstrs = 0;
      MachineBasicBlock::iterator I = RegionEnd;
      for (; I != MBB->begin(); --I, --RemainingInstrs) {
        if (isSchedBoundary(&*std::prev(I), &*MBB, MF, TII))
          break;
        if (!I->isDebugValue())
          ++NumRegionInstrs;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == std::prev(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n  From: " << *I << "    To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " RegionInstrs: " << NumRegionInstrs
            << " Remaining: " << RemainingInstrs << "\n");
      if (DumpCriticalPathLength) {
        errs() << MF->getName();
        errs() << ":BB# " << MBB->getNumber();
        errs() << " " << MBB->getName() << " \n";
      }

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler.begin();
    }
    assert(RemainingInstrs == 0 && "Instruction count mismatch!");
    Scheduler.finishBlock();
    // FIXME: Ideally, no further passes should rely on kill flags. However,
    // thumb2 size reduction is currently an exception, so the PostMIScheduler
    // needs to do this.
    if (FixKillFlags)
      Scheduler.fixupKills(&*MBB);
  }
  Scheduler.finalizeSchedule();
}

void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

LLVM_DUMP_METHOD
void ReadyQueue::dump() {
  dbgs() << "Queue " << Name << ": ";
  for (unsigned i = 0, e = Queue.size(); i < e; ++i)
    dbgs() << Queue[i]->NodeNum << " ";
  dbgs() << "\n";
}

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
// virtual registers.
//===----------------------------------------------------------------------===//

// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() {
}

bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}
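
// Mutation-side usage of the two helpers above typically looks like the
// following (a sketch, not a call made from this file):
//
//   if (DAG->canAddEdge(SuccSU, PredSU))
//     DAG->addEdge(SuccSU, SDep(PredSU, SDep::Artificial));
//
// canAddEdge rejects edges that would close a topological cycle; addEdge then
// records the new predecessor with both the SUnit and the Topo ordering.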

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
    SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();

  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
    PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();

  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                     MachineBasicBlock::iterator begin,
                                     MachineBasicBlock::iterator end,
                                     unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  SchedImpl->initPolicy(begin, end, regioninstrs);
}

/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
  MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals
  if (LIS)
    LIS->handleMove(*MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}

/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
  DEBUG(SchedImpl->dumpPolicy());

  // Build the DAG.
  buildSchedGraph(AA);

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    } else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
    }
    // Notify the scheduling strategy before updating the DAG.
    // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
    // runs, it can then use the accurate ReadyCycle time to determine whether
    // newly released nodes can move to the readyQ.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
    Mutations[i]->apply(this);
  }
}

void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                      SmallVectorImpl<SUnit*> &BotRoots) {
  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    SUnit *SU = &(*I);
    assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU->biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!I->NumPredsLeft)
      TopRoots.push_back(SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!I->NumSuccsLeft)
      BotRoots.push_back(SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = nullptr;
  NextClusterPred = nullptr;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
    SchedImpl->releaseTopNode(*I);
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == std::prev(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);

  SUPressureDiffs.clear();

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
  ShouldTrackLaneMasks = SchedImpl->shouldTrackLaneMasks();

  if (ShouldTrackLaneMasks) {
    if (!ShouldTrackPressure)
      report_fatal_error("ShouldTrackLaneMasks requires ShouldTrackPressure");
    // Dead subregister defs have no users and therefore no dependencies;
    // moving them around may cause liveintervals to degrade into multiple
    // components. Change independent components to have their own vreg to
    // avoid this.
    if (!DisconnectedComponentsRenamed)
      LIS->renameDisconnectedComponents();
  }
}

// Set up the register pressure trackers for the top-scheduled and
// bottom-scheduled regions.
void ScheduleDAGMILive::initRegPressure() {
  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin,
                    ShouldTrackLaneMasks, false);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                    ShouldTrackLaneMasks, false);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    DEBUG(dbgs() << "Live Thru: ";
          dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // For each live out vreg reduce the pressure change associated with other
  // uses of the same vreg below the live-out reaching def.
  updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd) {
    SmallVector<RegisterMaskPair, 8> LiveUses;
    BotRPTracker.recede(&LiveUses);
    updatePressureDiffs(LiveUses);
  }

  DEBUG(
    dbgs() << "Top Pressure:\n";
    dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
    dbgs() << "Bottom Pressure:\n";
    dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
  );

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      DEBUG(dbgs() << TRI->getRegPressureSetName(i)
            << " Limit " << Limit
            << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureChange(i));
    }
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].getPSet()) << " ";
        dbgs() << "\n");
}

void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
                        const std::vector<unsigned> &NewMaxPressure) {
  const PressureDiff &PDiff = getPressureDiff(SU);
  unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
  for (PressureDiff::const_iterator I = PDiff.begin(), E = PDiff.end();
       I != E; ++I) {
    if (!I->isValid())
      break;
    unsigned ID = I->getPSet();
    while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
      ++CritIdx;
    if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
      if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
          && NewMaxPressure[ID] <= INT16_MAX)
        RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
    }
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
    if (NewMaxPressure[ID] >= Limit - 2) {
      DEBUG(dbgs() << "  " << TRI->getRegPressureSetName(ID) << ": "
            << NewMaxPressure[ID]
            << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ") << Limit
            << "(+ " << BotRPTracker.getLiveThru()[ID] << " livethru)\n");
    }
  }
}

/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(
    ArrayRef<RegisterMaskPair> LiveUses) {
  for (const RegisterMaskPair &P : LiveUses) {
    unsigned Reg = P.RegUnit;
    /// FIXME: Currently assuming single-use physregs.
    if (!TRI->isVirtualRegister(Reg))
      continue;

    if (ShouldTrackLaneMasks) {
      // If the register has just become live then other uses won't change
      // this fact anymore => decrement pressure.
      // If the register has just become dead then other uses make it come
      // back to life => increment pressure.
      bool Decrement = P.LaneMask != 0;

      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit &SU = *V2SU.SU;
        if (SU.isScheduled || &SU == &ExitSU)
          continue;

        PressureDiff &PDiff = getPressureDiff(&SU);
        PDiff.addPressureChange(Reg, Decrement, &MRI);
        DEBUG(
          dbgs() << "  UpdateRegP: SU(" << SU.NodeNum << ") "
                 << PrintReg(Reg, TRI) << ':' << PrintLaneMask(P.LaneMask)
                 << ' ' << *SU.getInstr();
          dbgs() << "              to ";
          PDiff.dump(*TRI);
        );
      }
    } else {
      assert(P.LaneMask != 0);
      DEBUG(dbgs() << "  LiveReg: " << PrintVRegOrUnit(Reg, TRI) << "\n");
      // This may be called before CurrentBottom has been initialized. However,
      // BotRPTracker must have a valid position. We want the value live into the
      // instruction or live out of the block, so ask for the previous
      // instruction's live-out.
      const LiveInterval &LI = LIS->getInterval(Reg);
      VNInfo *VNI;
      MachineBasicBlock::const_iterator I =
        nextIfDebug(BotRPTracker.getPos(), BB->end());
      if (I == BB->end())
        VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
      else {
        LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*I));
        VNI = LRQ.valueIn();
      }
      // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
      assert(VNI && "No live value at use.");
      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit *SU = V2SU.SU;
        // If this use comes before the reaching def, it cannot be a last use,
        // so decrease its pressure change.
        if (!SU->isScheduled && SU != &ExitSU) {
          LiveQueryResult LRQ =
              LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
          if (LRQ.valueIn() == VNI) {
            PressureDiff &PDiff = getPressureDiff(SU);
            PDiff.addPressureChange(Reg, true, &MRI);
            DEBUG(
              dbgs() << "  UpdateRegP: SU(" << SU->NodeNum << ") "
                     << *SU->getInstr();
              dbgs() << "              to ";
              PDiff.dump(*TRI);
            );
          }
        }
      }
    }
  }
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMILive then it will want to override this virtual method in order
/// to update any specialized state.
void ScheduleDAGMILive::schedule() {
  DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
  DEBUG(SchedImpl->dumpPolicy());
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(
    for (const SUnit &SU : SUnits) {
      SU.dumpAll(this);
      if (ShouldTrackPressure) {
        dbgs() << "  Pressure Diff      : ";
        getPressureDiff(&SU).dump(*TRI);
      }
      dbgs() << '\n';
    }
  );
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  if (ShouldTrackPressure) {
    assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
    TopRPTracker.setPos(CurrentTop);
  }

  bool IsTopNode = false;
  while (true) {
    DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    if (DFSResult) {
      unsigned SubtreeID = DFSResult->getSubtreeID(SU);
      if (!ScheduledTrees.test(SubtreeID)) {
        ScheduledTrees.set(SubtreeID);
        DFSResult->scheduleTree(SubtreeID);
        SchedImpl->scheduleTree(SubtreeID);
      }
    }

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Build the DAG and setup three register pressure trackers.
void ScheduleDAGMILive::buildDAGWithRegPressure() {
  if (!ShouldTrackPressure) {
    RPTracker.reset();
    RegionCriticalPSets.clear();
    buildSchedGraph(AA);
    return;
  }

  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 ShouldTrackLaneMasks, /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker, &SUPressureDiffs, LIS, ShouldTrackLaneMasks);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

void ScheduleDAGMILive::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single block loops. To handle loops that
/// span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes. For example, consider
/// the following instruction sequence where each instruction has unit latency
/// and defines an eponymous virtual register:
///
/// a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
///
/// This could be relevant to PostRA scheduling, but is currently implemented
/// assuming LiveIntervals.
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
  // This only applies to single-block loops.
  if (!BB->isSuccessor(BB))
    return 0;

  unsigned MaxCyclicLatency = 0;
  // Visit each live out vreg def to find def/use pairs that cross iterations.
  for (const RegisterMaskPair &P : RPTracker.getPressure().LiveOutRegs) {
    unsigned Reg = P.RegUnit;
    if (!TRI->isVirtualRegister(Reg))
      continue;
    const LiveInterval &LI = LIS->getInterval(Reg);
    const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    if (!DefVNI)
      continue;

    MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
    const SUnit *DefSU = getSUnit(DefMI);
    if (!DefSU)
      continue;

    unsigned LiveOutHeight = DefSU->getHeight();
    unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
    // Visit all local users of the vreg def.
    for (const VReg2SUnit &V2SU
         : make_range(VRegUses.find(Reg), VRegUses.end())) {
      SUnit *SU = V2SU.SU;
      if (SU == &ExitSU)
        continue;

      // Only consider uses of the phi.
      LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
      if (!LRQ.valueIn()->isPHIDef())
        continue;

      // Assume that a path spanning two iterations is a cycle, which could
      // overestimate in strange cases. This allows cyclic latency to be
      // estimated as the minimum slack of the vreg's depth or height.
      unsigned CyclicLatency = 0;
      if (LiveOutDepth > SU->getDepth())
        CyclicLatency = LiveOutDepth - SU->getDepth();

      unsigned LiveInHeight = SU->getHeight() + DefSU->Latency;
      if (LiveInHeight > LiveOutHeight) {
        if (LiveInHeight - LiveOutHeight < CyclicLatency)
          CyclicLatency = LiveInHeight - LiveOutHeight;
      } else
        CyclicLatency = 0;

      DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
            << SU->NodeNum << ") = " << CyclicLatency << "c\n");
      if (CyclicLatency > MaxCyclicLatency)
        MaxCyclicLatency = CyclicLatency;
    }
  }
  DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
  return MaxCyclicLatency;
}

/// Move an instruction and update register pressure.
void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    if (ShouldTrackPressure) {
      // Update top scheduled pressure.
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      TopRPTracker.advance(RegOpers);
      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
      DEBUG(
        dbgs() << "Top Pressure:\n";
        dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
      );

      updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
    }
  } else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
    }
    if (ShouldTrackPressure) {
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      BotRPTracker.recedeSkipDebugValues();
      SmallVector<RegisterMaskPair, 8> LiveUses;
      BotRPTracker.recede(RegOpers, &LiveUses);
      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
      DEBUG(
        dbgs() << "Bottom Pressure:\n";
        dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
      );

      updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
      updatePressureDiffs(LiveUses);
    }
  }
}

//===----------------------------------------------------------------------===//
// BaseMemOpClusterMutation - DAG post-processing to cluster loads or stores.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between neighboring
/// loads or between neighboring stores.
class BaseMemOpClusterMutation : public ScheduleDAGMutation {
  struct MemOpInfo {
    SUnit *SU;
    unsigned BaseReg;
    int64_t Offset;
    MemOpInfo(SUnit *su, unsigned reg, int64_t ofs)
        : SU(su), BaseReg(reg), Offset(ofs) {}

    bool operator<(const MemOpInfo &RHS) const {
      return std::tie(BaseReg, Offset) < std::tie(RHS.BaseReg, RHS.Offset);
    }
  };

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  bool IsLoad;

public:
  BaseMemOpClusterMutation(const TargetInstrInfo *tii,
                           const TargetRegisterInfo *tri, bool IsLoad)
      : TII(tii), TRI(tri), IsLoad(IsLoad) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;

protected:
  void clusterNeighboringMemOps(ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG);
};

class StoreClusterMutation : public BaseMemOpClusterMutation {
public:
  StoreClusterMutation(const TargetInstrInfo *tii,
                       const TargetRegisterInfo *tri)
      : BaseMemOpClusterMutation(tii, tri, false) {}
};

class LoadClusterMutation : public BaseMemOpClusterMutation {
public:
  LoadClusterMutation(const TargetInstrInfo *tii, const TargetRegisterInfo *tri)
      : BaseMemOpClusterMutation(tii, tri, true) {}
};
} // anonymous namespace
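
// These mutations only take effect once installed on a DAG. A hedged sketch of
// the wiring performed when -misched-cluster is enabled, where TII and TRI
// come from the DAG being constructed:
//
//   DAG->addMutation(llvm::make_unique<LoadClusterMutation>(TII, TRI));
//   DAG->addMutation(llvm::make_unique<StoreClusterMutation>(TII, TRI));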

void BaseMemOpClusterMutation::clusterNeighboringMemOps(
    ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG) {
  SmallVector<MemOpInfo, 32> MemOpRecords;
  for (unsigned Idx = 0, End = MemOps.size(); Idx != End; ++Idx) {
    SUnit *SU = MemOps[Idx];
    unsigned BaseReg;
    int64_t Offset;
    if (TII->getMemOpBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
      MemOpRecords.push_back(MemOpInfo(SU, BaseReg, Offset));
  }
  if (MemOpRecords.size() < 2)
    return;

  std::sort(MemOpRecords.begin(), MemOpRecords.end());
  unsigned ClusterLength = 1;
  for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
    if (MemOpRecords[Idx].BaseReg != MemOpRecords[Idx+1].BaseReg) {
      ClusterLength = 1;
      continue;
    }

    SUnit *SUa = MemOpRecords[Idx].SU;
    SUnit *SUb = MemOpRecords[Idx+1].SU;
    if (TII->shouldClusterMemOps(SUa->getInstr(), SUb->getInstr(), ClusterLength)
        && DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
      DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
            << SUb->NodeNum << ")\n");
      // Copy successor edges from SUa to SUb. Interleaving computation
      // dependent on SUa can prevent load combining due to register reuse.
      // Predecessor edges do not need to be copied from SUb to SUa since nearby
      // loads should have effectively the same inputs.
      for (SUnit::const_succ_iterator
             SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
        if (SI->getSUnit() == SUb)
          continue;
        DEBUG(dbgs() << "  Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
        DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
      }
      ++ClusterLength;
    } else
      ClusterLength = 1;
  }
}
1442 
1443 /// \brief Callback from DAG postProcessing to create cluster edges for loads or stores.
1444 void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAGInstrs) {
1445 
1446   ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
1447 
1448   // Map DAG NodeNum to store chain ID.
1449   DenseMap<unsigned, unsigned> StoreChainIDs;
1450   // Map each store chain to a set of dependent MemOps.
1451   SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
1452   for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
1453     SUnit *SU = &DAG->SUnits[Idx];
1454     if ((IsLoad && !SU->getInstr()->mayLoad()) ||
1455         (!IsLoad && !SU->getInstr()->mayStore()))
1456       continue;
1457 
1458     unsigned ChainPredID = DAG->SUnits.size();
1459     for (SUnit::const_pred_iterator
1460            PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
1461       if (PI->isCtrl()) {
1462         ChainPredID = PI->getSUnit()->NodeNum;
1463         break;
1464       }
1465     }
1466     // Check if this chain-like pred has been seen
1467     // before. ChainPredID==MaxNodeID at the top of the schedule.
1468     unsigned NumChains = StoreChainDependents.size();
1469     std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
1470       StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
1471     if (Result.second)
1472       StoreChainDependents.resize(NumChains + 1);
1473     StoreChainDependents[Result.first->second].push_back(SU);
1474   }
1475 
1476   // Iterate over the store chains.
1477   for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
1478     clusterNeighboringMemOps(StoreChainDependents[Idx], DAG);
1479 }
1480 
1481 //===----------------------------------------------------------------------===//
1482 // MacroFusion - DAG post-processing to encourage fusion of macro ops.
1483 //===----------------------------------------------------------------------===//
1484 
1485 namespace {
1486 /// \brief Post-process the DAG to create cluster edges between instructions
1487 /// that may be fused by the processor into a single operation.
1488 class MacroFusion : public ScheduleDAGMutation {
1489   const TargetInstrInfo &TII;
1490   const TargetRegisterInfo &TRI;
1491 public:
1492   MacroFusion(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI)
1493     : TII(TII), TRI(TRI) {}
1494 
1495   void apply(ScheduleDAGInstrs *DAGInstrs) override;
1496 };
1497 } // anonymous
1498 
1499 /// Returns true if \p MI reads a register written by \p Other.
1500 static bool HasDataDep(const TargetRegisterInfo &TRI, const MachineInstr &MI,
1501                        const MachineInstr &Other) {
1502   for (const MachineOperand &MO : MI.uses()) {
1503     if (!MO.isReg() || !MO.readsReg())
1504       continue;
1505 
1506     unsigned Reg = MO.getReg();
1507     if (Other.modifiesRegister(Reg, &TRI))
1508       return true;
1509   }
1510   return false;
1511 }
1512 
1513 /// \brief Callback from DAG postProcessing to create cluster edges to encourage
1514 /// fused operations.
1515 void MacroFusion::apply(ScheduleDAGInstrs *DAGInstrs) {
1516   ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
1517 
1518   // For now, assume targets can only fuse with the branch.
1519   SUnit &ExitSU = DAG->ExitSU;
1520   MachineInstr *Branch = ExitSU.getInstr();
1521   if (!Branch)
1522     return;
1523 
1524   for (SUnit &SU : DAG->SUnits) {
1525     // SUnits with successors can't be scheduled in front of the ExitSU.
1526     if (!SU.Succs.empty())
1527       continue;
1528     // We only care if the node writes to a register that the branch reads.
1529     MachineInstr *Pred = SU.getInstr();
1530     if (!HasDataDep(TRI, *Branch, *Pred))
1531       continue;
1532 
1533     if (!TII.shouldScheduleAdjacent(Pred, Branch))
1534       continue;
1535 
1536     // Create a single weak edge from SU to ExitSU. The only effect is to cause
1537     // bottom-up scheduling to heavily prioritize the clustered SU.  There is no
1538     // need to copy predecessor edges from ExitSU to SU, since top-down
1539     // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
1540     // of SU, we could create an artificial edge from the deepest root, but it
1541     // hasn't been needed yet.
1542     bool Success = DAG->addEdge(&ExitSU, SDep(&SU, SDep::Cluster));
1543     (void)Success;
1544     assert(Success && "No DAG nodes should be reachable from ExitSU");
1545 
1546     DEBUG(dbgs() << "Macro Fuse SU(" << SU.NodeNum << ")\n");
1547     break;
1548   }
1549 }
1550 
1551 //===----------------------------------------------------------------------===//
1552 // CopyConstrain - DAG post-processing to encourage copy elimination.
1553 //===----------------------------------------------------------------------===//
1554 
1555 namespace {
1556 /// \brief Post-process the DAG to create weak edges from all uses of a copy to
1557 /// the one use that defines the copy's source vreg, most likely an induction
1558 /// variable increment.
1559 class CopyConstrain : public ScheduleDAGMutation {
1560   // Transient state.
1561   SlotIndex RegionBeginIdx;
1562   // RegionEndIdx is the slot index of the last non-debug instruction in the
1563   // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
1564   SlotIndex RegionEndIdx;
1565 public:
1566   CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}
1567 
1568   void apply(ScheduleDAGInstrs *DAGInstrs) override;
1569 
1570 protected:
1571   void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
1572 };
1573 } // anonymous
1574 
1575 /// constrainLocalCopy handles two possibilities:
1576 /// 1) Local src:
1577 /// I0:     = dst
1578 /// I1: src = ...
1579 /// I2:     = dst
1580 /// I3: dst = src (copy)
1581 /// (create pred->succ edges I0->I1, I2->I1)
1582 ///
1583 /// 2) Local copy:
1584 /// I0: dst = src (copy)
1585 /// I1:     = dst
1586 /// I2: src = ...
1587 /// I3:     = dst
1588 /// (create pred->succ edges I1->I2, I3->I2)
1589 ///
1590 /// Although the MachineScheduler is currently constrained to single blocks,
1591 /// this algorithm should handle extended blocks. An EBB is a set of
1592 /// contiguously numbered blocks such that the previous block in the EBB is
1593 /// always the single predecessor.
1594 void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
1595   LiveIntervals *LIS = DAG->getLIS();
1596   MachineInstr *Copy = CopySU->getInstr();
1597 
1598   // Check for pure vreg copies.
1599   const MachineOperand &SrcOp = Copy->getOperand(1);
1600   unsigned SrcReg = SrcOp.getReg();
1601   if (!TargetRegisterInfo::isVirtualRegister(SrcReg) || !SrcOp.readsReg())
1602     return;
1603 
1604   const MachineOperand &DstOp = Copy->getOperand(0);
1605   unsigned DstReg = DstOp.getReg();
1606   if (!TargetRegisterInfo::isVirtualRegister(DstReg) || DstOp.isDead())
1607     return;
1608 
1609   // Check if either the dest or source is local. If it's live across a back
1610   // edge, it's not local. Note that if both vregs are live across the back
1611   // edge, we cannot successfully constrain the copy without cyclic scheduling.
1612   // If both the copy's source and dest are local live intervals, then we
1613   // should treat the dest as the global for the purpose of adding
1614   // constraints. This adds edges from source's other uses to the copy.
1615   unsigned LocalReg = SrcReg;
1616   unsigned GlobalReg = DstReg;
1617   LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
1618   if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
1619     LocalReg = DstReg;
1620     GlobalReg = SrcReg;
1621     LocalLI = &LIS->getInterval(LocalReg);
1622     if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
1623       return;
1624   }
1625   LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);
1626 
1627   // Find the global segment after the start of the local LI.
1628   LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
1629   // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
1630   // local live range. We could create edges from other global uses to the local
1631   // start, but the coalescer should have already eliminated these cases, so
1632   // don't bother dealing with it.
1633   if (GlobalSegment == GlobalLI->end())
1634     return;
1635 
1636   // If GlobalSegment is killed at LocalLI->start, the call to find()
1637   // returned the next global segment. But if GlobalSegment overlaps with
1638   // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
1639   // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
1640   if (GlobalSegment->contains(LocalLI->beginIndex()))
1641     ++GlobalSegment;
1642 
1643   if (GlobalSegment == GlobalLI->end())
1644     return;
1645 
1646   // Check if GlobalLI contains a hole in the vicinity of LocalLI.
1647   if (GlobalSegment != GlobalLI->begin()) {
1648     // Two address defs have no hole.
1649     if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
1650                                GlobalSegment->start)) {
1651       return;
1652     }
1653     // If the prior global segment may be defined by the same two-address
1654   // instruction that also defines LocalLI, then we can't make a hole here.
1655     if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
1656                                LocalLI->beginIndex())) {
1657       return;
1658     }
1659     // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
1660     // it would be a disconnected component in the live range.
1661     assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
1662            "Disconnected LRG within the scheduling region.");
1663   }
1664   MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
1665   if (!GlobalDef)
1666     return;
1667 
1668   SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
1669   if (!GlobalSU)
1670     return;
1671 
1672   // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
1673   // constraining the uses of the last local def to precede GlobalDef.
1674   SmallVector<SUnit*,8> LocalUses;
1675   const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
1676   MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
1677   SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
1678   for (SUnit::const_succ_iterator
1679          I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
1680        I != E; ++I) {
1681     if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
1682       continue;
1683     if (I->getSUnit() == GlobalSU)
1684       continue;
1685     if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
1686       return;
1687     LocalUses.push_back(I->getSUnit());
1688   }
1689   // Open the top of the GlobalLI hole by constraining any earlier global uses
1690   // to precede the start of LocalLI.
1691   SmallVector<SUnit*,8> GlobalUses;
1692   MachineInstr *FirstLocalDef =
1693     LIS->getInstructionFromIndex(LocalLI->beginIndex());
1694   SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
1695   for (SUnit::const_pred_iterator
1696          I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
1697     if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
1698       continue;
1699     if (I->getSUnit() == FirstLocalSU)
1700       continue;
1701     if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
1702       return;
1703     GlobalUses.push_back(I->getSUnit());
1704   }
1705   DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
1706   // Add the weak edges.
1707   for (SmallVectorImpl<SUnit*>::const_iterator
1708          I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
1709     DEBUG(dbgs() << "  Local use SU(" << (*I)->NodeNum << ") -> SU("
1710           << GlobalSU->NodeNum << ")\n");
1711     DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
1712   }
1713   for (SmallVectorImpl<SUnit*>::const_iterator
1714          I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
1715     DEBUG(dbgs() << "  Global use SU(" << (*I)->NodeNum << ") -> SU("
1716           << FirstLocalSU->NodeNum << ")\n");
1717     DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
1718   }
1719 }
1720 
1721 /// \brief Callback from DAG postProcessing to create weak edges to encourage
1722 /// copy elimination.
1723 void CopyConstrain::apply(ScheduleDAGInstrs *DAGInstrs) {
1724   ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
1725   assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");
1726 
1727   MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
1728   if (FirstPos == DAG->end())
1729     return;
1730   RegionBeginIdx = DAG->getLIS()->getInstructionIndex(*FirstPos);
1731   RegionEndIdx = DAG->getLIS()->getInstructionIndex(
1732       *priorNonDebug(DAG->end(), DAG->begin()));
1733 
1734   for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
1735     SUnit *SU = &DAG->SUnits[Idx];
1736     if (!SU->getInstr()->isCopy())
1737       continue;
1738 
1739     constrainLocalCopy(SU, static_cast<ScheduleDAGMILive*>(DAG));
1740   }
1741 }
1742 
1743 //===----------------------------------------------------------------------===//
1744 // MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
1745 // and possibly other custom schedulers.
1746 //===----------------------------------------------------------------------===//
1747 
1748 static const unsigned InvalidCycle = ~0U;
1749 
1750 SchedBoundary::~SchedBoundary() { delete HazardRec; }
1751 
1752 void SchedBoundary::reset() {
1753   // A new HazardRec is created for each DAG and owned by SchedBoundary.
1754   // Destroying and reconstructing it is very expensive though, so we keep
1755   // invalid, placeholder HazardRecs instead of deleting disabled ones.
1756   if (HazardRec && HazardRec->isEnabled()) {
1757     delete HazardRec;
1758     HazardRec = nullptr;
1759   }
1760   Available.clear();
1761   Pending.clear();
1762   CheckPending = false;
1763   NextSUs.clear();
1764   CurrCycle = 0;
1765   CurrMOps = 0;
1766   MinReadyCycle = UINT_MAX;
1767   ExpectedLatency = 0;
1768   DependentLatency = 0;
1769   RetiredMOps = 0;
1770   MaxExecutedResCount = 0;
1771   ZoneCritResIdx = 0;
1772   IsResourceLimited = false;
1773   ReservedCycles.clear();
1774 #ifndef NDEBUG
1775   // Track the maximum number of stall cycles that could arise either from the
1776   // latency of a DAG edge or the number of cycles that a processor resource is
1777   // reserved (SchedBoundary::ReservedCycles).
1778   MaxObservedStall = 0;
1779 #endif
1780   // Reserve a zero-count for invalid CritResIdx.
1781   ExecutedResCounts.resize(1);
1782   assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
1783 }
1784 
1785 void SchedRemainder::
1786 init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
1787   reset();
1788   if (!SchedModel->hasInstrSchedModel())
1789     return;
1790   RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
1791   for (std::vector<SUnit>::iterator
1792          I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
1793     const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
1794     RemIssueCount += SchedModel->getNumMicroOps(I->getInstr(), SC)
1795       * SchedModel->getMicroOpFactor();
1796     for (TargetSchedModel::ProcResIter
1797            PI = SchedModel->getWriteProcResBegin(SC),
1798            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1799       unsigned PIdx = PI->ProcResourceIdx;
1800       unsigned Factor = SchedModel->getResourceFactor(PIdx);
1801       RemainingCounts[PIdx] += (Factor * PI->Cycles);
1802     }
1803   }
1804 }
1805 
1806 void SchedBoundary::
1807 init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
1808   reset();
1809   DAG = dag;
1810   SchedModel = smodel;
1811   Rem = rem;
1812   if (SchedModel->hasInstrSchedModel()) {
1813     ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds());
1814     ReservedCycles.resize(SchedModel->getNumProcResourceKinds(), InvalidCycle);
1815   }
1816 }
1817 
1818 /// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
1819 /// these "soft stalls" differently than the hard stall cycles based on CPU
1820 /// resources and computed by checkHazard(). A fully in-order model
1821 /// (MicroOpBufferSize==0) will not make use of this since instructions are not
1822 /// available for scheduling until they are ready. However, a weaker in-order
1823 /// model may use this for heuristics. For example, if a processor has in-order
1824 /// behavior when reading certain resources, this may come into play.
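     ///
     /// A concrete sketch (hypothetical numbers): an unbuffered SU with
     /// ReadyCycle 7 while CurrCycle is 5 shows a 2-cycle soft stall; once
     /// CurrCycle reaches 7, the stall is 0.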
1825 unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
1826   if (!SU->isUnbuffered)
1827     return 0;
1828 
1829   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
1830   if (ReadyCycle > CurrCycle)
1831     return ReadyCycle - CurrCycle;
1832   return 0;
1833 }
1834 
1835 /// Compute the next cycle at which the given processor resource can be
1836 /// scheduled.
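     ///
     /// Illustrative example (hypothetical numbers): if ReservedCycles[PIdx]
     /// is 10, a top-down zone may next use the resource at cycle 10, while a
     /// bottom-up zone issuing an operation that holds it for Cycles=2 gets
     /// 12, since the reservation must also cover the new operation itself.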
1837 unsigned SchedBoundary::
1838 getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
1839   unsigned NextUnreserved = ReservedCycles[PIdx];
1840   // If this resource has never been used, always return cycle zero.
1841   if (NextUnreserved == InvalidCycle)
1842     return 0;
1843   // For bottom-up scheduling add the cycles needed for the current operation.
1844   if (!isTop())
1845     NextUnreserved += Cycles;
1846   return NextUnreserved;
1847 }
1848 
1849 /// Does this SU have a hazard within the current instruction group.
1850 ///
1851 /// The scheduler supports two modes of hazard recognition. The first is the
1852 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
1853 /// supports highly complicated in-order reservation tables
1854 /// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
1855 ///
1856 /// The second is a streamlined mechanism that checks for hazards based on
1857 /// simple counters that the scheduler itself maintains. It explicitly checks
1858 /// for instruction dispatch limitations, including the number of micro-ops that
1859 /// can dispatch per cycle.
1860 ///
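     /// Illustrative example (hypothetical): with an issue width of 2 and
     /// CurrMOps == 1, an SU that decodes to 2 micro-ops cannot fit in the
     /// current dispatch group and is reported as a hazard.
     ///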
1861 /// TODO: Also check whether the SU must start a new group.
1862 bool SchedBoundary::checkHazard(SUnit *SU) {
1863   if (HazardRec->isEnabled()
1864       && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
1865     return true;
1866   }
1867   unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
1868   if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
1869     DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
1870           << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
1871     return true;
1872   }
1873   if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
1874     const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1875     for (TargetSchedModel::ProcResIter
1876            PI = SchedModel->getWriteProcResBegin(SC),
1877            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1878       unsigned NRCycle = getNextResourceCycle(PI->ProcResourceIdx, PI->Cycles);
1879       if (NRCycle > CurrCycle) {
1880 #ifndef NDEBUG
1881         MaxObservedStall = std::max(PI->Cycles, MaxObservedStall);
1882 #endif
1883         DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") "
1884               << SchedModel->getResourceName(PI->ProcResourceIdx)
1885               << "=" << NRCycle << "c\n");
1886         return true;
1887       }
1888     }
1889   }
1890   return false;
1891 }
1892 
1893 // Find the unscheduled node in ReadySUs with the highest latency.
1894 unsigned SchedBoundary::
1895 findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
1896   SUnit *LateSU = nullptr;
1897   unsigned RemLatency = 0;
1898   for (ArrayRef<SUnit*>::iterator I = ReadySUs.begin(), E = ReadySUs.end();
1899        I != E; ++I) {
1900     unsigned L = getUnscheduledLatency(*I);
1901     if (L > RemLatency) {
1902       RemLatency = L;
1903       LateSU = *I;
1904     }
1905   }
1906   if (LateSU) {
1907     DEBUG(dbgs() << Available.getName() << " RemLatency SU("
1908           << LateSU->NodeNum << ") " << RemLatency << "c\n");
1909   }
1910   return RemLatency;
1911 }
1912 
1913 // Count resources in this zone and the remaining unscheduled
1914 // instruction. Return the max count, scaled. Set OtherCritIdx to the critical
1915 // resource index, or zero if the zone is issue limited.
1916 unsigned SchedBoundary::
1917 getOtherResourceCount(unsigned &OtherCritIdx) {
1918   OtherCritIdx = 0;
1919   if (!SchedModel->hasInstrSchedModel())
1920     return 0;
1921 
1922   unsigned OtherCritCount = Rem->RemIssueCount
1923     + (RetiredMOps * SchedModel->getMicroOpFactor());
1924   DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
1925         << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
1926   for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
1927        PIdx != PEnd; ++PIdx) {
1928     unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
1929     if (OtherCount > OtherCritCount) {
1930       OtherCritCount = OtherCount;
1931       OtherCritIdx = PIdx;
1932     }
1933   }
1934   if (OtherCritIdx) {
1935     DEBUG(dbgs() << "  " << Available.getName() << " + Remain CritRes: "
1936           << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
1937           << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
1938   }
1939   return OtherCritCount;
1940 }
1941 
1942 void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) {
1943   assert(SU->getInstr() && "Scheduled SUnit must have instr");
1944 
1945 #ifndef NDEBUG
1946   // ReadyCycle was bumped up to CurrCycle when this node was
1947   // scheduled, but CurrCycle may have been eagerly advanced immediately after
1948   // scheduling, so may now be greater than ReadyCycle.
1949   if (ReadyCycle > CurrCycle)
1950     MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
1951 #endif
1952 
1953   if (ReadyCycle < MinReadyCycle)
1954     MinReadyCycle = ReadyCycle;
1955 
1956   // Check for interlocks first. For the purpose of other heuristics, an
1957   // instruction that cannot issue appears as if it's not in the ReadyQueue.
1958   bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
1959   if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU))
1960     Pending.push(SU);
1961   else
1962     Available.push(SU);
1963 
1964   // Record this node as an immediate dependent of the scheduled node.
1965   NextSUs.insert(SU);
1966 }
1967 
1968 void SchedBoundary::releaseTopNode(SUnit *SU) {
1969   if (SU->isScheduled)
1970     return;
1971 
1972   releaseNode(SU, SU->TopReadyCycle);
1973 }
1974 
1975 void SchedBoundary::releaseBottomNode(SUnit *SU) {
1976   if (SU->isScheduled)
1977     return;
1978 
1979   releaseNode(SU, SU->BotReadyCycle);
1980 }
1981 
1982 /// Move the boundary of scheduled code by one cycle.
1983 void SchedBoundary::bumpCycle(unsigned NextCycle) {
1984   if (SchedModel->getMicroOpBufferSize() == 0) {
1985     assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
1986     if (MinReadyCycle > NextCycle)
1987       NextCycle = MinReadyCycle;
1988   }
1989   // Update the current micro-ops, which will issue in the next cycle.
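       // E.g. (hypothetical): with IssueWidth == 4, advancing two cycles frees
       // up to 8 micro-op slots, so CurrMOps drains by min(CurrMOps, 8).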
1990   unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
1991   CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;
1992 
1993   // Decrement DependentLatency based on the next cycle.
1994   if ((NextCycle - CurrCycle) > DependentLatency)
1995     DependentLatency = 0;
1996   else
1997     DependentLatency -= (NextCycle - CurrCycle);
1998 
1999   if (!HazardRec->isEnabled()) {
2000     // Bypass HazardRec virtual calls.
2001     CurrCycle = NextCycle;
2002   } else {
2003     // Bypass getHazardType calls in case of long latency.
2004     for (; CurrCycle != NextCycle; ++CurrCycle) {
2005       if (isTop())
2006         HazardRec->AdvanceCycle();
2007       else
2008         HazardRec->RecedeCycle();
2009     }
2010   }
2011   CheckPending = true;
2012   unsigned LFactor = SchedModel->getLatencyFactor();
2013   IsResourceLimited =
2014     (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
2015     > (int)LFactor;
2016 
2017   DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n');
2018 }
2019 
2020 void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
2021   ExecutedResCounts[PIdx] += Count;
2022   if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
2023     MaxExecutedResCount = ExecutedResCounts[PIdx];
2024 }
2025 
2026 /// Add the given processor resource to this scheduled zone.
2027 ///
2028 /// \param Cycles indicates the number of consecutive (non-pipelined) cycles
2029 /// during which this resource is consumed.
2030 ///
2031 /// \return the next cycle at which the instruction may execute without
2032 /// oversubscribing resources.
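     ///
     /// Note on units (a sketch of the bookkeeping, not a spec): counts are
     /// kept scaled, Count = Factor * Cycles, where Factor normalizes
     /// resources of different widths (roughly, narrower resources get larger
     /// factors), so per-resource counts compare directly when picking the
     /// critical resource.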
2033 unsigned SchedBoundary::
2034 countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
2035   unsigned Factor = SchedModel->getResourceFactor(PIdx);
2036   unsigned Count = Factor * Cycles;
2037   DEBUG(dbgs() << "  " << SchedModel->getResourceName(PIdx)
2038         << " +" << Cycles << "x" << Factor << "u\n");
2039 
2040   // Update Executed resources counts.
2041   incExecutedResources(PIdx, Count);
2042   assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
2043   Rem->RemainingCounts[PIdx] -= Count;
2044 
2045   // Check if this resource exceeds the current critical resource. If so, it
2046   // becomes the critical resource.
2047   if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
2048     ZoneCritResIdx = PIdx;
2049     DEBUG(dbgs() << "  *** Critical resource "
2050           << SchedModel->getResourceName(PIdx) << ": "
2051           << getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
2052   }
2053   // For reserved resources, record the highest cycle using the resource.
2054   unsigned NextAvailable = getNextResourceCycle(PIdx, Cycles);
2055   if (NextAvailable > CurrCycle) {
2056     DEBUG(dbgs() << "  Resource conflict: "
2057           << SchedModel->getProcResource(PIdx)->Name << " reserved until @"
2058           << NextAvailable << "\n");
2059   }
2060   return NextAvailable;
2061 }
2062 
2063 /// Move the boundary of scheduled code by one SUnit.
2064 void SchedBoundary::bumpNode(SUnit *SU) {
2065   // Update the reservation table.
2066   if (HazardRec->isEnabled()) {
2067     if (!isTop() && SU->isCall) {
2068       // Calls are scheduled with their preceding instructions. For bottom-up
2069       // scheduling, clear the pipeline state before emitting.
2070       HazardRec->Reset();
2071     }
2072     HazardRec->EmitInstruction(SU);
2073   }
2074   // checkHazard should prevent scheduling multiple instructions per cycle that
2075   // exceed the issue width.
2076   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2077   unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
2078   assert(
2079       (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
2080       "Cannot schedule this instruction's MicroOps in the current cycle.");
2081 
2082   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
2083   DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");
2084 
2085   unsigned NextCycle = CurrCycle;
2086   switch (SchedModel->getMicroOpBufferSize()) {
2087   case 0:
2088     assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
2089     break;
2090   case 1:
2091     if (ReadyCycle > NextCycle) {
2092       NextCycle = ReadyCycle;
2093       DEBUG(dbgs() << "  *** Stall until: " << ReadyCycle << "\n");
2094     }
2095     break;
2096   default:
2097     // We don't currently model the OOO reorder buffer, so consider all
2098     // scheduled MOps to be "retired". We do loosely model in-order resource
2099     // latency. If this instruction uses an in-order resource, account for any
2100     // likely stall cycles.
2101     if (SU->isUnbuffered && ReadyCycle > NextCycle)
2102       NextCycle = ReadyCycle;
2103     break;
2104   }
2105   RetiredMOps += IncMOps;
2106 
2107   // Update resource counts and critical resource.
2108   if (SchedModel->hasInstrSchedModel()) {
2109     unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
2110     assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
2111     Rem->RemIssueCount -= DecRemIssue;
2112     if (ZoneCritResIdx) {
2113       // Scale scheduled micro-ops for comparing with the critical resource.
2114       unsigned ScaledMOps =
2115         RetiredMOps * SchedModel->getMicroOpFactor();
2116 
2117       // If scaled micro-ops are now more than the previous critical resource by
2118       // a full cycle, then micro-ops issue becomes critical.
2119       if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
2120           >= (int)SchedModel->getLatencyFactor()) {
2121         ZoneCritResIdx = 0;
2122         DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
2123               << ScaledMOps / SchedModel->getLatencyFactor() << "c\n");
2124       }
2125     }
2126     for (TargetSchedModel::ProcResIter
2127            PI = SchedModel->getWriteProcResBegin(SC),
2128            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2129       unsigned RCycle =
2130         countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
2131       if (RCycle > NextCycle)
2132         NextCycle = RCycle;
2133     }
2134     if (SU->hasReservedResource) {
2135       // For reserved resources, record the highest cycle using the resource.
2136       // For top-down scheduling, this is the cycle in which we schedule this
2137       // instruction plus the number of cycles the operation reserves the
2138       // resource. For bottom-up, it is simply the instruction's cycle.
2139       for (TargetSchedModel::ProcResIter
2140              PI = SchedModel->getWriteProcResBegin(SC),
2141              PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2142         unsigned PIdx = PI->ProcResourceIdx;
2143         if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
2144           if (isTop()) {
2145             ReservedCycles[PIdx] =
2146               std::max(getNextResourceCycle(PIdx, 0), NextCycle + PI->Cycles);
2147           }
2148           else
2149             ReservedCycles[PIdx] = NextCycle;
2150         }
2151       }
2152     }
2153   }
2154   // Update ExpectedLatency and DependentLatency.
2155   unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
2156   unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
2157   if (SU->getDepth() > TopLatency) {
2158     TopLatency = SU->getDepth();
2159     DEBUG(dbgs() << "  " << Available.getName()
2160           << " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n");
2161   }
2162   if (SU->getHeight() > BotLatency) {
2163     BotLatency = SU->getHeight();
2164     DEBUG(dbgs() << "  " << Available.getName()
2165           << " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n");
2166   }
2167   // If we stall for any reason, bump the cycle.
2168   if (NextCycle > CurrCycle) {
2169     bumpCycle(NextCycle);
2170   } else {
2171     // After updating ZoneCritResIdx and ExpectedLatency, check if we're
2172     // resource limited. If a stall occurred, bumpCycle does this.
2173     unsigned LFactor = SchedModel->getLatencyFactor();
2174     IsResourceLimited =
2175       (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
2176       > (int)LFactor;
2177   }
2178   // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
2179   // resets CurrMOps. Loop to handle instructions with more MOps than issue in
2180   // one cycle.  Since we commonly reach the max MOps here, opportunistically
2181   // bump the cycle to avoid uselessly checking everything in the readyQ.
2182   CurrMOps += IncMOps;
2183   while (CurrMOps >= SchedModel->getIssueWidth()) {
2184     DEBUG(dbgs() << "  *** Max MOps " << CurrMOps
2185           << " at cycle " << CurrCycle << '\n');
2186     bumpCycle(++NextCycle);
2187   }
2188   DEBUG(dumpScheduledState());
2189 }
2190 
2191 /// Release pending ready nodes into the available queue. This makes them
2192 /// visible to heuristics.
2193 void SchedBoundary::releasePending() {
2194   // If the available queue is empty, it is safe to reset MinReadyCycle.
2195   if (Available.empty())
2196     MinReadyCycle = UINT_MAX;
2197 
2198   // Check to see if any of the pending instructions are ready to issue.  If
2199   // so, add them to the available queue.
2200   bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
2201   for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
2202     SUnit *SU = *(Pending.begin()+i);
2203     unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
2204 
2205     if (ReadyCycle < MinReadyCycle)
2206       MinReadyCycle = ReadyCycle;
2207 
2208     if (!IsBuffered && ReadyCycle > CurrCycle)
2209       continue;
2210 
2211     if (checkHazard(SU))
2212       continue;
2213 
2214     Available.push(SU);
2215     Pending.remove(Pending.begin()+i);
2216     --i; --e;
2217   }
2218   DEBUG(if (!Pending.empty()) Pending.dump());
2219   CheckPending = false;
2220 }
2221 
2222 /// Remove SU from the ready set for this boundary.
2223 void SchedBoundary::removeReady(SUnit *SU) {
2224   if (Available.isInQueue(SU))
2225     Available.remove(Available.find(SU));
2226   else {
2227     assert(Pending.isInQueue(SU) && "bad ready count");
2228     Pending.remove(Pending.find(SU));
2229   }
2230 }
2231 
2232 /// If this queue only has one ready candidate, return it. As a side effect,
2233 /// defer any nodes that now hit a hazard, and advance the cycle until at least
2234 /// one node is ready. If multiple instructions are ready, return NULL.
2235 SUnit *SchedBoundary::pickOnlyChoice() {
2236   if (CheckPending)
2237     releasePending();
2238 
2239   if (CurrMOps > 0) {
2240     // Defer any ready instrs that now have a hazard.
2241     for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
2242       if (checkHazard(*I)) {
2243         Pending.push(*I);
2244         I = Available.remove(I);
2245         continue;
2246       }
2247       ++I;
2248     }
2249   }
2250   for (unsigned i = 0; Available.empty(); ++i) {
2251 //  FIXME: Re-enable assert once PR20057 is resolved.
2252 //    assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
2253 //           "permanent hazard");
2254     (void)i;
2255     bumpCycle(CurrCycle + 1);
2256     releasePending();
2257   }
2258   if (Available.size() == 1)
2259     return *Available.begin();
2260   return nullptr;
2261 }
2262 
2263 #ifndef NDEBUG
2264 // This is useful information to dump after bumpNode.
2265 // Note that the Queue contents are more useful before pickNodeFromQueue.
2266 void SchedBoundary::dumpScheduledState() {
2267   unsigned ResFactor;
2268   unsigned ResCount;
2269   if (ZoneCritResIdx) {
2270     ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
2271     ResCount = getResourceCount(ZoneCritResIdx);
2272   } else {
2273     ResFactor = SchedModel->getMicroOpFactor();
2274     ResCount = RetiredMOps * SchedModel->getMicroOpFactor();
2275   }
2276   unsigned LFactor = SchedModel->getLatencyFactor();
2277   dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
2278          << "  Retired: " << RetiredMOps;
2279   dbgs() << "\n  Executed: " << getExecutedCount() / LFactor << "c";
2280   dbgs() << "\n  Critical: " << ResCount / LFactor << "c, "
2281          << ResCount / ResFactor << " "
2282          << SchedModel->getResourceName(ZoneCritResIdx)
2283          << "\n  ExpectedLatency: " << ExpectedLatency << "c\n"
2284          << (IsResourceLimited ? "  - Resource" : "  - Latency")
2285          << " limited.\n";
2286 }
2287 #endif
2288 
2289 //===----------------------------------------------------------------------===//
2290 // GenericScheduler - Generic implementation of MachineSchedStrategy.
2291 //===----------------------------------------------------------------------===//
2292 
2293 void GenericSchedulerBase::SchedCandidate::
2294 initResourceDelta(const ScheduleDAGMI *DAG,
2295                   const TargetSchedModel *SchedModel) {
2296   if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
2297     return;
2298 
2299   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2300   for (TargetSchedModel::ProcResIter
2301          PI = SchedModel->getWriteProcResBegin(SC),
2302          PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2303     if (PI->ProcResourceIdx == Policy.ReduceResIdx)
2304       ResDelta.CritResources += PI->Cycles;
2305     if (PI->ProcResourceIdx == Policy.DemandResIdx)
2306       ResDelta.DemandedResources += PI->Cycles;
2307   }
2308 }
2309 
2310 /// Set the CandPolicy for a scheduling zone given the current resources and
2311 /// latencies inside and outside the zone.
2312 void GenericSchedulerBase::setPolicy(CandPolicy &Policy, bool IsPostRA,
2313                                      SchedBoundary &CurrZone,
2314                                      SchedBoundary *OtherZone) {
2315   // Apply preemptive heuristics based on the total latency and resources
2316   // inside and outside this zone. Potential stalls should be considered before
2317   // following this policy.
2318 
2319   // Compute remaining latency. We need this both to determine whether the
2320   // overall schedule has become latency-limited and whether the instructions
2321   // outside this zone are resource or latency limited.
2322   //
2323   // The "dependent" latency is updated incrementally during scheduling as the
2324   // max height/depth of scheduled nodes minus the cycles since it was
2325   // scheduled:
2326   //   DLat = max (N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
2327   //
2328   // The "independent" latency is the max ready queue depth:
2329   //   ILat = max N.depth for N in Available|Pending
2330   //
2331   // RemainingLatency is the greater of independent and dependent latency.
2332   unsigned RemLatency = CurrZone.getDependentLatency();
2333   RemLatency = std::max(RemLatency,
2334                         CurrZone.findMaxLatency(CurrZone.Available.elements()));
2335   RemLatency = std::max(RemLatency,
2336                         CurrZone.findMaxLatency(CurrZone.Pending.elements()));
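       // Illustrative example (hypothetical numbers): with DependentLatency
       // == 8 and a pending node of unscheduled latency 12, RemLatency is 12.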
2337 
2338   // Compute the critical resource outside the zone.
2339   unsigned OtherCritIdx = 0;
2340   unsigned OtherCount =
2341     OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;
2342 
2343   bool OtherResLimited = false;
2344   if (SchedModel->hasInstrSchedModel()) {
2345     unsigned LFactor = SchedModel->getLatencyFactor();
2346     OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
2347   }
2348   // Schedule aggressively for latency in PostRA mode. We don't check for
2349   // acyclic latency during PostRA, and highly out-of-order processors will
2350   // skip PostRA scheduling.
2351   if (!OtherResLimited) {
2352     if (IsPostRA || (RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath)) {
2353       Policy.ReduceLatency |= true;
2354       DEBUG(dbgs() << "  " << CurrZone.Available.getName()
2355             << " RemainingLatency " << RemLatency << " + "
2356             << CurrZone.getCurrCycle() << "c > CritPath "
2357             << Rem.CriticalPath << "\n");
2358     }
2359   }
2360   // If the same resource is limiting inside and outside the zone, do nothing.
2361   if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
2362     return;
2363 
2364   DEBUG(
2365     if (CurrZone.isResourceLimited()) {
2366       dbgs() << "  " << CurrZone.Available.getName() << " ResourceLimited: "
2367              << SchedModel->getResourceName(CurrZone.getZoneCritResIdx())
2368              << "\n";
2369     }
2370     if (OtherResLimited)
2371       dbgs() << "  RemainingLimit: "
2372              << SchedModel->getResourceName(OtherCritIdx) << "\n";
2373     if (!CurrZone.isResourceLimited() && !OtherResLimited)
2374       dbgs() << "  Latency limited both directions.\n");
2375 
2376   if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
2377     Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();
2378 
2379   if (OtherResLimited)
2380     Policy.DemandResIdx = OtherCritIdx;
2381 }
2382 
2383 #ifndef NDEBUG
2384 const char *GenericSchedulerBase::getReasonStr(
2385   GenericSchedulerBase::CandReason Reason) {
2386   switch (Reason) {
2387   case NoCand:         return "NOCAND    ";
2388   case PhysRegCopy:    return "PREG-COPY ";
2389   case RegExcess:      return "REG-EXCESS";
2390   case RegCritical:    return "REG-CRIT  ";
2391   case Stall:          return "STALL     ";
2392   case Cluster:        return "CLUSTER   ";
2393   case Weak:           return "WEAK      ";
2394   case RegMax:         return "REG-MAX   ";
2395   case ResourceReduce: return "RES-REDUCE";
2396   case ResourceDemand: return "RES-DEMAND";
2397   case TopDepthReduce: return "TOP-DEPTH ";
2398   case TopPathReduce:  return "TOP-PATH  ";
2399   case BotHeightReduce:return "BOT-HEIGHT";
2400   case BotPathReduce:  return "BOT-PATH  ";
2401   case NextDefUse:     return "DEF-USE   ";
2402   case NodeOrder:      return "ORDER     ";
2403   }
2404   llvm_unreachable("Unknown reason!");
2405 }
2406 
2407 void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
2408   PressureChange P;
2409   unsigned ResIdx = 0;
2410   unsigned Latency = 0;
2411   switch (Cand.Reason) {
2412   default:
2413     break;
2414   case RegExcess:
2415     P = Cand.RPDelta.Excess;
2416     break;
2417   case RegCritical:
2418     P = Cand.RPDelta.CriticalMax;
2419     break;
2420   case RegMax:
2421     P = Cand.RPDelta.CurrentMax;
2422     break;
2423   case ResourceReduce:
2424     ResIdx = Cand.Policy.ReduceResIdx;
2425     break;
2426   case ResourceDemand:
2427     ResIdx = Cand.Policy.DemandResIdx;
2428     break;
2429   case TopDepthReduce:
2430     Latency = Cand.SU->getDepth();
2431     break;
2432   case TopPathReduce:
2433     Latency = Cand.SU->getHeight();
2434     break;
2435   case BotHeightReduce:
2436     Latency = Cand.SU->getHeight();
2437     break;
2438   case BotPathReduce:
2439     Latency = Cand.SU->getDepth();
2440     break;
2441   }
2442   dbgs() << "  Cand SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
2443   if (P.isValid())
2444     dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
2445            << ":" << P.getUnitInc() << " ";
2446   else
2447     dbgs() << "      ";
2448   if (ResIdx)
2449     dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
2450   else
2451     dbgs() << "         ";
2452   if (Latency)
2453     dbgs() << " " << Latency << " cycles ";
2454   else
2455     dbgs() << "          ";
2456   dbgs() << '\n';
2457 }
2458 #endif
2459 
2460 /// Return true if this heuristic determines order.
2461 static bool tryLess(int TryVal, int CandVal,
2462                     GenericSchedulerBase::SchedCandidate &TryCand,
2463                     GenericSchedulerBase::SchedCandidate &Cand,
2464                     GenericSchedulerBase::CandReason Reason) {
2465   if (TryVal < CandVal) {
2466     TryCand.Reason = Reason;
2467     return true;
2468   }
2469   if (TryVal > CandVal) {
2470     if (Cand.Reason > Reason)
2471       Cand.Reason = Reason;
2472     return true;
2473   }
2474   Cand.setRepeat(Reason);
2475   return false;
2476 }
2477 
2478 static bool tryGreater(int TryVal, int CandVal,
2479                        GenericSchedulerBase::SchedCandidate &TryCand,
2480                        GenericSchedulerBase::SchedCandidate &Cand,
2481                        GenericSchedulerBase::CandReason Reason) {
2482   if (TryVal > CandVal) {
2483     TryCand.Reason = Reason;
2484     return true;
2485   }
2486   if (TryVal < CandVal) {
2487     if (Cand.Reason > Reason)
2488       Cand.Reason = Reason;
2489     return true;
2490   }
2491   Cand.setRepeat(Reason);
2492   return false;
2493 }
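     // Callers chain these helpers so that the first metric that differs
     // decides the comparison, e.g. (sketch; TryStall/CandStall are
     // hypothetical values):
     //   if (tryLess(TryStall, CandStall, TryCand, Cand, Stall)) return;
     //   if (tryGreater(TryClus, CandClus, TryCand, Cand, Cluster)) return;
     // Lower-priority reasons are consulted only when earlier metrics tie.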
2494 
2495 static bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
2496                        GenericSchedulerBase::SchedCandidate &Cand,
2497                        SchedBoundary &Zone) {
2498   if (Zone.isTop()) {
2499     if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
2500       if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2501                   TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
2502         return true;
2503     }
2504     if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2505                    TryCand, Cand, GenericSchedulerBase::TopPathReduce))
2506       return true;
2507   } else {
2508     if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
2509       if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2510                   TryCand, Cand, GenericSchedulerBase::BotHeightReduce))
2511         return true;
2512     }
2513     if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2514                    TryCand, Cand, GenericSchedulerBase::BotPathReduce))
2515       return true;
2516   }
2517   return false;
2518 }
2519 
2520 static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand,
2521                       bool IsTop) {
2522   DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
2523         << GenericSchedulerBase::getReasonStr(Cand.Reason) << '\n');
2524 }
2525 
2526 void GenericScheduler::initialize(ScheduleDAGMI *dag) {
2527   assert(dag->hasVRegLiveness() &&
2528          "(PreRA)GenericScheduler needs vreg liveness");
2529   DAG = static_cast<ScheduleDAGMILive*>(dag);
2530   SchedModel = DAG->getSchedModel();
2531   TRI = DAG->TRI;
2532 
2533   Rem.init(DAG, SchedModel);
2534   Top.init(DAG, SchedModel, &Rem);
2535   Bot.init(DAG, SchedModel, &Rem);
2536 
2537   // Initialize resource counts.
2538 
2539   // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
2540   // are disabled, then these HazardRecs will be disabled.
2541   const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
2542   if (!Top.HazardRec) {
2543     Top.HazardRec =
2544         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
2545             Itin, DAG);
2546   }
2547   if (!Bot.HazardRec) {
2548     Bot.HazardRec =
2549         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
2550             Itin, DAG);
2551   }
2552 }
2553 
2554 /// Initialize the per-region scheduling policy.
2555 void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
2556                                   MachineBasicBlock::iterator End,
2557                                   unsigned NumRegionInstrs) {
2558   const MachineFunction &MF = *Begin->getParent()->getParent();
2559   const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();
2560 
2561   // Avoid setting up the register pressure tracker for small regions to save
2562   // compile time. As a rough heuristic, only track pressure when the number of
2563   // schedulable instructions exceeds half the integer register file.
2564   RegionPolicy.ShouldTrackPressure = true;
2565   for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
2566     MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
2567     if (TLI->isTypeLegal(LegalIntVT)) {
2568       unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
2569         TLI->getRegClassFor(LegalIntVT));
2570       RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
2571     }
2572   }
2573 
2574   // For generic targets, we default to bottom-up, because it's simpler and more
2575   // compile-time optimizations have been implemented in that direction.
2576   RegionPolicy.OnlyBottomUp = true;
2577 
2578   // Allow the subtarget to override default policy.
2579   MF.getSubtarget().overrideSchedPolicy(RegionPolicy, Begin, End,
2580                                         NumRegionInstrs);
2581 
2582   // After subtarget overrides, apply command line options.
2583   if (!EnableRegPressure)
2584     RegionPolicy.ShouldTrackPressure = false;
2585 
2586   // Check -misched-topdown/bottomup can force or unforce scheduling direction.
2587   // e.g. -misched-bottomup=false allows scheduling in both directions.
2588   assert((!ForceTopDown || !ForceBottomUp) &&
2589          "-misched-topdown incompatible with -misched-bottomup");
2590   if (ForceBottomUp.getNumOccurrences() > 0) {
2591     RegionPolicy.OnlyBottomUp = ForceBottomUp;
2592     if (RegionPolicy.OnlyBottomUp)
2593       RegionPolicy.OnlyTopDown = false;
2594   }
2595   if (ForceTopDown.getNumOccurrences() > 0) {
2596     RegionPolicy.OnlyTopDown = ForceTopDown;
2597     if (RegionPolicy.OnlyTopDown)
2598       RegionPolicy.OnlyBottomUp = false;
2599   }
2600 }
2601 
2602 void GenericScheduler::dumpPolicy() {
2603   dbgs() << "GenericScheduler RegionPolicy: "
2604          << " ShouldTrackPressure=" << RegionPolicy.ShouldTrackPressure
2605          << " OnlyTopDown=" << RegionPolicy.OnlyTopDown
2606          << " OnlyBottomUp=" << RegionPolicy.OnlyBottomUp
2607          << "\n";
2608 }
2609 
2610 /// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
2611 /// critical path by more cycles than it takes to drain the instruction buffer.
2612 /// We estimate an upper bound on in-flight instructions as:
2613 ///
2614 /// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
2615 /// InFlightIterations = AcyclicPath / CyclesPerIteration
2616 /// InFlightResources = InFlightIterations * LoopResources
2617 ///
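     /// Worked example (hypothetical numbers): with CyclicPath = 4c,
     /// AcyclicPath = 16c, and 8 micro-ops of work per iteration, roughly
     /// 16/4 = 4 iterations are in flight, demanding ~32 buffered micro-ops;
     /// if the micro-op buffer holds fewer, the loop is acyclic-latency
     /// limited.
     ///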
2618 /// TODO: Check execution resources in addition to IssueCount.
2619 void GenericScheduler::checkAcyclicLatency() {
2620   if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
2621     return;
2622 
2623   // Scaled number of cycles per loop iteration.
2624   unsigned IterCount =
2625     std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
2626              Rem.RemIssueCount);
2627   // Scaled acyclic critical path.
2628   unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
2629   // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
2630   unsigned InFlightCount =
2631     (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
2632   unsigned BufferLimit =
2633     SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();
2634 
2635   Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;
2636 
2637   DEBUG(dbgs() << "IssueCycles="
2638         << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
2639         << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
2640         << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
2641         << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
2642         << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
2643         if (Rem.IsAcyclicLatencyLimited)
2644           dbgs() << "  ACYCLIC LATENCY LIMIT\n");
2645 }
2646 
2647 void GenericScheduler::registerRoots() {
2648   Rem.CriticalPath = DAG->ExitSU.getDepth();
2649 
2650   // Some roots may not feed into ExitSU. Check all of them just in case.
2651   for (std::vector<SUnit*>::const_iterator
2652          I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
2653     if ((*I)->getDepth() > Rem.CriticalPath)
2654       Rem.CriticalPath = (*I)->getDepth();
2655   }
2656   DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
2657   if (DumpCriticalPathLength) {
2658     errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n";
2659   }
2660 
2661   if (EnableCyclicPath) {
2662     Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
2663     checkAcyclicLatency();
2664   }
2665 }
2666 
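     // Compare two pressure deltas: if both candidates touch the same set,
     // prefer the smaller increase; otherwise prefer a decrease over an
     // increase, then break remaining ties with the target's per-set score.
     // (A summary of the checks below.)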
2667 static bool tryPressure(const PressureChange &TryP,
2668                         const PressureChange &CandP,
2669                         GenericSchedulerBase::SchedCandidate &TryCand,
2670                         GenericSchedulerBase::SchedCandidate &Cand,
2671                         GenericSchedulerBase::CandReason Reason,
2672                         const TargetRegisterInfo *TRI,
2673                         const MachineFunction &MF) {
2674   unsigned TryPSet = TryP.getPSetOrMax();
2675   unsigned CandPSet = CandP.getPSetOrMax();
2676   // If both candidates affect the same set, go with the smallest increase.
2677   if (TryPSet == CandPSet) {
2678     return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
2679                    Reason);
2680   }
2681   // If one candidate decreases and the other increases, go with it.
2682   // Invalid candidates have UnitInc==0.
2683   if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
2684                  Reason)) {
2685     return true;
2686   }
2687 
2688   int TryRank = TryP.isValid() ? TRI->getRegPressureSetScore(MF, TryPSet) :
2689                                  std::numeric_limits<int>::max();
2690 
2691   int CandRank = CandP.isValid() ? TRI->getRegPressureSetScore(MF, CandPSet) :
2692                                    std::numeric_limits<int>::max();
2693 
2694   // If the candidates are decreasing pressure, reverse priority.
2695   if (TryP.getUnitInc() < 0)
2696     std::swap(TryRank, CandRank);
2697   return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
2698 }
2699 
2700 static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
2701   return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
2702 }
2703 
2704 /// Minimize physical register live ranges. Regalloc wants them adjacent to
2705 /// their physreg def/use.
2706 ///
2707 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
2708 /// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
2709 /// with the operation that produces or consumes the physreg. We'll do this when
2710 /// regalloc has support for parallel copies.
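     ///
     /// Illustrative example (hypothetical): when scheduling top-down, a COPY
     /// whose source operand is a physreg has its producer already placed, so
     /// this returns 1 and the copy issues immediately, keeping the physreg
     /// live range short.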
2711 static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
2712   const MachineInstr *MI = SU->getInstr();
2713   if (!MI->isCopy())
2714     return 0;
2715 
2716   unsigned ScheduledOper = isTop ? 1 : 0;
2717   unsigned UnscheduledOper = isTop ? 0 : 1;
2718   // If we have already scheduled the physreg producer/consumer, immediately
2719   // schedule the copy.
2720   if (TargetRegisterInfo::isPhysicalRegister(
2721         MI->getOperand(ScheduledOper).getReg()))
2722     return 1;
2723   // If the physreg is at the boundary, defer it. Otherwise schedule it
2724   // immediately to free the dependent. We can hoist the copy later.
2725   bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
2726   if (TargetRegisterInfo::isPhysicalRegister(
2727         MI->getOperand(UnscheduledOper).getReg()))
2728     return AtBoundary ? -1 : 1;
2729   return 0;
2730 }
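// Worked example (hypothetical operands): scheduling top-down, a copy
// "%vreg1 = COPY %EAX" reads a physreg whose producer is on the already
// scheduled side, so it returns 1 and the copy is picked immediately. A copy
// "%EAX = COPY %vreg1" defines a physreg on the unscheduled side: it returns
// -1 when the copy sits at the region boundary (no successors left) and 1
// otherwise.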
2731 
2732 /// Apply a set of heuristics to a new candidate. Heuristics are currently
2733 /// hierarchical. This may be more efficient than a graduated cost model because
2734 /// we don't need to evaluate all aspects of the model for each node in the
2735 /// queue. But it's really done to make the heuristics easier to debug and
2736 /// statistically analyze.
2737 ///
2738 /// \param Cand provides the policy and current best candidate.
2739 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
2740 /// \param Zone describes the scheduled zone that we are extending.
2741 /// \param RPTracker describes reg pressure within the scheduled zone.
2742 /// \param TempTracker is a scratch pressure tracker to reuse in queries.
2743 void GenericScheduler::tryCandidate(SchedCandidate &Cand,
2744                                     SchedCandidate &TryCand,
2745                                     SchedBoundary &Zone,
2746                                     const RegPressureTracker &RPTracker,
2747                                     RegPressureTracker &TempTracker) {
2748 
2749   if (DAG->isTrackingPressure()) {
2750     // Always initialize TryCand's RPDelta.
2751     if (Zone.isTop()) {
2752       TempTracker.getMaxDownwardPressureDelta(
2753         TryCand.SU->getInstr(),
2754         TryCand.RPDelta,
2755         DAG->getRegionCriticalPSets(),
2756         DAG->getRegPressure().MaxSetPressure);
2757     } else {
2758       if (VerifyScheduling) {
2759         TempTracker.getMaxUpwardPressureDelta(
2760           TryCand.SU->getInstr(),
2761           &DAG->getPressureDiff(TryCand.SU),
2762           TryCand.RPDelta,
2763           DAG->getRegionCriticalPSets(),
2764           DAG->getRegPressure().MaxSetPressure);
2765       } else {
2766         RPTracker.getUpwardPressureDelta(
2767           TryCand.SU->getInstr(),
2768           DAG->getPressureDiff(TryCand.SU),
2769           TryCand.RPDelta,
2770           DAG->getRegionCriticalPSets(),
2771           DAG->getRegPressure().MaxSetPressure);
2772       }
2773     }
2774   }
2775   DEBUG(if (TryCand.RPDelta.Excess.isValid())
2776           dbgs() << "  Try  SU(" << TryCand.SU->NodeNum << ") "
2777                  << TRI->getRegPressureSetName(TryCand.RPDelta.Excess.getPSet())
2778                  << ":" << TryCand.RPDelta.Excess.getUnitInc() << "\n");
2779 
2780   // Initialize the candidate if needed.
2781   if (!Cand.isValid()) {
2782     TryCand.Reason = NodeOrder;
2783     return;
2784   }
2785 
2786   if (tryGreater(biasPhysRegCopy(TryCand.SU, Zone.isTop()),
2787                  biasPhysRegCopy(Cand.SU, Zone.isTop()),
2788                  TryCand, Cand, PhysRegCopy))
2789     return;
2790 
2791   // Avoid exceeding the target's limit.
2792   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
2793                                                Cand.RPDelta.Excess,
2794                                                TryCand, Cand, RegExcess, TRI,
2795                                                DAG->MF))
2796     return;
2797 
2798   // Avoid increasing the max critical pressure in the scheduled region.
2799   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
2800                                                Cand.RPDelta.CriticalMax,
2801                                                TryCand, Cand, RegCritical, TRI,
2802                                                DAG->MF))
2803     return;
2804 
2805   // For loops that are acyclic path limited, aggressively schedule for latency.
2806   // This can result in very long dependence chains scheduled in sequence, so
2807   // once every cycle (when CurrMOps == 0), switch to normal heuristics.
2808   if (Rem.IsAcyclicLatencyLimited && !Zone.getCurrMOps()
2809       && tryLatency(TryCand, Cand, Zone))
2810     return;
2811 
2812   // Prioritize instructions that read unbuffered resources by stall cycles.
2813   if (tryLess(Zone.getLatencyStallCycles(TryCand.SU),
2814               Zone.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
2815     return;
2816 
2817   // Keep clustered nodes together to encourage downstream peephole
2818   // optimizations which may reduce resource requirements.
2819   //
2820   // This is a best effort to set things up for a post-RA pass. Optimizations
2821   // like generating loads of multiple registers should ideally be done within
2822   // the scheduler pass by combining the loads during DAG postprocessing.
2823   const SUnit *NextClusterSU =
2824     Zone.isTop() ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
2825   if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU,
2826                  TryCand, Cand, Cluster))
2827     return;
2828 
2829   // Weak edges are for clustering and other constraints.
2830   if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()),
2831               getWeakLeft(Cand.SU, Zone.isTop()),
2832               TryCand, Cand, Weak)) {
2833     return;
2834   }
2835   // Avoid increasing the max pressure of the entire region.
2836   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
2837                                                Cand.RPDelta.CurrentMax,
2838                                                TryCand, Cand, RegMax, TRI,
2839                                                DAG->MF))
2840     return;
2841 
2842   // Avoid critical resource consumption and balance the schedule.
2843   TryCand.initResourceDelta(DAG, SchedModel);
2844   if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
2845               TryCand, Cand, ResourceReduce))
2846     return;
2847   if (tryGreater(TryCand.ResDelta.DemandedResources,
2848                  Cand.ResDelta.DemandedResources,
2849                  TryCand, Cand, ResourceDemand))
2850     return;
2851 
2852   // Avoid serializing long latency dependence chains.
2853   // For acyclic path limited loops, latency was already checked above.
2854   if (!RegionPolicy.DisableLatencyHeuristic && Cand.Policy.ReduceLatency &&
2855       !Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, Zone)) {
2856     return;
2857   }
2858 
2859   // Prefer immediate defs/users of the last scheduled instruction. This is a
2860   // local pressure-avoidance strategy that also keeps the machine code
2861   // readable.
2862   if (tryGreater(Zone.isNextSU(TryCand.SU), Zone.isNextSU(Cand.SU),
2863                  TryCand, Cand, NextDefUse))
2864     return;
2865 
2866   // Fall through to original instruction order.
2867   if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
2868       || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
2869     TryCand.Reason = NodeOrder;
2870   }
2871 }
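// The cascade above relies on the tryLess/tryGreater contract from
// GenericSchedulerBase: each helper returns true as soon as the two values
// differ, marking TryCand with the deciding reason when it wins, and returns
// false on a tie so control falls through to the next, weaker heuristic. A
// sketch of the pattern (illustrative only; SomeReason is a placeholder):
//
//   if (tryLess(TryVal, CandVal, TryCand, Cand, SomeReason))
//     return; // decided; the lower value won for SomeReason
//   // values tied: consult the next heuristic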
2872 
2873 /// Pick the best candidate from the queue.
2874 ///
2875 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
2876 /// DAG building. To adjust for the current scheduling location we need to
2877 /// maintain the number of vreg uses remaining to be top-scheduled.
2878 void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
2879                                          const RegPressureTracker &RPTracker,
2880                                          SchedCandidate &Cand) {
2881   ReadyQueue &Q = Zone.Available;
2882 
2883   DEBUG(Q.dump());
2884 
2885   // getMaxPressureDelta temporarily modifies the tracker.
2886   RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
2887 
2888   for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
2889 
2890     SchedCandidate TryCand(Cand.Policy);
2891     TryCand.SU = *I;
2892     tryCandidate(Cand, TryCand, Zone, RPTracker, TempTracker);
2893     if (TryCand.Reason != NoCand) {
2894       // Initialize resource delta if needed in case future heuristics query it.
2895       if (TryCand.ResDelta == SchedResourceDelta())
2896         TryCand.initResourceDelta(DAG, SchedModel);
2897       Cand.setBest(TryCand);
2898       DEBUG(traceCandidate(Cand));
2899     }
2900   }
2901 }
2902 
2903 /// Pick the best candidate node from either the top or bottom queue.
2904 SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
2905   // Schedule as far as possible in whichever direction offers no choice. This
2906   // is most efficient, and also provides the best heuristics for CriticalPSets.
2907   if (SUnit *SU = Bot.pickOnlyChoice()) {
2908     IsTopNode = false;
2909     DEBUG(dbgs() << "Pick Bot ONLY1\n");
2910     return SU;
2911   }
2912   if (SUnit *SU = Top.pickOnlyChoice()) {
2913     IsTopNode = true;
2914     DEBUG(dbgs() << "Pick Top ONLY1\n");
2915     return SU;
2916   }
2917   CandPolicy NoPolicy;
2918   SchedCandidate BotCand(NoPolicy);
2919   SchedCandidate TopCand(NoPolicy);
2920   // Set the bottom-up policy based on the state of the current bottom zone and
2921   // the instructions outside the zone, including the top zone.
2922   setPolicy(BotCand.Policy, /*IsPostRA=*/false, Bot, &Top);
2923   // Set the top-down policy based on the state of the current top zone and
2924   // the instructions outside the zone, including the bottom zone.
2925   setPolicy(TopCand.Policy, /*IsPostRA=*/false, Top, &Bot);
2926 
2927   // Prefer bottom scheduling when heuristics are silent.
2928   pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
2929   assert(BotCand.Reason != NoCand && "failed to find the first candidate");
2930 
2931   // If either Q has a single candidate that provides the least increase in
2932   // Excess pressure, we can immediately schedule from that Q.
2933   //
2934   // RegionCriticalPSets summarizes the pressure within the scheduled region and
2935   // affects picking from either Q. If scheduling in one direction must
2936   // increase pressure for one of the excess PSets, then schedule in that
2937   // direction first to provide more freedom in the other direction.
2938   if ((BotCand.Reason == RegExcess && !BotCand.isRepeat(RegExcess))
2939       || (BotCand.Reason == RegCritical && !BotCand.isRepeat(RegCritical)))
2940   {
2941     IsTopNode = false;
2942     tracePick(BotCand, IsTopNode);
2943     return BotCand.SU;
2944   }
2945   // Check if the top Q has a better candidate.
2946   pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2947   assert(TopCand.Reason != NoCand && "failed to find the first candidate");
2948 
2949   // Choose the queue with the most important (lowest enum) reason.
2950   if (TopCand.Reason < BotCand.Reason) {
2951     IsTopNode = true;
2952     tracePick(TopCand, IsTopNode);
2953     return TopCand.SU;
2954   }
2955   // Otherwise prefer the bottom candidate, in node order if all else failed.
2956   IsTopNode = false;
2957   tracePick(BotCand, IsTopNode);
2958   return BotCand.SU;
2959 }
2960 
2961 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
2962 SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
2963   if (DAG->top() == DAG->bottom()) {
2964     assert(Top.Available.empty() && Top.Pending.empty() &&
2965            Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
2966     return nullptr;
2967   }
2968   SUnit *SU;
2969   do {
2970     if (RegionPolicy.OnlyTopDown) {
2971       SU = Top.pickOnlyChoice();
2972       if (!SU) {
2973         CandPolicy NoPolicy;
2974         SchedCandidate TopCand(NoPolicy);
2975         pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2976         assert(TopCand.Reason != NoCand && "failed to find a candidate");
2977         tracePick(TopCand, true);
2978         SU = TopCand.SU;
2979       }
2980       IsTopNode = true;
2981     } else if (RegionPolicy.OnlyBottomUp) {
2982       SU = Bot.pickOnlyChoice();
2983       if (!SU) {
2984         CandPolicy NoPolicy;
2985         SchedCandidate BotCand(NoPolicy);
2986         pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
2987         assert(BotCand.Reason != NoCand && "failed to find a candidate");
2988         tracePick(BotCand, false);
2989         SU = BotCand.SU;
2990       }
2991       IsTopNode = false;
2992     } else {
2993       SU = pickNodeBidirectional(IsTopNode);
2994     }
2995   } while (SU->isScheduled);
2996 
2997   if (SU->isTopReady())
2998     Top.removeReady(SU);
2999   if (SU->isBottomReady())
3000     Bot.removeReady(SU);
3001 
3002   DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
3003   return SU;
3004 }
3005 
3006 void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
3007 
3008   MachineBasicBlock::iterator InsertPos = SU->getInstr();
3009   if (!isTop)
3010     ++InsertPos;
3011   SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;
3012 
3013   // Find already scheduled copies with a single physreg dependence and move
3014   // them adjacent to the scheduled instruction (below it when bottom-up).
3015   for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end();
3016        I != E; ++I) {
3017     if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg()))
3018       continue;
3019     SUnit *DepSU = I->getSUnit();
3020     if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
3021       continue;
3022     MachineInstr *Copy = DepSU->getInstr();
3023     if (!Copy->isCopy())
3024       continue;
3025     DEBUG(dbgs() << "  Rescheduling physreg copy ";
3026           I->getSUnit()->dump(DAG));
3027     DAG->moveInstruction(Copy, InsertPos);
3028   }
3029 }
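// Example (hypothetical): scheduling top-down, if SU reads %EAX and the
// already scheduled "%EAX = COPY %vreg0" has SU as its only data successor,
// the copy is moved to sit directly above SU, keeping the %EAX live range
// minimal for the register allocator.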
3030 
3031 /// Update the scheduler's state after scheduling a node. This is the same node
3032 /// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
3033 /// update its state based on the current cycle before MachineSchedStrategy
3034 /// does.
3035 ///
3036 /// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
3037 /// them here. See comments in biasPhysRegCopy.
3038 void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3039   if (IsTopNode) {
3040     SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3041     Top.bumpNode(SU);
3042     if (SU->hasPhysRegUses)
3043       reschedulePhysRegCopies(SU, true);
3044   } else {
3045     SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
3046     Bot.bumpNode(SU);
3047     if (SU->hasPhysRegDefs)
3048       reschedulePhysRegCopies(SU, false);
3049   }
3050 }
3051 
3052 /// Create the standard converging machine scheduler. This will be used as the
3053 /// default scheduler if the target does not set a default.
3054 static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C) {
3055   ScheduleDAGMILive *DAG = new ScheduleDAGMILive(C, make_unique<GenericScheduler>(C));
3056   // Register DAG post-processors.
3057   //
3058   // FIXME: extend the mutation API to allow earlier mutations to instantiate
3059   // data and pass it to later mutations. Have a single mutation that gathers
3060   // the interesting nodes in one pass.
3061   DAG->addMutation(make_unique<CopyConstrain>(DAG->TII, DAG->TRI));
3062   if (EnableMemOpCluster) {
3063     if (DAG->TII->enableClusterLoads())
3064       DAG->addMutation(make_unique<LoadClusterMutation>(DAG->TII, DAG->TRI));
3065     if (DAG->TII->enableClusterStores())
3066       DAG->addMutation(make_unique<StoreClusterMutation>(DAG->TII, DAG->TRI));
3067   }
3068   if (EnableMacroFusion)
3069     DAG->addMutation(make_unique<MacroFusion>(*DAG->TII, *DAG->TRI));
3070   return DAG;
3071 }
3072 
3073 static MachineSchedRegistry
3074 GenericSchedRegistry("converge", "Standard converging scheduler.",
3075                      createGenericSchedLive);
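// The registry name selects this strategy explicitly on the command line,
// e.g. "llc -enable-misched -misched=converge" (illustrative invocation;
// without the flag, the target default or this generic scheduler is used).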
3076 
3077 //===----------------------------------------------------------------------===//
3078 // PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
3079 //===----------------------------------------------------------------------===//
3080 
3081 void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
3082   DAG = Dag;
3083   SchedModel = DAG->getSchedModel();
3084   TRI = DAG->TRI;
3085 
3086   Rem.init(DAG, SchedModel);
3087   Top.init(DAG, SchedModel, &Rem);
3088   BotRoots.clear();
3089 
3090   // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
3091   // or are disabled, then these HazardRecs will be disabled.
3092   const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
3093   if (!Top.HazardRec) {
3094     Top.HazardRec =
3095         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
3096             Itin, DAG);
3097   }
3098 }
3099 
3101 void PostGenericScheduler::registerRoots() {
3102   Rem.CriticalPath = DAG->ExitSU.getDepth();
3103 
3104   // Some roots may not feed into ExitSU. Check all of them just in case.
3105   for (SmallVectorImpl<SUnit*>::const_iterator
3106          I = BotRoots.begin(), E = BotRoots.end(); I != E; ++I) {
3107     if ((*I)->getDepth() > Rem.CriticalPath)
3108       Rem.CriticalPath = (*I)->getDepth();
3109   }
3110   DEBUG(dbgs() << "Critical Path(PGS-RR): " << Rem.CriticalPath << '\n');
3111   if (DumpCriticalPathLength) {
3112     errs() << "Critical Path(PGS-RR): " << Rem.CriticalPath << '\n';
3113   }
3114 }
3115 
3116 /// Apply a set of heuristics to a new candidate for PostRA scheduling.
3117 ///
3118 /// \param Cand provides the policy and current best candidate.
3119 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
3120 void PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
3121                                         SchedCandidate &TryCand) {
3122 
3123   // Initialize the candidate if needed.
3124   if (!Cand.isValid()) {
3125     TryCand.Reason = NodeOrder;
3126     return;
3127   }
3128 
3129   // Prioritize instructions that read unbuffered resources by stall cycles.
3130   if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
3131               Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
3132     return;
3133 
3134   // Avoid critical resource consumption and balance the schedule.
3135   if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
3136               TryCand, Cand, ResourceReduce))
3137     return;
3138   if (tryGreater(TryCand.ResDelta.DemandedResources,
3139                  Cand.ResDelta.DemandedResources,
3140                  TryCand, Cand, ResourceDemand))
3141     return;
3142 
3143   // Avoid serializing long latency dependence chains.
3144   if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
3145     return;
3146   }
3147 
3148   // Fall through to original instruction order.
3149   if (TryCand.SU->NodeNum < Cand.SU->NodeNum)
3150     TryCand.Reason = NodeOrder;
3151 }
3152 
3153 void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
3154   ReadyQueue &Q = Top.Available;
3155 
3156   DEBUG(Q.dump());
3157 
3158   for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
3159     SchedCandidate TryCand(Cand.Policy);
3160     TryCand.SU = *I;
3161     TryCand.initResourceDelta(DAG, SchedModel);
3162     tryCandidate(Cand, TryCand);
3163     if (TryCand.Reason != NoCand) {
3164       Cand.setBest(TryCand);
3165       DEBUG(traceCandidate(Cand));
3166     }
3167   }
3168 }
3169 
3170 /// Pick the next node to schedule.
3171 SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
3172   if (DAG->top() == DAG->bottom()) {
3173     assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
3174     return nullptr;
3175   }
3176   SUnit *SU;
3177   do {
3178     SU = Top.pickOnlyChoice();
3179     if (!SU) {
3180       CandPolicy NoPolicy;
3181       SchedCandidate TopCand(NoPolicy);
3182       // Set the top-down policy based on the state of the current top zone and
3183       // the instructions outside the zone, including the bottom zone.
3184       setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
3185       pickNodeFromQueue(TopCand);
3186       assert(TopCand.Reason != NoCand && "failed to find a candidate");
3187       tracePick(TopCand, true);
3188       SU = TopCand.SU;
3189     }
3190   } while (SU->isScheduled);
3191 
3192   IsTopNode = true;
3193   Top.removeReady(SU);
3194 
3195   DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
3196   return SU;
3197 }
3198 
3199 /// Called after ScheduleDAGMI has scheduled an instruction and updated
3200 /// scheduled/remaining flags in the DAG nodes.
3201 void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3202   SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3203   Top.bumpNode(SU);
3204 }
3205 
3206 /// Create a generic scheduler with no vreg liveness or DAG mutation passes.
3207 static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C) {
3208   return new ScheduleDAGMI(C, make_unique<PostGenericScheduler>(C), /*IsPostRA=*/true);
3209 }
3210 
3211 //===----------------------------------------------------------------------===//
3212 // ILP Scheduler. Currently for experimental analysis of heuristics.
3213 //===----------------------------------------------------------------------===//
3214 
3215 namespace {
3216 /// \brief Order nodes by the ILP metric.
3217 struct ILPOrder {
3218   const SchedDFSResult *DFSResult;
3219   const BitVector *ScheduledTrees;
3220   bool MaximizeILP;
3221 
3222   ILPOrder(bool MaxILP)
3223     : DFSResult(nullptr), ScheduledTrees(nullptr), MaximizeILP(MaxILP) {}
3224 
3225   /// \brief Apply a less-than relation on node priority.
3226   ///
3227   /// (Return true if A comes after B in the Q.)
3228   bool operator()(const SUnit *A, const SUnit *B) const {
3229     unsigned SchedTreeA = DFSResult->getSubtreeID(A);
3230     unsigned SchedTreeB = DFSResult->getSubtreeID(B);
3231     if (SchedTreeA != SchedTreeB) {
3232       // Unscheduled trees have lower priority.
3233       if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
3234         return ScheduledTrees->test(SchedTreeB);
3235 
3236       // Trees with shallower connections have lower priority.
3237       if (DFSResult->getSubtreeLevel(SchedTreeA)
3238           != DFSResult->getSubtreeLevel(SchedTreeB)) {
3239         return DFSResult->getSubtreeLevel(SchedTreeA)
3240           < DFSResult->getSubtreeLevel(SchedTreeB);
3241       }
3242     }
3243     if (MaximizeILP)
3244       return DFSResult->getILP(A) < DFSResult->getILP(B);
3245     else
3246       return DFSResult->getILP(A) > DFSResult->getILP(B);
3247   }
3248 };
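// Note on heap direction: ReadyQ below is maintained as a std::make_heap
// max-heap ordered by this comparator, so returning true places A after B
// and the preferred SUnit (e.g. the highest ILP value when MaximizeILP is
// set) surfaces at the heap's front for pop_heap.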
3249 
3250 /// \brief Schedule based on the ILP metric.
3251 class ILPScheduler : public MachineSchedStrategy {
3252   ScheduleDAGMILive *DAG;
3253   ILPOrder Cmp;
3254 
3255   std::vector<SUnit*> ReadyQ;
3256 public:
3257   ILPScheduler(bool MaximizeILP): DAG(nullptr), Cmp(MaximizeILP) {}
3258 
3259   void initialize(ScheduleDAGMI *dag) override {
3260     assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
3261     DAG = static_cast<ScheduleDAGMILive*>(dag);
3262     DAG->computeDFSResult();
3263     Cmp.DFSResult = DAG->getDFSResult();
3264     Cmp.ScheduledTrees = &DAG->getScheduledTrees();
3265     ReadyQ.clear();
3266   }
3267 
3268   void registerRoots() override {
3269     // Restore the heap in ReadyQ with the updated DFS results.
3270     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3271   }
3272 
3273   /// Implement MachineSchedStrategy interface.
3274   /// -----------------------------------------
3275 
3276   /// Callback to select the highest priority node from the ready Q.
3277   SUnit *pickNode(bool &IsTopNode) override {
3278     if (ReadyQ.empty()) return nullptr;
3279     std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3280     SUnit *SU = ReadyQ.back();
3281     ReadyQ.pop_back();
3282     IsTopNode = false;
3283     DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") "
3284           << " ILP: " << DAG->getDFSResult()->getILP(SU)
3285           << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @"
3286           << DAG->getDFSResult()->getSubtreeLevel(
3287             DAG->getDFSResult()->getSubtreeID(SU)) << '\n'
3288           << "Scheduling " << *SU->getInstr());
3289     return SU;
3290   }
3291 
3292   /// \brief Scheduler callback to notify that a new subtree is scheduled.
3293   void scheduleTree(unsigned SubtreeID) override {
3294     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3295   }
3296 
3297   /// Callback after a node is scheduled. SchedDFSResult requires bottom-up
3298   /// scheduling; subtree bookkeeping and heap resort happen in scheduleTree().
3299   void schedNode(SUnit *SU, bool IsTopNode) override {
3300     assert(!IsTopNode && "SchedDFSResult needs bottom-up");
3301   }
3302 
3303   void releaseTopNode(SUnit *) override { /*only called for top roots*/ }
3304 
3305   void releaseBottomNode(SUnit *SU) override {
3306     ReadyQ.push_back(SU);
3307     std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3308   }
3309 };
3310 } // namespace
3311 
3312 static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
3313   return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(true));
3314 }
3315 static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
3316   return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(false));
3317 }
3318 static MachineSchedRegistry ILPMaxRegistry(
3319   "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
3320 static MachineSchedRegistry ILPMinRegistry(
3321   "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);
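// These experimental strategies can be requested the same way, e.g.
// "llc -misched=ilpmax" or "llc -misched=ilpmin" (illustrative command
// lines), which is convenient when comparing heuristics on the same input.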
3322 
3323 //===----------------------------------------------------------------------===//
3324 // Machine Instruction Shuffler for Correctness Testing
3325 //===----------------------------------------------------------------------===//
3326 
3327 #ifndef NDEBUG
3328 namespace {
3329 /// Apply a less-than relation on the node order, which corresponds to the
3330 /// instruction order prior to scheduling. IsReverse implements greater-than.
3331 template<bool IsReverse>
3332 struct SUnitOrder {
3333   bool operator()(SUnit *A, SUnit *B) const {
3334     if (IsReverse)
3335       return A->NodeNum > B->NodeNum;
3336     else
3337       return A->NodeNum < B->NodeNum;
3338   }
3339 };
3340 
3341 /// Reorder instructions as much as possible.
3342 class InstructionShuffler : public MachineSchedStrategy {
3343   bool IsAlternating;
3344   bool IsTopDown;
3345 
3346   // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
3347   // gives nodes with a higher number higher priority, causing the latest
3348   // instructions to be scheduled first.
3349   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
3350     TopQ;
3351   // When scheduling bottom-up, use greater-than as the queue priority.
3352   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
3353     BottomQ;
3354 public:
3355   InstructionShuffler(bool alternate, bool topdown)
3356     : IsAlternating(alternate), IsTopDown(topdown) {}
3357 
3358   void initialize(ScheduleDAGMI*) override {
3359     TopQ.clear();
3360     BottomQ.clear();
3361   }
3362 
3363   /// Implement MachineSchedStrategy interface.
3364   /// -----------------------------------------
3365 
3366   SUnit *pickNode(bool &IsTopNode) override {
3367     SUnit *SU;
3368     if (IsTopDown) {
3369       do {
3370         if (TopQ.empty()) return nullptr;
3371         SU = TopQ.top();
3372         TopQ.pop();
3373       } while (SU->isScheduled);
3374       IsTopNode = true;
3375     } else {
3376       do {
3377         if (BottomQ.empty()) return nullptr;
3378         SU = BottomQ.top();
3379         BottomQ.pop();
3380       } while (SU->isScheduled);
3381       IsTopNode = false;
3382     }
3383     if (IsAlternating)
3384       IsTopDown = !IsTopDown;
3385     return SU;
3386   }
3387 
3388   void schedNode(SUnit *SU, bool IsTopNode) override {}
3389 
3390   void releaseTopNode(SUnit *SU) override {
3391     TopQ.push(SU);
3392   }
3393   void releaseBottomNode(SUnit *SU) override {
3394     BottomQ.push(SU);
3395   }
3396 };
3397 } // namespace
3398 
3399 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
3400   bool Alternate = !ForceTopDown && !ForceBottomUp;
3401   bool TopDown = !ForceBottomUp;
3402   assert((TopDown || !ForceTopDown) &&
3403          "-misched-topdown incompatible with -misched-bottomup");
3404   return new ScheduleDAGMILive(C, make_unique<InstructionShuffler>(Alternate, TopDown));
3405 }
3406 static MachineSchedRegistry ShufflerRegistry(
3407   "shuffle", "Shuffle machine instructions alternating directions",
3408   createInstructionShuffler);
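// In builds with assertions, "llc -misched=shuffle" (illustrative) acts as a
// stress test: any ordering the shuffler produces must still respect the
// DAG's dependences, so a miscompile here points at a missing edge.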
3409 #endif // !NDEBUG
3410 
3411 //===----------------------------------------------------------------------===//
3412 // GraphWriter support for ScheduleDAGMILive.
3413 //===----------------------------------------------------------------------===//
3414 
3415 #ifndef NDEBUG
3416 namespace llvm {
3417 
3418 template<> struct GraphTraits<
3419   ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};
3420 
3421 template<>
3422 struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
3423 
3424   DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
3425 
3426   static std::string getGraphName(const ScheduleDAG *G) {
3427     return G->MF.getName();
3428   }
3429 
3430   static bool renderGraphFromBottomUp() {
3431     return true;
3432   }
3433 
3434   static bool isNodeHidden(const SUnit *Node) {
3435     if (ViewMISchedCutoff == 0)
3436       return false;
3437     return (Node->Preds.size() > ViewMISchedCutoff
3438          || Node->Succs.size() > ViewMISchedCutoff);
3439   }
3440 
3441   /// If you want to override the dot attributes printed for a particular
3442   /// edge, override this method.
3443   static std::string getEdgeAttributes(const SUnit *Node,
3444                                        SUnitIterator EI,
3445                                        const ScheduleDAG *Graph) {
3446     if (EI.isArtificialDep())
3447       return "color=cyan,style=dashed";
3448     if (EI.isCtrlDep())
3449       return "color=blue,style=dashed";
3450     return "";
3451   }
3452 
3453   static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
3454     std::string Str;
3455     raw_string_ostream SS(Str);
3456     const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
3457     const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
3458       static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
3459     SS << "SU:" << SU->NodeNum;
3460     if (DFS)
3461       SS << " I:" << DFS->getNumInstrs(SU);
3462     return SS.str();
3463   }
3464   static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
3465     return G->getGraphNodeLabel(SU);
3466   }
3467 
3468   static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
3469     std::string Str("shape=Mrecord");
3470     const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
3471     const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
3472       static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
3473     if (DFS) {
3474       Str += ",style=filled,fillcolor=\"#";
3475       Str += DOT::getColorString(DFS->getSubtreeID(N));
3476       Str += '"';
3477     }
3478     return Str;
3479   }
3480 };
3481 } // namespace llvm
3482 #endif // NDEBUG
3483 
3484 /// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
3485 /// rendered using 'dot'.
3486 ///
3487 void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
3488 #ifndef NDEBUG
3489   ViewGraph(this, Name, false, Title);
3490 #else
3491   errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
3492          << "systems with Graphviz or gv!\n";
3493 #endif  // NDEBUG
3494 }
3495 
3496 /// Out-of-line implementation with no arguments is handy for gdb.
3497 void ScheduleDAGMI::viewGraph() {
3498   viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
3499 }
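// For example, from a debugger (illustrative gdb session, assuming a
// ScheduleDAGMI* named DAG is in scope):
//
//   (gdb) call DAG->viewGraph()
//
// This renders the current region's DAG using the DOT traits defined above.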
3500