//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

#define DEBUG_TYPE "misched"

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
                       cl::desc("Print critical path length to stdout"));
}

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

/// In some situations a few uninteresting nodes depend on nearly all other
/// nodes in the graph; provide a cutoff to hide them.
static cl::opt<unsigned> ViewMISchedCutoff("view-misched-cutoff", cl::Hidden,
  cl::desc("Hide nodes with more predecessors/successors than the cutoff"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));

static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
  cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
  cl::desc("Only schedule this MBB#"));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

/// Avoid quadratic complexity in unusually large basic blocks by limiting the
/// size of the ready lists.
static cl::opt<unsigned> ReadyListLimit("misched-limit", cl::Hidden,
  cl::desc("Limit ready list to N instructions"), cl::init(256));

static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
  cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(true));

static cl::opt<bool> EnableMemOpCluster("misched-cluster", cl::Hidden,
                                        cl::desc("Enable memop clustering."),
                                        cl::init(true));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}
void ScheduleDAGMutation::anchor() {}

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
    MF(nullptr), MLI(nullptr), MDT(nullptr), PassConfig(nullptr), AA(nullptr),
    LIS(nullptr) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// Base class for a machine scheduler class that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}

  void print(raw_ostream &O, const Module* = nullptr) const override;

protected:
  void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags);
};

/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createMachineScheduler();
};

/// PostMachineScheduler runs after register allocation, shortly before code
/// emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
  PostMachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createPostMachineScheduler();
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "machine-scheduler",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "machine-scheduler",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineSchedulerBase(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

char PostMachineScheduler::ID = 0;

char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;

INITIALIZE_PASS(PostMachineScheduler, "postmisched",
                "PostRA Machine Instruction Scheduler", false, false)

PostMachineScheduler::PostMachineScheduler()
: MachineSchedulerBase(ID) {
  initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return nullptr;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);

static cl::opt<bool> EnableMachineSched(
    "enable-misched",
    cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePostRAMachineSched(
    "enable-post-misched",
    cl::desc("Enable the post-ra machine instruction scheduling pass."),
    cl::init(true), cl::Hidden);

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C);
static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C);

/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return const_cast<MachineInstr*>(
    &*priorNonDebug(MachineBasicBlock::const_iterator(I), Beg));
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  // Cast the return value to nonconst MachineInstr, then cast to an
  // instr_iterator, which does not check for null, finally return a
  // bundle_iterator.
  return MachineBasicBlock::instr_iterator(
    const_cast<MachineInstr*>(
      &*nextIfDebug(MachineBasicBlock::const_iterator(I), End)));
}

/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor != useDefaultMachineSched)
    return Ctor(this);

  // Get the default scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedLive(this);
}

/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(*mf.getFunction()))
    return false;

  if (EnableMachineSched.getNumOccurrences()) {
    if (!EnableMachineSched)
      return false;
  } else if (!mf.getSubtarget().enableMachineScheduler())
    return false;

  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler, false);

  DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(*mf.getFunction()))
    return false;

  if (EnablePostRAMachineSched.getNumOccurrences()) {
    if (!EnablePostRAMachineSched)
      return false;
  } else if (!mf.getSubtarget().enablePostRAScheduler()) {
    DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
    return false;
  }
  DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  PassConfig = &getAnalysis<TargetPassConfig>();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  scheduleRegions(*Scheduler, true);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}

/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need the isCall to enforce
/// the boundary, but there would be no benefit to postRA scheduling across
/// calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
                            MachineBasicBlock *MBB,
                            MachineFunction *MF,
                            const TargetInstrInfo *TII) {
  return MI->isCall() || TII->isSchedulingBoundary(MI, MBB, *MF);
}

/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler,
                                           bool FixKillFlags) {
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(&*MBB);

#ifndef NDEBUG
    if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
      continue;
    if (SchedOnlyBlock.getNumOccurrences()
        && (int)SchedOnlyBlock != MBB->getNumber())
      continue;
#endif

    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    //
    // MBB::size() uses instr_iterator to count. Here we need a bundle to count
    // as a single instruction.
    unsigned RemainingInstrs = std::distance(MBB->begin(), MBB->end());
    for (MachineBasicBlock::iterator RegionEnd = MBB->end();
         RegionEnd != MBB->begin(); RegionEnd = Scheduler.begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end() ||
          isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII)) {
        --RegionEnd;
        // Count the boundary instruction.
        --RemainingInstrs;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      unsigned NumRegionInstrs = 0;
      MachineBasicBlock::iterator I = RegionEnd;
      for (; I != MBB->begin(); --I, --RemainingInstrs) {
        if (isSchedBoundary(&*std::prev(I), &*MBB, MF, TII))
          break;
        if (!I->isDebugValue())
          ++NumRegionInstrs;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == std::prev(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n  From: " << *I << "    To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " RegionInstrs: " << NumRegionInstrs
            << " Remaining: " << RemainingInstrs << "\n");
      if (DumpCriticalPathLength) {
        errs() << MF->getName();
        errs() << ":BB# " << MBB->getNumber();
        errs() << " " << MBB->getName() << " \n";
      }

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler.begin();
    }
    assert(RemainingInstrs == 0 && "Instruction count mismatch!");
    Scheduler.finishBlock();
    // FIXME: Ideally, no further passes should rely on kill flags. However,
    // thumb2 size reduction is currently an exception, so the PostMIScheduler
    // needs to do this.
    if (FixKillFlags)
      Scheduler.fixupKills(&*MBB);
  }
  Scheduler.finalizeSchedule();
}

void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

LLVM_DUMP_METHOD
void ReadyQueue::dump() {
  dbgs() << "Queue " << Name << ": ";
  for (unsigned i = 0, e = Queue.size(); i < e; ++i)
    dbgs() << Queue[i]->NodeNum << " ";
  dbgs() << "\n";
}

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
// virtual registers.
//===----------------------------------------------------------------------===//

// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() {
}

bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

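  // A weak edge does not gate the successor's readiness; just note cluster
  // edges so the strategy can try to schedule the clustered nodes adjacently.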
  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
    SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();

  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
    PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();

  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                     MachineBasicBlock::iterator begin,
                                     MachineBasicBlock::iterator end,
                                     unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  SchedImpl->initPolicy(begin, end, regioninstrs);
}

/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
  MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals
  if (LIS)
    LIS->handleMove(*MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}

/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
  DEBUG(SchedImpl->dumpPolicy());

  // Build the DAG.
  buildSchedGraph(AA);

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

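  // Main scheduling loop: repeatedly ask the strategy for the next node from
  // either end of the region and move the instruction into place.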
  bool IsTopNode = false;
  while (true) {
    DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    } else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
    }
    // Notify the scheduling strategy before updating the DAG.
    // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
    // runs, it can then use the accurate ReadyCycle time to determine whether
    // newly released nodes can move to the readyQ.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
    Mutations[i]->apply(this);
  }
}

void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                      SmallVectorImpl<SUnit*> &BotRoots) {
  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    SUnit *SU = &(*I);
    assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU->biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!I->NumPredsLeft)
      TopRoots.push_back(SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!I->NumSuccsLeft)
      BotRoots.push_back(SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = nullptr;
  NextClusterPred = nullptr;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
    SchedImpl->releaseTopNode(*I);
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

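  // Walk the recorded (DbgValue, preceding instruction) pairs in reverse so
  // each DBG_VALUE is re-inserted directly after the instruction it followed
  // before scheduling.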
  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == std::prev(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);

  SUPressureDiffs.clear();

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
  ShouldTrackLaneMasks = SchedImpl->shouldTrackLaneMasks();

  if (ShouldTrackLaneMasks) {
    if (!ShouldTrackPressure)
      report_fatal_error("ShouldTrackLaneMasks requires ShouldTrackPressure");
    // Dead subregister defs have no users and therefore no dependencies,
    // moving them around may cause liveintervals to degrade into multiple
    // components. Change independent components to have their own vreg to avoid
    // this.
    if (!DisconnectedComponentsRenamed)
      LIS->renameDisconnectedComponents();
  }
}

// Set up the register pressure trackers for the top-scheduled and
// bottom-scheduled regions.
void ScheduleDAGMILive::initRegPressure() {
  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin,
                    ShouldTrackLaneMasks, false);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                    ShouldTrackLaneMasks, false);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    DEBUG(dbgs() << "Live Thru: ";
          dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // For each live out vreg reduce the pressure change associated with other
  // uses of the same vreg below the live-out reaching def.
  updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd) {
    SmallVector<RegisterMaskPair, 8> LiveUses;
    BotRPTracker.recede(&LiveUses);
    updatePressureDiffs(LiveUses);
  }

  DEBUG(
    dbgs() << "Top Pressure:\n";
    dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
    dbgs() << "Bottom Pressure:\n";
    dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
  );

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      DEBUG(dbgs() << TRI->getRegPressureSetName(i)
            << " Limit " << Limit
            << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureChange(i));
    }
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].getPSet()) << " ";
        dbgs() << "\n");
}

void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
                        const std::vector<unsigned> &NewMaxPressure) {
  const PressureDiff &PDiff = getPressureDiff(SU);
  unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
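  // PDiff entries and RegionCriticalPSets are both ordered by pressure-set ID,
  // so walk them in lockstep, recording a new maximum for any critical set
  // this instruction touches.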
  for (PressureDiff::const_iterator I = PDiff.begin(), E = PDiff.end();
       I != E; ++I) {
    if (!I->isValid())
      break;
    unsigned ID = I->getPSet();
    while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
      ++CritIdx;
    if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
      if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
          && NewMaxPressure[ID] <= INT16_MAX)
        RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
    }
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
    if (NewMaxPressure[ID] >= Limit - 2) {
      DEBUG(dbgs() << "  " << TRI->getRegPressureSetName(ID) << ": "
            << NewMaxPressure[ID]
            << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ") << Limit
            << "(+ " << BotRPTracker.getLiveThru()[ID] << " livethru)\n");
    }
  }
}

/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(
    ArrayRef<RegisterMaskPair> LiveUses) {
  for (const RegisterMaskPair &P : LiveUses) {
    unsigned Reg = P.RegUnit;
    /// FIXME: Currently assuming single-use physregs.
    if (!TRI->isVirtualRegister(Reg))
      continue;

    if (ShouldTrackLaneMasks) {
      // If the register has just become live then other uses won't change
      // this fact anymore => decrement pressure.
      // If the register has just become dead then other uses make it come
      // back to life => increment pressure.
      bool Decrement = P.LaneMask != 0;

      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit &SU = *V2SU.SU;
        if (SU.isScheduled || &SU == &ExitSU)
          continue;

        PressureDiff &PDiff = getPressureDiff(&SU);
        PDiff.addPressureChange(Reg, Decrement, &MRI);
        DEBUG(
          dbgs() << "  UpdateRegP: SU(" << SU.NodeNum << ") "
                 << PrintReg(Reg, TRI) << ':' << PrintLaneMask(P.LaneMask)
                 << ' ' << *SU.getInstr();
          dbgs() << "              to ";
          PDiff.dump(*TRI);
        );
      }
    } else {
      assert(P.LaneMask != 0);
      DEBUG(dbgs() << "  LiveReg: " << PrintVRegOrUnit(Reg, TRI) << "\n");
      // This may be called before CurrentBottom has been initialized. However,
      // BotRPTracker must have a valid position. We want the value live into the
      // instruction or live out of the block, so ask for the previous
      // instruction's live-out.
      const LiveInterval &LI = LIS->getInterval(Reg);
      VNInfo *VNI;
      MachineBasicBlock::const_iterator I =
        nextIfDebug(BotRPTracker.getPos(), BB->end());
      if (I == BB->end())
        VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
      else {
        LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*I));
        VNI = LRQ.valueIn();
      }
      // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
      assert(VNI && "No live value at use.");
      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit *SU = V2SU.SU;
        // If this use comes before the reaching def, it cannot be a last use,
        // so decrease its pressure change.
        if (!SU->isScheduled && SU != &ExitSU) {
          LiveQueryResult LRQ =
              LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
          if (LRQ.valueIn() == VNI) {
            PressureDiff &PDiff = getPressureDiff(SU);
            PDiff.addPressureChange(Reg, true, &MRI);
            DEBUG(
              dbgs() << "  UpdateRegP: SU(" << SU->NodeNum << ") "
                     << *SU->getInstr();
              dbgs() << "              to ";
              PDiff.dump(*TRI);
            );
          }
        }
      }
    }
  }
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMILive then it will want to override this virtual method in order
/// to update any specialized state.
void ScheduleDAGMILive::schedule() {
  DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
  DEBUG(SchedImpl->dumpPolicy());
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(
    for (const SUnit &SU : SUnits) {
      SU.dumpAll(this);
      if (ShouldTrackPressure) {
        dbgs() << "  Pressure Diff      : ";
        getPressureDiff(&SU).dump(*TRI);
      }
      dbgs() << '\n';
    }
  );
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    if (DFSResult) {
      unsigned SubtreeID = DFSResult->getSubtreeID(SU);
      if (!ScheduledTrees.test(SubtreeID)) {
        ScheduledTrees.set(SubtreeID);
        DFSResult->scheduleTree(SubtreeID);
        SchedImpl->scheduleTree(SubtreeID);
      }
    }

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Build the DAG and setup three register pressure trackers.
void ScheduleDAGMILive::buildDAGWithRegPressure() {
  if (!ShouldTrackPressure) {
    RPTracker.reset();
    RegionCriticalPSets.clear();
    buildSchedGraph(AA);
    return;
  }

  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 ShouldTrackLaneMasks, /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker, &SUPressureDiffs, LIS, ShouldTrackLaneMasks);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

void ScheduleDAGMILive::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single-block loops. To handle loops that
/// span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes. For example, consider
/// the following instruction sequence where each instruction has unit latency
/// and defines an eponymous virtual register:
///
/// a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
///
/// This could be relevant to PostRA scheduling, but is currently implemented
/// assuming LiveIntervals.
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
  // This only applies to single-block loops.
1216   if (!BB->isSuccessor(BB))
1217     return 0;
1218 
1219   unsigned MaxCyclicLatency = 0;
1220   // Visit each live out vreg def to find def/use pairs that cross iterations.
1221   for (const RegisterMaskPair &P : RPTracker.getPressure().LiveOutRegs) {
1222     unsigned Reg = P.RegUnit;
1223     if (!TRI->isVirtualRegister(Reg))
1224         continue;
1225     const LiveInterval &LI = LIS->getInterval(Reg);
1226     const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
1227     if (!DefVNI)
1228       continue;
1229 
1230     MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
1231     const SUnit *DefSU = getSUnit(DefMI);
1232     if (!DefSU)
1233       continue;
1234 
1235     unsigned LiveOutHeight = DefSU->getHeight();
1236     unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
1237     // Visit all local users of the vreg def.
1238     for (const VReg2SUnit &V2SU
1239          : make_range(VRegUses.find(Reg), VRegUses.end())) {
1240       SUnit *SU = V2SU.SU;
1241       if (SU == &ExitSU)
1242         continue;
1243 
1244       // Only consider uses of the phi.
1245       LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
1246       if (!LRQ.valueIn()->isPHIDef())
1247         continue;
1248 
1249       // Assume that a path spanning two iterations is a cycle, which could
1250       // overestimate in strange cases. This allows cyclic latency to be
1251       // estimated as the minimum slack of the vreg's depth or height.
1252       unsigned CyclicLatency = 0;
1253       if (LiveOutDepth > SU->getDepth())
1254         CyclicLatency = LiveOutDepth - SU->getDepth();
1255 
1256       unsigned LiveInHeight = SU->getHeight() + DefSU->Latency;
1257       if (LiveInHeight > LiveOutHeight) {
1258         if (LiveInHeight - LiveOutHeight < CyclicLatency)
1259           CyclicLatency = LiveInHeight - LiveOutHeight;
1260       } else
1261         CyclicLatency = 0;
1262 
1263       DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
1264             << SU->NodeNum << ") = " << CyclicLatency << "c\n");
1265       if (CyclicLatency > MaxCyclicLatency)
1266         MaxCyclicLatency = CyclicLatency;
1267     }
1268   }
1269   DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
1270   return MaxCyclicLatency;
1271 }
1272 
1273 /// Release ExitSU predecessors and setup scheduler queues. Re-position
1274 /// the Top RP tracker in case the region beginning has changed.
1275 void ScheduleDAGMILive::initQueues(ArrayRef<SUnit*> TopRoots,
1276                                    ArrayRef<SUnit*> BotRoots) {
1277   ScheduleDAGMI::initQueues(TopRoots, BotRoots);
1278   if (ShouldTrackPressure) {
1279     assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
1280     TopRPTracker.setPos(CurrentTop);
1281   }
1282 }
1283 
1284 /// Move an instruction and update register pressure.
1285 void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
1286   // Move the instruction to its new location in the instruction stream.
1287   MachineInstr *MI = SU->getInstr();
1288 
1289   if (IsTopNode) {
1290     assert(SU->isTopReady() && "node still has unscheduled dependencies");
1291     if (&*CurrentTop == MI)
1292       CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
1293     else {
1294       moveInstruction(MI, CurrentTop);
1295       TopRPTracker.setPos(MI);
1296     }
1297 
1298     if (ShouldTrackPressure) {
1299       // Update top scheduled pressure.
1300       RegisterOperands RegOpers;
1301       RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
1302       if (ShouldTrackLaneMasks) {
1303         // Adjust liveness and add missing dead+read-undef flags.
1304         SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
1305         RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
1306       } else {
1307         // Adjust for missing dead-def flags.
1308         RegOpers.detectDeadDefs(*MI, *LIS);
1309       }
1310 
1311       TopRPTracker.advance(RegOpers);
1312       assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
1313       DEBUG(
1314         dbgs() << "Top Pressure:\n";
1315         dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
1316       );
1317 
1318       updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
1319     }
1320   } else {
1321     assert(SU->isBottomReady() && "node still has unscheduled dependencies");
1322     MachineBasicBlock::iterator priorII =
1323       priorNonDebug(CurrentBottom, CurrentTop);
1324     if (&*priorII == MI)
1325       CurrentBottom = priorII;
1326     else {
1327       if (&*CurrentTop == MI) {
1328         CurrentTop = nextIfDebug(++CurrentTop, priorII);
1329         TopRPTracker.setPos(CurrentTop);
1330       }
1331       moveInstruction(MI, CurrentBottom);
1332       CurrentBottom = MI;
1333     }
1334     if (ShouldTrackPressure) {
1335       RegisterOperands RegOpers;
1336       RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
1337       if (ShouldTrackLaneMasks) {
1338         // Adjust liveness and add missing dead+read-undef flags.
1339         SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
1340         RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
1341       } else {
1342         // Adjust for missing dead-def flags.
1343         RegOpers.detectDeadDefs(*MI, *LIS);
1344       }
1345 
1346       BotRPTracker.recedeSkipDebugValues();
1347       SmallVector<RegisterMaskPair, 8> LiveUses;
1348       BotRPTracker.recede(RegOpers, &LiveUses);
1349       assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
1350       DEBUG(
1351         dbgs() << "Bottom Pressure:\n";
1352         dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
1353       );
1354 
1355       updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
1356       updatePressureDiffs(LiveUses);
1357     }
1358   }
1359 }
1360 
1361 //===----------------------------------------------------------------------===//
1362 // BaseMemOpClusterMutation - DAG post-processing to cluster loads or stores.
1363 //===----------------------------------------------------------------------===//
1364 
1365 namespace {
1366 /// \brief Post-process the DAG to create cluster edges between neighboring
1367 /// loads or between neighboring stores.
1368 class BaseMemOpClusterMutation : public ScheduleDAGMutation {
1369   struct MemOpInfo {
1370     SUnit *SU;
1371     unsigned BaseReg;
1372     int64_t Offset;
1373     MemOpInfo(SUnit *su, unsigned reg, int64_t ofs)
1374         : SU(su), BaseReg(reg), Offset(ofs) {}
1375 
1376     bool operator<(const MemOpInfo&RHS) const {
1377       return std::tie(BaseReg, Offset) < std::tie(RHS.BaseReg, RHS.Offset);
1378     }
1379   };
1380 
1381   const TargetInstrInfo *TII;
1382   const TargetRegisterInfo *TRI;
1383   bool IsLoad;
1384 
1385 public:
1386   BaseMemOpClusterMutation(const TargetInstrInfo *tii,
1387                            const TargetRegisterInfo *tri, bool IsLoad)
1388       : TII(tii), TRI(tri), IsLoad(IsLoad) {}
1389 
1390   void apply(ScheduleDAGInstrs *DAGInstrs) override;
1391 
1392 protected:
1393   void clusterNeighboringMemOps(ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG);
1394 };
1395 
1396 class StoreClusterMutation : public BaseMemOpClusterMutation {
1397 public:
1398   StoreClusterMutation(const TargetInstrInfo *tii,
1399                        const TargetRegisterInfo *tri)
1400       : BaseMemOpClusterMutation(tii, tri, false) {}
1401 };
1402 
1403 class LoadClusterMutation : public BaseMemOpClusterMutation {
1404 public:
1405   LoadClusterMutation(const TargetInstrInfo *tii, const TargetRegisterInfo *tri)
1406       : BaseMemOpClusterMutation(tii, tri, true) {}
1407 };
1408 } // anonymous namespace
1409 
1410 void BaseMemOpClusterMutation::clusterNeighboringMemOps(
1411     ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG) {
1412   SmallVector<MemOpInfo, 32> MemOpRecords;
1413   for (unsigned Idx = 0, End = MemOps.size(); Idx != End; ++Idx) {
1414     SUnit *SU = MemOps[Idx];
1415     unsigned BaseReg;
1416     int64_t Offset;
1417     if (TII->getMemOpBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
1418       MemOpRecords.push_back(MemOpInfo(SU, BaseReg, Offset));
1419   }
1420   if (MemOpRecords.size() < 2)
1421     return;
1422 
1423   std::sort(MemOpRecords.begin(), MemOpRecords.end());
1424   unsigned ClusterLength = 1;
1425   for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
1426     if (MemOpRecords[Idx].BaseReg != MemOpRecords[Idx+1].BaseReg) {
1427       ClusterLength = 1;
1428       continue;
1429     }
1430 
1431     SUnit *SUa = MemOpRecords[Idx].SU;
1432     SUnit *SUb = MemOpRecords[Idx+1].SU;
1433     if (TII->shouldClusterMemOps(SUa->getInstr(), SUb->getInstr(), ClusterLength)
1434         && DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
1435       DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
1436             << SUb->NodeNum << ")\n");
1437       // Copy successor edges from SUa to SUb. Interleaving computation
1438       // dependent on SUa can prevent load combining due to register reuse.
1439       // Predecessor edges do not need to be copied from SUb to SUa since nearby
1440       // loads should have effectively the same inputs.
1441       for (SUnit::const_succ_iterator
1442              SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
1443         if (SI->getSUnit() == SUb)
1444           continue;
1445         DEBUG(dbgs() << "  Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
1446         DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
1447       }
1448       ++ClusterLength;
1449     } else
1450       ClusterLength = 1;
1451   }
1452 }
1453 
1454 /// \brief Callback from DAG postProcessing to create cluster edges between
1455 /// neighboring loads or stores.
1456 void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAGInstrs) {
1457   ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
1458 
1459   // Map DAG NodeNum to store chain ID.
1460   DenseMap<unsigned, unsigned> StoreChainIDs;
1461   // Map each store chain to a set of dependent MemOps.
1462   SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
1463   for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
1464     SUnit *SU = &DAG->SUnits[Idx];
1465     if ((IsLoad && !SU->getInstr()->mayLoad()) ||
1466         (!IsLoad && !SU->getInstr()->mayStore()))
1467       continue;
1468 
1469     unsigned ChainPredID = DAG->SUnits.size();
1470     for (SUnit::const_pred_iterator
1471            PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
1472       if (PI->isCtrl()) {
1473         ChainPredID = PI->getSUnit()->NodeNum;
1474         break;
1475       }
1476     }
1477     // Check if this chain-like pred has been seen
1478     // before. ChainPredID==MaxNodeID at the top of the schedule.
1479     unsigned NumChains = StoreChainDependents.size();
1480     std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
1481       StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
1482     if (Result.second)
1483       StoreChainDependents.resize(NumChains + 1);
1484     StoreChainDependents[Result.first->second].push_back(SU);
1485   }
1486 
1487   // Iterate over the store chains.
1488   for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
1489     clusterNeighboringMemOps(StoreChainDependents[Idx], DAG);
1490 }
1491 
1492 //===----------------------------------------------------------------------===//
1493 // MacroFusion - DAG post-processing to encourage fusion of macro ops.
1494 //===----------------------------------------------------------------------===//
1495 
1496 namespace {
1497 /// \brief Post-process the DAG to create cluster edges between instructions
1498 /// that may be fused by the processor into a single operation.
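///
/// A typical (target-dependent, assumed here) case is compare-and-branch
/// fusion:
///   CMP  %x0, 0
///   B.EQ %bb.1
/// A cluster edge from the compare's SUnit to ExitSU encourages scheduling
/// the pair back-to-back so the processor can fuse them.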
1499 class MacroFusion : public ScheduleDAGMutation {
1500   const TargetInstrInfo &TII;
1501   const TargetRegisterInfo &TRI;
1502 public:
1503   MacroFusion(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI)
1504     : TII(TII), TRI(TRI) {}
1505 
1506   void apply(ScheduleDAGInstrs *DAGInstrs) override;
1507 };
1508 } // anonymous namespace
1509 
1510 /// Returns true if \p MI reads a register written by \p Other.
1511 static bool HasDataDep(const TargetRegisterInfo &TRI, const MachineInstr &MI,
1512                        const MachineInstr &Other) {
1513   for (const MachineOperand &MO : MI.uses()) {
1514     if (!MO.isReg() || !MO.readsReg())
1515       continue;
1516 
1517     unsigned Reg = MO.getReg();
1518     if (Other.modifiesRegister(Reg, &TRI))
1519       return true;
1520   }
1521   return false;
1522 }
1523 
1524 /// \brief Callback from DAG postProcessing to create cluster edges to encourage
1525 /// fused operations.
1526 void MacroFusion::apply(ScheduleDAGInstrs *DAGInstrs) {
1527   ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
1528 
1529   // For now, assume targets can only fuse with the branch.
1530   SUnit &ExitSU = DAG->ExitSU;
1531   MachineInstr *Branch = ExitSU.getInstr();
1532   if (!Branch)
1533     return;
1534 
1535   for (SUnit &SU : DAG->SUnits) {
1536     // SUnits with successors can't be scheduled in front of the ExitSU.
1537     if (!SU.Succs.empty())
1538       continue;
1539     // We only care if the node writes to a register that the branch reads.
1540     MachineInstr *Pred = SU.getInstr();
1541     if (!HasDataDep(TRI, *Branch, *Pred))
1542       continue;
1543 
1544     if (!TII.shouldScheduleAdjacent(Pred, Branch))
1545       continue;
1546 
1547     // Create a single weak edge from SU to ExitSU. The only effect is to cause
1548     // bottom-up scheduling to heavily prioritize the clustered SU.  There is no
1549     // need to copy predecessor edges from ExitSU to SU, since top-down
1550     // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
1551     // of SU, we could create an artificial edge from the deepest root, but it
1552     // hasn't been needed yet.
1553     bool Success = DAG->addEdge(&ExitSU, SDep(&SU, SDep::Cluster));
1554     (void)Success;
1555     assert(Success && "No DAG nodes should be reachable from ExitSU");
1556 
1557     DEBUG(dbgs() << "Macro Fuse SU(" << SU.NodeNum << ")\n");
1558     break;
1559   }
1560 }
1561 
1562 //===----------------------------------------------------------------------===//
1563 // CopyConstrain - DAG post-processing to encourage copy elimination.
1564 //===----------------------------------------------------------------------===//
1565 
1566 namespace {
1567 /// \brief Post-process the DAG to create weak edges from all uses of a copy to
1568 /// the one use that defines the copy's source vreg, most likely an induction
1569 /// variable increment.
1570 class CopyConstrain : public ScheduleDAGMutation {
1571   // Transient state.
1572   SlotIndex RegionBeginIdx;
1573   // RegionEndIdx is the slot index of the last non-debug instruction in the
1574   // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
1575   SlotIndex RegionEndIdx;
1576 public:
1577   CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}
1578 
1579   void apply(ScheduleDAGInstrs *DAGInstrs) override;
1580 
1581 protected:
1582   void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
1583 };
1584 } // anonymous namespace
1585 
1586 /// constrainLocalCopy handles two possibilities:
1587 /// 1) Local src:
1588 /// I0:     = dst
1589 /// I1: src = ...
1590 /// I2:     = dst
1591 /// I3: dst = src (copy)
1592 /// (create pred->succ edges I0->I1, I2->I1)
1593 ///
1594 /// 2) Local copy:
1595 /// I0: dst = src (copy)
1596 /// I1:     = dst
1597 /// I2: src = ...
1598 /// I3:     = dst
1599 /// (create pred->succ edges I1->I2, I3->I2)
1600 ///
1601 /// Although the MachineScheduler is currently constrained to single blocks,
1602 /// this algorithm should handle extended blocks. An EBB is a set of
1603 /// contiguously numbered blocks such that the previous block in the EBB is
1604 /// always the single predecessor.
1605 void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
1606   LiveIntervals *LIS = DAG->getLIS();
1607   MachineInstr *Copy = CopySU->getInstr();
1608 
1609   // Check for pure vreg copies.
1610   const MachineOperand &SrcOp = Copy->getOperand(1);
1611   unsigned SrcReg = SrcOp.getReg();
1612   if (!TargetRegisterInfo::isVirtualRegister(SrcReg) || !SrcOp.readsReg())
1613     return;
1614 
1615   const MachineOperand &DstOp = Copy->getOperand(0);
1616   unsigned DstReg = DstOp.getReg();
1617   if (!TargetRegisterInfo::isVirtualRegister(DstReg) || DstOp.isDead())
1618     return;
1619 
1620   // Check if either the dest or source is local. If it's live across a back
1621   // edge, it's not local. Note that if both vregs are live across the back
1622   // edge, we cannot successfully constrain the copy without cyclic scheduling.
1623   // If both the copy's source and dest are local live intervals, then we
1624   // should treat the dest as the global for the purpose of adding
1625   // constraints. This adds edges from source's other uses to the copy.
1626   unsigned LocalReg = SrcReg;
1627   unsigned GlobalReg = DstReg;
1628   LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
1629   if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
1630     LocalReg = DstReg;
1631     GlobalReg = SrcReg;
1632     LocalLI = &LIS->getInterval(LocalReg);
1633     if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
1634       return;
1635   }
1636   LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);
1637 
1638   // Find the global segment after the start of the local LI.
1639   LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
1640   // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
1641   // local live range. We could create edges from other global uses to the local
1642   // start, but the coalescer should have already eliminated these cases, so
1643   // don't bother dealing with it.
1644   if (GlobalSegment == GlobalLI->end())
1645     return;
1646 
1647   // If GlobalSegment is killed at the LocalLI->start, the call to find()
1648   // returned the next global segment. But if GlobalSegment overlaps with
1649   // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
1650   // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
1651   if (GlobalSegment->contains(LocalLI->beginIndex()))
1652     ++GlobalSegment;
1653 
1654   if (GlobalSegment == GlobalLI->end())
1655     return;
1656 
1657   // Check if GlobalLI contains a hole in the vicinity of LocalLI.
1658   if (GlobalSegment != GlobalLI->begin()) {
1659     // Two address defs have no hole.
1660     if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
1661                                GlobalSegment->start)) {
1662       return;
1663     }
1664     // If the prior global segment may be defined by the same two-address
1665     // instruction that also defines LocalLI, then we can't make a hole here.
1666     if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
1667                                LocalLI->beginIndex())) {
1668       return;
1669     }
1670     // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
1671     // it would be a disconnected component in the live range.
1672     assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
1673            "Disconnected LRG within the scheduling region.");
1674   }
1675   MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
1676   if (!GlobalDef)
1677     return;
1678 
1679   SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
1680   if (!GlobalSU)
1681     return;
1682 
1683   // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
1684   // constraining the uses of the last local def to precede GlobalDef.
1685   SmallVector<SUnit*,8> LocalUses;
1686   const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
1687   MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
1688   SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
1689   for (SUnit::const_succ_iterator
1690          I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
1691        I != E; ++I) {
1692     if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
1693       continue;
1694     if (I->getSUnit() == GlobalSU)
1695       continue;
1696     if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
1697       return;
1698     LocalUses.push_back(I->getSUnit());
1699   }
1700   // Open the top of the GlobalLI hole by constraining any earlier global uses
1701   // to precede the start of LocalLI.
1702   SmallVector<SUnit*,8> GlobalUses;
1703   MachineInstr *FirstLocalDef =
1704     LIS->getInstructionFromIndex(LocalLI->beginIndex());
1705   SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
1706   for (SUnit::const_pred_iterator
1707          I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
1708     if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
1709       continue;
1710     if (I->getSUnit() == FirstLocalSU)
1711       continue;
1712     if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
1713       return;
1714     GlobalUses.push_back(I->getSUnit());
1715   }
1716   DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
1717   // Add the weak edges.
1718   for (SmallVectorImpl<SUnit*>::const_iterator
1719          I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
1720     DEBUG(dbgs() << "  Local use SU(" << (*I)->NodeNum << ") -> SU("
1721           << GlobalSU->NodeNum << ")\n");
1722     DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
1723   }
1724   for (SmallVectorImpl<SUnit*>::const_iterator
1725          I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
1726     DEBUG(dbgs() << "  Global use SU(" << (*I)->NodeNum << ") -> SU("
1727           << FirstLocalSU->NodeNum << ")\n");
1728     DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
1729   }
1730 }
1731 
1732 /// \brief Callback from DAG postProcessing to create weak edges to encourage
1733 /// copy elimination.
1734 void CopyConstrain::apply(ScheduleDAGInstrs *DAGInstrs) {
1735   ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
1736   assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");
1737 
1738   MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
1739   if (FirstPos == DAG->end())
1740     return;
1741   RegionBeginIdx = DAG->getLIS()->getInstructionIndex(*FirstPos);
1742   RegionEndIdx = DAG->getLIS()->getInstructionIndex(
1743       *priorNonDebug(DAG->end(), DAG->begin()));
1744 
1745   for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
1746     SUnit *SU = &DAG->SUnits[Idx];
1747     if (!SU->getInstr()->isCopy())
1748       continue;
1749 
1750     constrainLocalCopy(SU, static_cast<ScheduleDAGMILive*>(DAG));
1751   }
1752 }
1753 
1754 //===----------------------------------------------------------------------===//
1755 // MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
1756 // and possibly other custom schedulers.
1757 //===----------------------------------------------------------------------===//
1758 
1759 static const unsigned InvalidCycle = ~0U;
1760 
1761 SchedBoundary::~SchedBoundary() { delete HazardRec; }
1762 
1763 void SchedBoundary::reset() {
1764   // A new HazardRec is created for each DAG and owned by SchedBoundary.
1765   // Destroying and reconstructing it is very expensive though. So keep
1766   // invalid placeholder HazardRecs around instead.
1767   if (HazardRec && HazardRec->isEnabled()) {
1768     delete HazardRec;
1769     HazardRec = nullptr;
1770   }
1771   Available.clear();
1772   Pending.clear();
1773   CheckPending = false;
1774   NextSUs.clear();
1775   CurrCycle = 0;
1776   CurrMOps = 0;
1777   MinReadyCycle = UINT_MAX;
1778   ExpectedLatency = 0;
1779   DependentLatency = 0;
1780   RetiredMOps = 0;
1781   MaxExecutedResCount = 0;
1782   ZoneCritResIdx = 0;
1783   IsResourceLimited = false;
1784   ReservedCycles.clear();
1785 #ifndef NDEBUG
1786   // Track the maximum number of stall cycles that could arise either from the
1787   // latency of a DAG edge or the number of cycles that a processor resource is
1788   // reserved (SchedBoundary::ReservedCycles).
1789   MaxObservedStall = 0;
1790 #endif
1791   // Reserve a zero-count for invalid CritResIdx.
1792   ExecutedResCounts.resize(1);
1793   assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
1794 }
1795 
1796 void SchedRemainder::
1797 init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
1798   reset();
1799   if (!SchedModel->hasInstrSchedModel())
1800     return;
1801   RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
1802   for (std::vector<SUnit>::iterator
1803          I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
1804     const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
1805     RemIssueCount += SchedModel->getNumMicroOps(I->getInstr(), SC)
1806       * SchedModel->getMicroOpFactor();
1807     for (TargetSchedModel::ProcResIter
1808            PI = SchedModel->getWriteProcResBegin(SC),
1809            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1810       unsigned PIdx = PI->ProcResourceIdx;
1811       unsigned Factor = SchedModel->getResourceFactor(PIdx);
1812       RemainingCounts[PIdx] += (Factor * PI->Cycles);
1813     }
1814   }
1815 }
1816 
1817 void SchedBoundary::
1818 init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
1819   reset();
1820   DAG = dag;
1821   SchedModel = smodel;
1822   Rem = rem;
1823   if (SchedModel->hasInstrSchedModel()) {
1824     ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds());
1825     ReservedCycles.resize(SchedModel->getNumProcResourceKinds(), InvalidCycle);
1826   }
1827 }
1828 
1829 /// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
1830 /// these "soft stalls" differently than the hard stall cycles based on CPU
1831 /// resources and computed by checkHazard(). A fully in-order model
1832 /// (MicroOpBufferSize==0) will not make use of this since instructions are not
1833 /// available for scheduling until they are ready. However, a weaker in-order
1834 /// model may use this for heuristics. For example, if a processor has in-order
1835 /// behavior when reading certain resources, this may come into play.
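///
/// Worked example (hypothetical numbers): an unbuffered SU that becomes
/// ready at cycle 7 while CurrCycle is 5 reports 2 soft-stall cycles; once
/// CurrCycle reaches 7 it reports 0.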
1836 unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
1837   if (!SU->isUnbuffered)
1838     return 0;
1839 
1840   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
1841   if (ReadyCycle > CurrCycle)
1842     return ReadyCycle - CurrCycle;
1843   return 0;
1844 }
1845 
1846 /// Compute the next cycle at which the given processor resource can be
1847 /// scheduled.
1848 unsigned SchedBoundary::
1849 getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
1850   unsigned NextUnreserved = ReservedCycles[PIdx];
1851   // If this resource has never been used, always return cycle zero.
1852   if (NextUnreserved == InvalidCycle)
1853     return 0;
1854   // For bottom-up scheduling add the cycles needed for the current operation.
1855   if (!isTop())
1856     NextUnreserved += Cycles;
1857   return NextUnreserved;
1858 }
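// Example (hypothetical numbers): with ReservedCycles[PIdx] == 12 and an
// operation needing Cycles == 2, a bottom-up query returns 12 + 2 == 14,
// because it must also account for the current operation's own cycles,
// while a top-down query returns 12 unchanged.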
1859 
1860 /// Does this SU have a hazard within the current instruction group.
1861 ///
1862 /// The scheduler supports two modes of hazard recognition. The first is the
1863 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
1864 /// supports highly complicated in-order reservation tables
1865 /// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
1866 ///
1867 /// The second is a streamlined mechanism that checks for hazards based on
1868 /// simple counters that the scheduler itself maintains. It explicitly checks
1869 /// for instruction dispatch limitations, including the number of micro-ops that
1870 /// can dispatch per cycle.
1871 ///
1872 /// TODO: Also check whether the SU must start a new group.
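///
/// Example of the counter-based check (hypothetical numbers): with an issue
/// width of 4 and CurrMOps == 3, an SU needing 2 micro-ops is a hazard
/// because 3 + 2 > 4; it must wait for the next cycle.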
1873 bool SchedBoundary::checkHazard(SUnit *SU) {
1874   if (HazardRec->isEnabled()
1875       && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
1876     return true;
1877   }
1878   unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
1879   if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
1880     DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
1881           << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
1882     return true;
1883   }
1884   if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
1885     const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1886     for (TargetSchedModel::ProcResIter
1887            PI = SchedModel->getWriteProcResBegin(SC),
1888            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1889       unsigned NRCycle = getNextResourceCycle(PI->ProcResourceIdx, PI->Cycles);
1890       if (NRCycle > CurrCycle) {
1891 #ifndef NDEBUG
1892         MaxObservedStall = std::max(PI->Cycles, MaxObservedStall);
1893 #endif
1894         DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") "
1895               << SchedModel->getResourceName(PI->ProcResourceIdx)
1896               << "=" << NRCycle << "c\n");
1897         return true;
1898       }
1899     }
1900   }
1901   return false;
1902 }
1903 
1904 // Find the unscheduled node in ReadySUs with the highest latency.
1905 unsigned SchedBoundary::
1906 findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
1907   SUnit *LateSU = nullptr;
1908   unsigned RemLatency = 0;
1909   for (ArrayRef<SUnit*>::iterator I = ReadySUs.begin(), E = ReadySUs.end();
1910        I != E; ++I) {
1911     unsigned L = getUnscheduledLatency(*I);
1912     if (L > RemLatency) {
1913       RemLatency = L;
1914       LateSU = *I;
1915     }
1916   }
1917   if (LateSU) {
1918     DEBUG(dbgs() << Available.getName() << " RemLatency SU("
1919           << LateSU->NodeNum << ") " << RemLatency << "c\n");
1920   }
1921   return RemLatency;
1922 }
1923 
1924 // Count resources in this zone and the remaining unscheduled
1925 // instructions. Return the max count, scaled. Set OtherCritIdx to the critical
1926 // resource index, or zero if the zone is issue limited.
1927 unsigned SchedBoundary::
1928 getOtherResourceCount(unsigned &OtherCritIdx) {
1929   OtherCritIdx = 0;
1930   if (!SchedModel->hasInstrSchedModel())
1931     return 0;
1932 
1933   unsigned OtherCritCount = Rem->RemIssueCount
1934     + (RetiredMOps * SchedModel->getMicroOpFactor());
1935   DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
1936         << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
1937   for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
1938        PIdx != PEnd; ++PIdx) {
1939     unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
1940     if (OtherCount > OtherCritCount) {
1941       OtherCritCount = OtherCount;
1942       OtherCritIdx = PIdx;
1943     }
1944   }
1945   if (OtherCritIdx) {
1946     DEBUG(dbgs() << "  " << Available.getName() << " + Remain CritRes: "
1947           << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
1948           << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
1949   }
1950   return OtherCritCount;
1951 }
1952 
1953 void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) {
1954   assert(SU->getInstr() && "Scheduled SUnit must have instr");
1955 
1956 #ifndef NDEBUG
1957   // ReadyCycle was bumped up to CurrCycle when this node was
1958   // scheduled, but CurrCycle may have been eagerly advanced immediately after
1959   // scheduling, so may now be greater than ReadyCycle.
1960   if (ReadyCycle > CurrCycle)
1961     MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
1962 #endif
1963 
1964   if (ReadyCycle < MinReadyCycle)
1965     MinReadyCycle = ReadyCycle;
1966 
1967   // Check for interlocks first. For the purpose of other heuristics, an
1968   // instruction that cannot issue appears as if it's not in the ReadyQueue.
1969   bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
1970   if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU) ||
1971       Available.size() >= ReadyListLimit)
1972     Pending.push(SU);
1973   else
1974     Available.push(SU);
1975 
1976   // Record this node as an immediate dependent of the scheduled node.
1977   NextSUs.insert(SU);
1978 }
1979 
1980 void SchedBoundary::releaseTopNode(SUnit *SU) {
1981   if (SU->isScheduled)
1982     return;
1983 
1984   releaseNode(SU, SU->TopReadyCycle);
1985 }
1986 
1987 void SchedBoundary::releaseBottomNode(SUnit *SU) {
1988   if (SU->isScheduled)
1989     return;
1990 
1991   releaseNode(SU, SU->BotReadyCycle);
1992 }
1993 
1994 /// Move the boundary of scheduled code by one cycle.
1995 void SchedBoundary::bumpCycle(unsigned NextCycle) {
1996   if (SchedModel->getMicroOpBufferSize() == 0) {
1997     assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
1998     if (MinReadyCycle > NextCycle)
1999       NextCycle = MinReadyCycle;
2000   }
2001   // Update the current micro-ops, which will issue in the next cycle.
2002   unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
2003   CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;
2004 
2005   // Decrement DependentLatency based on the next cycle.
2006   if ((NextCycle - CurrCycle) > DependentLatency)
2007     DependentLatency = 0;
2008   else
2009     DependentLatency -= (NextCycle - CurrCycle);
2010 
2011   if (!HazardRec->isEnabled()) {
2012     // Bypass HazardRec virtual calls.
2013     CurrCycle = NextCycle;
2014   } else {
2015     // Bypass getHazardType calls in case of long latency.
2016     for (; CurrCycle != NextCycle; ++CurrCycle) {
2017       if (isTop())
2018         HazardRec->AdvanceCycle();
2019       else
2020         HazardRec->RecedeCycle();
2021     }
2022   }
2023   CheckPending = true;
2024   unsigned LFactor = SchedModel->getLatencyFactor();
2025   IsResourceLimited =
2026     (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
2027     > (int)LFactor;
2028 
2029   DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n');
2030 }
2031 
2032 void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
2033   ExecutedResCounts[PIdx] += Count;
2034   if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
2035     MaxExecutedResCount = ExecutedResCounts[PIdx];
2036 }
2037 
2038 /// Add the given processor resource to this scheduled zone.
2039 ///
2040 /// \param Cycles indicates the number of consecutive (non-pipelined) cycles
2041 /// during which this resource is consumed.
2042 ///
2043 /// \return the next cycle at which the instruction may execute without
2044 /// oversubscribing resources.
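///
/// Worked example (hypothetical numbers): a resource with Factor == 2 that
/// is consumed for Cycles == 3 adds Count == 6 scaled units; if its total
/// now exceeds the zone's critical count, it becomes the new ZoneCritResIdx.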
2045 unsigned SchedBoundary::
2046 countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
2047   unsigned Factor = SchedModel->getResourceFactor(PIdx);
2048   unsigned Count = Factor * Cycles;
2049   DEBUG(dbgs() << "  " << SchedModel->getResourceName(PIdx)
2050         << " +" << Cycles << "x" << Factor << "u\n");
2051 
2052   // Update Executed resources counts.
2053   incExecutedResources(PIdx, Count);
2054   assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
2055   Rem->RemainingCounts[PIdx] -= Count;
2056 
2057   // Check if this resource exceeds the current critical resource. If so, it
2058   // becomes the critical resource.
2059   if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
2060     ZoneCritResIdx = PIdx;
2061     DEBUG(dbgs() << "  *** Critical resource "
2062           << SchedModel->getResourceName(PIdx) << ": "
2063           << getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
2064   }
2065   // For reserved resources, record the highest cycle using the resource.
2066   unsigned NextAvailable = getNextResourceCycle(PIdx, Cycles);
2067   if (NextAvailable > CurrCycle) {
2068     DEBUG(dbgs() << "  Resource conflict: "
2069           << SchedModel->getProcResource(PIdx)->Name << " reserved until @"
2070           << NextAvailable << "\n");
2071   }
2072   return NextAvailable;
2073 }
2074 
2075 /// Move the boundary of scheduled code by one SUnit.
2076 void SchedBoundary::bumpNode(SUnit *SU) {
2077   // Update the reservation table.
2078   if (HazardRec->isEnabled()) {
2079     if (!isTop() && SU->isCall) {
2080       // Calls are scheduled with their preceding instructions. For bottom-up
2081       // scheduling, clear the pipeline state before emitting.
2082       HazardRec->Reset();
2083     }
2084     HazardRec->EmitInstruction(SU);
2085   }
2086   // checkHazard should prevent scheduling multiple instructions per cycle that
2087   // exceed the issue width.
2088   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2089   unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
2090   assert(
2091       (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
2092       "Cannot schedule this instruction's MicroOps in the current cycle.");
2093 
2094   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
2095   DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");
2096 
2097   unsigned NextCycle = CurrCycle;
2098   switch (SchedModel->getMicroOpBufferSize()) {
2099   case 0:
2100     assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
2101     break;
2102   case 1:
2103     if (ReadyCycle > NextCycle) {
2104       NextCycle = ReadyCycle;
2105       DEBUG(dbgs() << "  *** Stall until: " << ReadyCycle << "\n");
2106     }
2107     break;
2108   default:
2109     // We don't currently model the OOO reorder buffer, so consider all
2110     // scheduled MOps to be "retired". We do loosely model in-order resource
2111     // latency. If this instruction uses an in-order resource, account for any
2112     // likely stall cycles.
2113     if (SU->isUnbuffered && ReadyCycle > NextCycle)
2114       NextCycle = ReadyCycle;
2115     break;
2116   }
2117   RetiredMOps += IncMOps;
2118 
2119   // Update resource counts and critical resource.
2120   if (SchedModel->hasInstrSchedModel()) {
2121     unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
2122     assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
2123     Rem->RemIssueCount -= DecRemIssue;
2124     if (ZoneCritResIdx) {
2125       // Scale scheduled micro-ops for comparing with the critical resource.
2126       unsigned ScaledMOps =
2127         RetiredMOps * SchedModel->getMicroOpFactor();
2128 
2129       // If scaled micro-ops are now more than the previous critical resource by
2130       // a full cycle, then micro-ops issue becomes critical.
2131       if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
2132           >= (int)SchedModel->getLatencyFactor()) {
2133         ZoneCritResIdx = 0;
2134         DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
2135               << ScaledMOps / SchedModel->getLatencyFactor() << "c\n");
2136       }
2137     }
2138     for (TargetSchedModel::ProcResIter
2139            PI = SchedModel->getWriteProcResBegin(SC),
2140            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2141       unsigned RCycle =
2142         countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
2143       if (RCycle > NextCycle)
2144         NextCycle = RCycle;
2145     }
2146     if (SU->hasReservedResource) {
2147       // For reserved resources, record the highest cycle using the resource.
2148       // For top-down scheduling, this is the cycle in which we schedule this
2149       // instruction plus the number of cycles the operation reserves the
2150       // resource. For bottom-up, it is simply the instruction's cycle.
2151       for (TargetSchedModel::ProcResIter
2152              PI = SchedModel->getWriteProcResBegin(SC),
2153              PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2154         unsigned PIdx = PI->ProcResourceIdx;
2155         if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
2156           if (isTop()) {
2157             ReservedCycles[PIdx] =
2158               std::max(getNextResourceCycle(PIdx, 0), NextCycle + PI->Cycles);
2159           } else {
2160             ReservedCycles[PIdx] = NextCycle;
2161           }
2162         }
2163       }
2164     }
2165   }
2166   // Update ExpectedLatency and DependentLatency.
2167   unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
2168   unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
2169   if (SU->getDepth() > TopLatency) {
2170     TopLatency = SU->getDepth();
2171     DEBUG(dbgs() << "  " << Available.getName()
2172           << " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n");
2173   }
2174   if (SU->getHeight() > BotLatency) {
2175     BotLatency = SU->getHeight();
2176     DEBUG(dbgs() << "  " << Available.getName()
2177           << " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n");
2178   }
2179   // If we stall for any reason, bump the cycle.
2180   if (NextCycle > CurrCycle) {
2181     bumpCycle(NextCycle);
2182   } else {
2183     // After updating ZoneCritResIdx and ExpectedLatency, check if we're
2184     // resource limited. If a stall occurred, bumpCycle does this.
2185     unsigned LFactor = SchedModel->getLatencyFactor();
2186     IsResourceLimited =
2187       (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
2188       > (int)LFactor;
2189   }
2190   // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
2191   // resets CurrMOps. Loop to handle instructions with more MOps than can issue
2192   // in one cycle. Since we commonly reach the max MOps here, opportunistically
2193   // bump the cycle to avoid uselessly checking everything in the readyQ.
2194   CurrMOps += IncMOps;
2195   while (CurrMOps >= SchedModel->getIssueWidth()) {
2196     DEBUG(dbgs() << "  *** Max MOps " << CurrMOps
2197           << " at cycle " << CurrCycle << '\n');
2198     bumpCycle(++NextCycle);
2199   }
2200   DEBUG(dumpScheduledState());
2201 }
2202 
2203 /// Release pending ready nodes into the available queue. This makes them
2204 /// visible to heuristics.
2205 void SchedBoundary::releasePending() {
2206   // If the available queue is empty, it is safe to reset MinReadyCycle.
2207   if (Available.empty())
2208     MinReadyCycle = UINT_MAX;
2209 
2210   // Check to see if any of the pending instructions are ready to issue.  If
2211   // so, add them to the available queue.
2212   bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
2213   for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
2214     SUnit *SU = *(Pending.begin()+i);
2215     unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
2216 
2217     if (ReadyCycle < MinReadyCycle)
2218       MinReadyCycle = ReadyCycle;
2219 
2220     if (!IsBuffered && ReadyCycle > CurrCycle)
2221       continue;
2222 
2223     if (checkHazard(SU))
2224       continue;
2225 
2226     if (Available.size() >= ReadyListLimit)
2227       break;
2228 
2229     Available.push(SU);
2230     Pending.remove(Pending.begin()+i);
2231     --i; --e;
2232   }
2233   DEBUG(if (!Pending.empty()) Pending.dump());
2234   CheckPending = false;
2235 }
2236 
2237 /// Remove SU from the ready set for this boundary.
2238 void SchedBoundary::removeReady(SUnit *SU) {
2239   if (Available.isInQueue(SU))
2240     Available.remove(Available.find(SU));
2241   else {
2242     assert(Pending.isInQueue(SU) && "bad ready count");
2243     Pending.remove(Pending.find(SU));
2244   }
2245 }
2246 
2247 /// If this queue only has one ready candidate, return it. As a side effect,
2248 /// defer any nodes that now hit a hazard, and advance the cycle until at least
2249 /// one node is ready. If multiple instructions are ready, return nullptr.
2250 SUnit *SchedBoundary::pickOnlyChoice() {
2251   if (CheckPending)
2252     releasePending();
2253 
2254   if (CurrMOps > 0) {
2255     // Defer any ready instrs that now have a hazard.
2256     for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
2257       if (checkHazard(*I)) {
2258         Pending.push(*I);
2259         I = Available.remove(I);
2260         continue;
2261       }
2262       ++I;
2263     }
2264   }
2265   for (unsigned i = 0; Available.empty(); ++i) {
2266 //  FIXME: Re-enable assert once PR20057 is resolved.
2267 //    assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
2268 //           "permanent hazard");
2269     (void)i;
2270     bumpCycle(CurrCycle + 1);
2271     releasePending();
2272   }
2273   if (Available.size() == 1)
2274     return *Available.begin();
2275   return nullptr;
2276 }
2277 
2278 #ifndef NDEBUG
2279 // This is useful information to dump after bumpNode.
2280 // Note that the Queue contents are more useful before pickNodeFromQueue.
2281 void SchedBoundary::dumpScheduledState() {
2282   unsigned ResFactor;
2283   unsigned ResCount;
2284   if (ZoneCritResIdx) {
2285     ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
2286     ResCount = getResourceCount(ZoneCritResIdx);
2287   } else {
2288     ResFactor = SchedModel->getMicroOpFactor();
2289     ResCount = RetiredMOps * SchedModel->getMicroOpFactor();
2290   }
2291   unsigned LFactor = SchedModel->getLatencyFactor();
2292   dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
2293          << "  Retired: " << RetiredMOps;
2294   dbgs() << "\n  Executed: " << getExecutedCount() / LFactor << "c";
2295   dbgs() << "\n  Critical: " << ResCount / LFactor << "c, "
2296          << ResCount / ResFactor << " "
2297          << SchedModel->getResourceName(ZoneCritResIdx)
2298          << "\n  ExpectedLatency: " << ExpectedLatency << "c\n"
2299          << (IsResourceLimited ? "  - Resource" : "  - Latency")
2300          << " limited.\n";
2301 }
2302 #endif
2303 
2304 //===----------------------------------------------------------------------===//
2305 // GenericScheduler - Generic implementation of MachineSchedStrategy.
2306 //===----------------------------------------------------------------------===//
2307 
2308 void GenericSchedulerBase::SchedCandidate::
2309 initResourceDelta(const ScheduleDAGMI *DAG,
2310                   const TargetSchedModel *SchedModel) {
2311   if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
2312     return;
2313 
2314   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2315   for (TargetSchedModel::ProcResIter
2316          PI = SchedModel->getWriteProcResBegin(SC),
2317          PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2318     if (PI->ProcResourceIdx == Policy.ReduceResIdx)
2319       ResDelta.CritResources += PI->Cycles;
2320     if (PI->ProcResourceIdx == Policy.DemandResIdx)
2321       ResDelta.DemandedResources += PI->Cycles;
2322   }
2323 }
2324 
2325 /// Set the CandPolicy for a scheduling zone given the current resources and
2326 /// latencies inside and outside the zone.
2327 void GenericSchedulerBase::setPolicy(CandPolicy &Policy, bool IsPostRA,
2328                                      SchedBoundary &CurrZone,
2329                                      SchedBoundary *OtherZone) {
2330   // Apply preemptive heuristics based on the total latency and resources
2331   // inside and outside this zone. Potential stalls should be considered before
2332   // following this policy.
2333 
2334   // Compute remaining latency. We need this both to determine whether the
2335   // overall schedule has become latency-limited and whether the instructions
2336   // outside this zone are resource or latency limited.
2337   //
2338   // The "dependent" latency is updated incrementally during scheduling as the
2339   // max height/depth of scheduled nodes minus the cycles since it was
2340   // scheduled:
2341   //   DLat = max(N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
2342   //
2343   // The "independent" latency is the max ready queue depth:
2344   //   ILat = max N.depth for N in Available|Pending
2345   //
2346   // RemainingLatency is the greater of independent and dependent latency.
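  // Worked example (hypothetical numbers): with DependentLatency == 6, a
  // deepest available node of unscheduled latency 9, and a deepest pending
  // node of 4, RemLatency below becomes max(6, 9, 4) == 9 cycles.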
2347   unsigned RemLatency = CurrZone.getDependentLatency();
2348   RemLatency = std::max(RemLatency,
2349                         CurrZone.findMaxLatency(CurrZone.Available.elements()));
2350   RemLatency = std::max(RemLatency,
2351                         CurrZone.findMaxLatency(CurrZone.Pending.elements()));
2352 
2353   // Compute the critical resource outside the zone.
2354   unsigned OtherCritIdx = 0;
2355   unsigned OtherCount =
2356     OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;
2357 
2358   bool OtherResLimited = false;
2359   if (SchedModel->hasInstrSchedModel()) {
2360     unsigned LFactor = SchedModel->getLatencyFactor();
2361     OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
2362   }
2363   // Schedule aggressively for latency in PostRA mode. We don't check for
2364   // acyclic latency during PostRA, and highly out-of-order processors will
2365   // skip PostRA scheduling.
2366   if (!OtherResLimited) {
2367     if (IsPostRA || (RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath)) {
2368       Policy.ReduceLatency |= true;
2369       DEBUG(dbgs() << "  " << CurrZone.Available.getName()
2370             << " RemainingLatency " << RemLatency << " + "
2371             << CurrZone.getCurrCycle() << "c > CritPath "
2372             << Rem.CriticalPath << "\n");
2373     }
2374   }
2375   // If the same resource is limiting inside and outside the zone, do nothing.
2376   if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
2377     return;
2378 
2379   DEBUG(
2380     if (CurrZone.isResourceLimited()) {
2381       dbgs() << "  " << CurrZone.Available.getName() << " ResourceLimited: "
2382              << SchedModel->getResourceName(CurrZone.getZoneCritResIdx())
2383              << "\n";
2384     }
2385     if (OtherResLimited)
2386       dbgs() << "  RemainingLimit: "
2387              << SchedModel->getResourceName(OtherCritIdx) << "\n";
2388     if (!CurrZone.isResourceLimited() && !OtherResLimited)
2389       dbgs() << "  Latency limited both directions.\n");
2390 
2391   if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
2392     Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();
2393 
2394   if (OtherResLimited)
2395     Policy.DemandResIdx = OtherCritIdx;
2396 }
2397 
2398 #ifndef NDEBUG
2399 const char *GenericSchedulerBase::getReasonStr(
2400   GenericSchedulerBase::CandReason Reason) {
2401   switch (Reason) {
2402   case NoCand:         return "NOCAND    ";
2403   case PhysRegCopy:    return "PREG-COPY ";
2404   case RegExcess:      return "REG-EXCESS";
2405   case RegCritical:    return "REG-CRIT  ";
2406   case Stall:          return "STALL     ";
2407   case Cluster:        return "CLUSTER   ";
2408   case Weak:           return "WEAK      ";
2409   case RegMax:         return "REG-MAX   ";
2410   case ResourceReduce: return "RES-REDUCE";
2411   case ResourceDemand: return "RES-DEMAND";
2412   case TopDepthReduce: return "TOP-DEPTH ";
2413   case TopPathReduce:  return "TOP-PATH  ";
2414   case BotHeightReduce:return "BOT-HEIGHT";
2415   case BotPathReduce:  return "BOT-PATH  ";
2416   case NextDefUse:     return "DEF-USE   ";
2417   case NodeOrder:      return "ORDER     ";
2418   }
2419   llvm_unreachable("Unknown reason!");
2420 }
2421 
2422 void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
2423   PressureChange P;
2424   unsigned ResIdx = 0;
2425   unsigned Latency = 0;
2426   switch (Cand.Reason) {
2427   default:
2428     break;
2429   case RegExcess:
2430     P = Cand.RPDelta.Excess;
2431     break;
2432   case RegCritical:
2433     P = Cand.RPDelta.CriticalMax;
2434     break;
2435   case RegMax:
2436     P = Cand.RPDelta.CurrentMax;
2437     break;
2438   case ResourceReduce:
2439     ResIdx = Cand.Policy.ReduceResIdx;
2440     break;
2441   case ResourceDemand:
2442     ResIdx = Cand.Policy.DemandResIdx;
2443     break;
2444   case TopDepthReduce:
2445     Latency = Cand.SU->getDepth();
2446     break;
2447   case TopPathReduce:
2448     Latency = Cand.SU->getHeight();
2449     break;
2450   case BotHeightReduce:
2451     Latency = Cand.SU->getHeight();
2452     break;
2453   case BotPathReduce:
2454     Latency = Cand.SU->getDepth();
2455     break;
2456   }
2457   dbgs() << "  Cand SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
2458   if (P.isValid())
2459     dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
2460            << ":" << P.getUnitInc() << " ";
2461   else
2462     dbgs() << "      ";
2463   if (ResIdx)
2464     dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
2465   else
2466     dbgs() << "         ";
2467   if (Latency)
2468     dbgs() << " " << Latency << " cycles ";
2469   else
2470     dbgs() << "          ";
2471   dbgs() << '\n';
2472 }
2473 #endif
2474 
2475 /// Return true if this heuristic determines order.
2476 static bool tryLess(int TryVal, int CandVal,
2477                     GenericSchedulerBase::SchedCandidate &TryCand,
2478                     GenericSchedulerBase::SchedCandidate &Cand,
2479                     GenericSchedulerBase::CandReason Reason) {
2480   if (TryVal < CandVal) {
2481     TryCand.Reason = Reason;
2482     return true;
2483   }
2484   if (TryVal > CandVal) {
2485     if (Cand.Reason > Reason)
2486       Cand.Reason = Reason;
2487     return true;
2488   }
2489   Cand.setRepeat(Reason);
2490   return false;
2491 }
2492 
2493 static bool tryGreater(int TryVal, int CandVal,
2494                        GenericSchedulerBase::SchedCandidate &TryCand,
2495                        GenericSchedulerBase::SchedCandidate &Cand,
2496                        GenericSchedulerBase::CandReason Reason) {
2497   if (TryVal > CandVal) {
2498     TryCand.Reason = Reason;
2499     return true;
2500   }
2501   if (TryVal < CandVal) {
2502     if (Cand.Reason > Reason)
2503       Cand.Reason = Reason;
2504     return true;
2505   }
2506   Cand.setRepeat(Reason);
2507   return false;
2508 }
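// Illustration (hypothetical call): a caller can break ties in a cascade,
// e.g.
//   tryLess(getWeakLeft(TryCand.SU, IsTop), getWeakLeft(Cand.SU, IsTop),
//           TryCand, Cand, GenericSchedulerBase::Weak)
// decides the comparison as soon as the two values differ, recording the
// winner's Reason; on a tie it returns false and the next heuristic runs.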
2509 
2510 static bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
2511                        GenericSchedulerBase::SchedCandidate &Cand,
2512                        SchedBoundary &Zone) {
2513   if (Zone.isTop()) {
2514     if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
2515       if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2516                   TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
2517         return true;
2518     }
2519     if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2520                    TryCand, Cand, GenericSchedulerBase::TopPathReduce))
2521       return true;
2522   } else {
2523     if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
2524       if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2525                   TryCand, Cand, GenericSchedulerBase::BotHeightReduce))
2526         return true;
2527     }
2528     if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2529                    TryCand, Cand, GenericSchedulerBase::BotPathReduce))
2530       return true;
2531   }
2532   return false;
2533 }
2534 
2535 static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand,
2536                       bool IsTop) {
2537   DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
2538         << GenericSchedulerBase::getReasonStr(Cand.Reason) << '\n');
2539 }
2540 
2541 void GenericScheduler::initialize(ScheduleDAGMI *dag) {
2542   assert(dag->hasVRegLiveness() &&
2543          "(PreRA)GenericScheduler needs vreg liveness");
2544   DAG = static_cast<ScheduleDAGMILive*>(dag);
2545   SchedModel = DAG->getSchedModel();
2546   TRI = DAG->TRI;
2547 
2548   Rem.init(DAG, SchedModel);
2549   Top.init(DAG, SchedModel, &Rem);
2550   Bot.init(DAG, SchedModel, &Rem);
2551 
2552   // Initialize resource counts.
2553 
2554   // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
2555   // are disabled, then these HazardRecs will be disabled.
2556   const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
2557   if (!Top.HazardRec) {
2558     Top.HazardRec =
2559         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
2560             Itin, DAG);
2561   }
2562   if (!Bot.HazardRec) {
2563     Bot.HazardRec =
2564         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
2565             Itin, DAG);
2566   }
2567 }
2568 
2569 /// Initialize the per-region scheduling policy.
2570 void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
2571                                   MachineBasicBlock::iterator End,
2572                                   unsigned NumRegionInstrs) {
2573   const MachineFunction &MF = *Begin->getParent()->getParent();
2574   const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();
2575 
2576   // Avoid setting up the register pressure tracker for small regions to save
2577   // compile time. As a rough heuristic, only track pressure when the number of
2578   // schedulable instructions exceeds half the integer register file.
2579   RegionPolicy.ShouldTrackPressure = true;
2580   for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
2581     MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
2582     if (TLI->isTypeLegal(LegalIntVT)) {
2583       unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
2584         TLI->getRegClassFor(LegalIntVT));
2585       RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
2586     }
2587   }
2588 
2589   // For generic targets, we default to bottom-up, because it's simpler and
2590   // more compile-time optimization effort has been invested in that direction.
2591   RegionPolicy.OnlyBottomUp = true;
2592 
2593   // Allow the subtarget to override default policy.
2594   MF.getSubtarget().overrideSchedPolicy(RegionPolicy, Begin, End,
2595                                         NumRegionInstrs);
2596 
2597   // After subtarget overrides, apply command line options.
2598   if (!EnableRegPressure)
2599     RegionPolicy.ShouldTrackPressure = false;
2600 
2601   // -misched-topdown/bottomup can force or unforce the scheduling direction.
2602   // e.g. -misched-bottomup=false allows scheduling in both directions.
2603   assert((!ForceTopDown || !ForceBottomUp) &&
2604          "-misched-topdown incompatible with -misched-bottomup");
2605   if (ForceBottomUp.getNumOccurrences() > 0) {
2606     RegionPolicy.OnlyBottomUp = ForceBottomUp;
2607     if (RegionPolicy.OnlyBottomUp)
2608       RegionPolicy.OnlyTopDown = false;
2609   }
2610   if (ForceTopDown.getNumOccurrences() > 0) {
2611     RegionPolicy.OnlyTopDown = ForceTopDown;
2612     if (RegionPolicy.OnlyTopDown)
2613       RegionPolicy.OnlyBottomUp = false;
2614   }
2615 }
2616 
2617 void GenericScheduler::dumpPolicy() {
2618   dbgs() << "GenericScheduler RegionPolicy: "
2619          << " ShouldTrackPressure=" << RegionPolicy.ShouldTrackPressure
2620          << " OnlyTopDown=" << RegionPolicy.OnlyTopDown
2621          << " OnlyBottomUp=" << RegionPolicy.OnlyBottomUp
2622          << "\n";
2623 }
2624 
2625 /// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
2626 /// critical path by more cycles than it takes to drain the instruction buffer.
2627 /// We estimate an upper bound on in-flight instructions as:
2628 ///
2629 /// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
2630 /// InFlightIterations = AcyclicPath / CyclesPerIteration
2631 /// InFlightResources = InFlightIterations * LoopResources
2632 ///
2633 /// TODO: Check execution resources in addition to IssueCount.
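///
/// Worked example (hypothetical numbers): AcyclicPath = 40c, CyclicPath =
/// 10c, and 20 micro-ops of loop resources per iteration give
/// InFlightIterations = 40 / 10 = 4 and InFlightResources = 4 * 20 = 80
/// micro-ops; that demand overflows a 60-entry micro-op buffer, so the loop
/// is marked acyclic-latency limited.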
2634 void GenericScheduler::checkAcyclicLatency() {
2635   if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
2636     return;
2637 
2638   // Scaled number of cycles per loop iteration.
2639   unsigned IterCount =
2640     std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
2641              Rem.RemIssueCount);
2642   // Scaled acyclic critical path.
2643   unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
2644   // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
2645   unsigned InFlightCount =
2646     (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
2647   unsigned BufferLimit =
2648     SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();
2649 
2650   Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;
2651 
2652   DEBUG(dbgs() << "IssueCycles="
2653         << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
2654         << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
2655         << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
2656         << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
2657         << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
2658         if (Rem.IsAcyclicLatencyLimited)
2659           dbgs() << "  ACYCLIC LATENCY LIMIT\n");
2660 }
2661 
2662 void GenericScheduler::registerRoots() {
2663   Rem.CriticalPath = DAG->ExitSU.getDepth();
2664 
2665   // Some roots may not feed into ExitSU. Check all of them just in case.
2666   for (std::vector<SUnit*>::const_iterator
2667          I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
2668     if ((*I)->getDepth() > Rem.CriticalPath)
2669       Rem.CriticalPath = (*I)->getDepth();
2670   }
2671   DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
2672   if (DumpCriticalPathLength) {
2673     errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n";
2674   }
2675 
2676   if (EnableCyclicPath) {
2677     Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
2678     checkAcyclicLatency();
2679   }
2680 }
2681 
2682 static bool tryPressure(const PressureChange &TryP,
2683                         const PressureChange &CandP,
2684                         GenericSchedulerBase::SchedCandidate &TryCand,
2685                         GenericSchedulerBase::SchedCandidate &Cand,
2686                         GenericSchedulerBase::CandReason Reason,
2687                         const TargetRegisterInfo *TRI,
2688                         const MachineFunction &MF) {
2689   unsigned TryPSet = TryP.getPSetOrMax();
2690   unsigned CandPSet = CandP.getPSetOrMax();
2691   // If both candidates affect the same set, go with the smallest increase.
2692   if (TryPSet == CandPSet) {
2693     return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
2694                    Reason);
2695   }
2696   // If one candidate decreases and the other increases, go with it.
2697   // Invalid candidates have UnitInc==0.
2698   if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
2699                  Reason)) {
2700     return true;
2701   }
2702 
2703   int TryRank = TryP.isValid() ? TRI->getRegPressureSetScore(MF, TryPSet) :
2704                                  std::numeric_limits<int>::max();
2705 
2706   int CandRank = CandP.isValid() ? TRI->getRegPressureSetScore(MF, CandPSet) :
2707                                    std::numeric_limits<int>::max();
2708 
2709   // If the candidates are decreasing pressure, reverse priority.
2710   if (TryP.getUnitInc() < 0)
2711     std::swap(TryRank, CandRank);
2712   return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
2713 }
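// Illustration (hypothetical pressure deltas): if TryCand decreases GPR
// pressure (UnitInc == -1) while Cand increases it (UnitInc == +2), the
// tryGreater call on the "decreases pressure" predicate picks TryCand for
// the given Reason without consulting the pressure-set scores.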
2714 
2715 static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
2716   return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
2717 }
2718 
2719 /// Minimize physical register live ranges. Regalloc wants them adjacent to
2720 /// their physreg def/use.
2721 ///
2722 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
2723 /// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
2724 /// with the operation that produces or consumes the physreg. We'll do this when
2725 /// regalloc has support for parallel copies.
2726 static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
2727   const MachineInstr *MI = SU->getInstr();
2728   if (!MI->isCopy())
2729     return 0;
2730 
2731   unsigned ScheduledOper = isTop ? 1 : 0;
2732   unsigned UnscheduledOper = isTop ? 0 : 1;
2733   // If we have already scheduled the physreg producer/consumer, immediately
2734   // schedule the copy.
2735   if (TargetRegisterInfo::isPhysicalRegister(
2736         MI->getOperand(ScheduledOper).getReg()))
2737     return 1;
2738   // If the physreg is at the boundary, defer it. Otherwise schedule it
2739   // immediately to free the dependent. We can hoist the copy later.
2740   bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
2741   if (TargetRegisterInfo::isPhysicalRegister(
2742         MI->getOperand(UnscheduledOper).getReg()))
2743     return AtBoundary ? -1 : 1;
2744   return 0;
2745 }
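// Illustrative cases (operand 0 is the copy's def, operand 1 its source):
// scheduling top-down, %vreg = COPY %physreg returns +1 because the physreg
// producer is already placed, pulling the copy in right behind it; scheduling
// bottom-up, %physreg = COPY %vreg returns +1 for the symmetric reason. A copy
// whose physreg lies on the unscheduled side is deferred (-1) only while it is
// still at the region boundary.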
2746 
2747 void GenericScheduler::initCandidate(SchedCandidate &Cand, SUnit *SU,
2748                                      bool AtTop,
2749                                      const RegPressureTracker &RPTracker,
2750                                      RegPressureTracker &TempTracker) {
2751   Cand.SU = SU;
2752   if (DAG->isTrackingPressure()) {
2753     if (AtTop) {
2754       TempTracker.getMaxDownwardPressureDelta(
2755         Cand.SU->getInstr(),
2756         Cand.RPDelta,
2757         DAG->getRegionCriticalPSets(),
2758         DAG->getRegPressure().MaxSetPressure);
2759     } else {
2760       if (VerifyScheduling) {
2761         TempTracker.getMaxUpwardPressureDelta(
2762           Cand.SU->getInstr(),
2763           &DAG->getPressureDiff(Cand.SU),
2764           Cand.RPDelta,
2765           DAG->getRegionCriticalPSets(),
2766           DAG->getRegPressure().MaxSetPressure);
2767       } else {
2768         RPTracker.getUpwardPressureDelta(
2769           Cand.SU->getInstr(),
2770           DAG->getPressureDiff(Cand.SU),
2771           Cand.RPDelta,
2772           DAG->getRegionCriticalPSets(),
2773           DAG->getRegPressure().MaxSetPressure);
2774       }
2775     }
2776   }
2777   DEBUG(if (Cand.RPDelta.Excess.isValid())
2778           dbgs() << "  Try  SU(" << Cand.SU->NodeNum << ") "
2779                  << TRI->getRegPressureSetName(Cand.RPDelta.Excess.getPSet())
2780                  << ":" << Cand.RPDelta.Excess.getUnitInc() << "\n");
2781 }
2782 
2783 /// Apply a set of heuristics to a new candidate. Heuristics are currently
2784 /// hierarchical. This may be more efficient than a graduated cost model because
2785 /// we don't need to evaluate all aspects of the model for each node in the
2786 /// queue. But it's really done to make the heuristics easier to debug and
2787 /// statistically analyze.
2788 ///
2789 /// \param Cand provides the policy and current best candidate.
2790 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
2791 /// \param Zone describes the scheduled zone that we are extending.
2792 void GenericScheduler::tryCandidate(SchedCandidate &Cand,
2793                                     SchedCandidate &TryCand,
2794                                     SchedBoundary &Zone) {
2795   // Initialize the candidate if needed.
2796   if (!Cand.isValid()) {
2797     TryCand.Reason = NodeOrder;
2798     return;
2799   }
2800 
2801   if (tryGreater(biasPhysRegCopy(TryCand.SU, Zone.isTop()),
2802                  biasPhysRegCopy(Cand.SU, Zone.isTop()),
2803                  TryCand, Cand, PhysRegCopy))
2804     return;
2805 
2806   // Avoid exceeding the target's limit.
2807   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
2808                                                Cand.RPDelta.Excess,
2809                                                TryCand, Cand, RegExcess, TRI,
2810                                                DAG->MF))
2811     return;
2812 
2813   // Avoid increasing the max critical pressure in the scheduled region.
2814   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
2815                                                Cand.RPDelta.CriticalMax,
2816                                                TryCand, Cand, RegCritical, TRI,
2817                                                DAG->MF))
2818     return;
2819 
2820   // For loops that are acyclic path limited, aggressively schedule for latency.
2821   // This can result in very long dependence chains scheduled in sequence, so
2822   // once every cycle (when CurrMOps == 0), switch to normal heuristics.
2823   if (Rem.IsAcyclicLatencyLimited && !Zone.getCurrMOps()
2824       && tryLatency(TryCand, Cand, Zone))
2825     return;
2826 
2827   // Prioritize instructions that read unbuffered resources by stall cycles.
2828   if (tryLess(Zone.getLatencyStallCycles(TryCand.SU),
2829               Zone.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
2830     return;
2831 
2832   // Keep clustered nodes together to encourage downstream peephole
2833   // optimizations which may reduce resource requirements.
2834   //
2835   // This is a best effort to set things up for a post-RA pass. Optimizations
2836   // like generating loads of multiple registers should ideally be done within
2837   // the scheduler pass by combining the loads during DAG postprocessing.
2838   const SUnit *NextClusterSU =
2839     Zone.isTop() ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
2840   if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU,
2841                  TryCand, Cand, Cluster))
2842     return;
2843 
2844   // Weak edges are for clustering and other constraints.
2845   if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()),
2846               getWeakLeft(Cand.SU, Zone.isTop()),
2847               TryCand, Cand, Weak)) {
2848     return;
2849   }
2850   // Avoid increasing the max pressure of the entire region.
2851   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
2852                                                Cand.RPDelta.CurrentMax,
2853                                                TryCand, Cand, RegMax, TRI,
2854                                                DAG->MF))
2855     return;
2856 
2857   // Avoid critical resource consumption and balance the schedule.
2858   TryCand.initResourceDelta(DAG, SchedModel);
2859   if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
2860               TryCand, Cand, ResourceReduce))
2861     return;
2862   if (tryGreater(TryCand.ResDelta.DemandedResources,
2863                  Cand.ResDelta.DemandedResources,
2864                  TryCand, Cand, ResourceDemand))
2865     return;
2866 
2867   // Avoid serializing long latency dependence chains.
2868   // For acyclic path limited loops, latency was already checked above.
2869   if (!RegionPolicy.DisableLatencyHeuristic && Cand.Policy.ReduceLatency &&
2870       !Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, Zone)) {
2871     return;
2872   }
2873 
2874   // Prefer immediate defs/users of the last scheduled instruction. This is a
2875   // local pressure avoidance strategy that also makes the machine code
2876   // readable.
2877   if (tryGreater(Zone.isNextSU(TryCand.SU), Zone.isNextSU(Cand.SU),
2878                  TryCand, Cand, NextDefUse))
2879     return;
2880 
2881   // Fall through to original instruction order.
2882   if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
2883       || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
2884     TryCand.Reason = NodeOrder;
2885   }
2886 }
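// Read top to bottom, the cascade above yields the effective priority order:
// PhysRegCopy > RegExcess > RegCritical > (acyclic latency) > Stall > Cluster
// > Weak > RegMax > ResourceReduce > ResourceDemand > (latency) > NextDefUse >
// NodeOrder. Each tier is consulted only when every tier above it ties.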
2887 
2888 /// Pick the best candidate from the queue.
2889 ///
2890 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
2891 /// DAG building. To adjust for the current scheduling location we need to
2892 /// maintain the number of vreg uses remaining to be top-scheduled.
2893 void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
2894                                          const RegPressureTracker &RPTracker,
2895                                          SchedCandidate &Cand) {
2896   ReadyQueue &Q = Zone.Available;
2897 
2898   DEBUG(Q.dump());
2899 
2900   // getMaxPressureDelta temporarily modifies the tracker.
2901   RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
2902 
2903   for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
2904 
2905     SchedCandidate TryCand(Cand.Policy);
2906     initCandidate(TryCand, *I, Zone.isTop(), RPTracker, TempTracker);
2907     tryCandidate(Cand, TryCand, Zone);
2908     if (TryCand.Reason != NoCand) {
2909       // Initialize resource delta if needed in case future heuristics query it.
2910       if (TryCand.ResDelta == SchedResourceDelta())
2911         TryCand.initResourceDelta(DAG, SchedModel);
2912       Cand.setBest(TryCand);
2913       DEBUG(traceCandidate(Cand));
2914     }
2915   }
2916 }
2917 
2918 /// Pick the best candidate node from either the top or bottom queue.
2919 SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
2920   // Schedule as far as possible in the direction of no choice. This is most
2921   // efficient, but also provides the best heuristics for CriticalPSets.
2922   if (SUnit *SU = Bot.pickOnlyChoice()) {
2923     IsTopNode = false;
2924     DEBUG(dbgs() << "Pick Bot ONLY1\n");
2925     return SU;
2926   }
2927   if (SUnit *SU = Top.pickOnlyChoice()) {
2928     IsTopNode = true;
2929     DEBUG(dbgs() << "Pick Top ONLY1\n");
2930     return SU;
2931   }
2932   CandPolicy NoPolicy;
2933   SchedCandidate BotCand(NoPolicy);
2934   SchedCandidate TopCand(NoPolicy);
2935   // Set the bottom-up policy based on the state of the current bottom zone and
2936   // the instructions outside the zone, including the top zone.
2937   setPolicy(BotCand.Policy, /*IsPostRA=*/false, Bot, &Top);
2938   // Set the top-down policy based on the state of the current top zone and
2939   // the instructions outside the zone, including the bottom zone.
2940   setPolicy(TopCand.Policy, /*IsPostRA=*/false, Top, &Bot);
2941 
2942   // Prefer bottom scheduling when heuristics are silent.
2943   pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
2944   assert(BotCand.Reason != NoCand && "failed to find the first candidate");
2945 
2946   // If either Q has a single candidate that provides the least increase in
2947   // Excess pressure, we can immediately schedule from that Q.
2948   //
2949   // RegionCriticalPSets summarizes the pressure within the scheduled region and
2950   // affects picking from either Q. If scheduling in one direction must
2951   // increase pressure for one of the excess PSets, then schedule in that
2952   // direction first to provide more freedom in the other direction.
2953   if ((BotCand.Reason == RegExcess && !BotCand.isRepeat(RegExcess))
2954       || (BotCand.Reason == RegCritical && !BotCand.isRepeat(RegCritical)))
2955   {
2956     IsTopNode = false;
2957     tracePick(BotCand, IsTopNode);
2958     return BotCand.SU;
2959   }
2960   // Check if the top Q has a better candidate.
2961   pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2962   assert(TopCand.Reason != NoCand && "failed to find the first candidate");
2963 
2964   // Choose the queue with the most important (lowest enum) reason.
2965   if (TopCand.Reason < BotCand.Reason) {
2966     IsTopNode = true;
2967     tracePick(TopCand, IsTopNode);
2968     return TopCand.SU;
2969   }
2970   // Otherwise prefer the bottom candidate, in node order if all else failed.
2971   IsTopNode = false;
2972   tracePick(BotCand, IsTopNode);
2973   return BotCand.SU;
2974 }
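// Example of the final tie-break (candidates hypothetical): if BotCand was
// picked for Cluster while TopCand was picked for RegExcess, RegExcess has the
// lower, more important enum value and the top candidate wins; on equal
// reasons the bottom candidate is kept by default.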
2975 
2976 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
2977 SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
2978   if (DAG->top() == DAG->bottom()) {
2979     assert(Top.Available.empty() && Top.Pending.empty() &&
2980            Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
2981     return nullptr;
2982   }
2983   SUnit *SU;
2984   do {
2985     if (RegionPolicy.OnlyTopDown) {
2986       SU = Top.pickOnlyChoice();
2987       if (!SU) {
2988         CandPolicy NoPolicy;
2989         SchedCandidate TopCand(NoPolicy);
2990         pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2991         assert(TopCand.Reason != NoCand && "failed to find a candidate");
2992         tracePick(TopCand, true);
2993         SU = TopCand.SU;
2994       }
2995       IsTopNode = true;
2996     } else if (RegionPolicy.OnlyBottomUp) {
2997       SU = Bot.pickOnlyChoice();
2998       if (!SU) {
2999         CandPolicy NoPolicy;
3000         SchedCandidate BotCand(NoPolicy);
3001         pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
3002         assert(BotCand.Reason != NoCand && "failed to find a candidate");
3003         tracePick(BotCand, false);
3004         SU = BotCand.SU;
3005       }
3006       IsTopNode = false;
3007     } else {
3008       SU = pickNodeBidirectional(IsTopNode);
3009     }
3010   } while (SU->isScheduled);
3011 
3012   if (SU->isTopReady())
3013     Top.removeReady(SU);
3014   if (SU->isBottomReady())
3015     Bot.removeReady(SU);
3016 
3017   DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
3018   return SU;
3019 }
3020 
3021 void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
3022 
3023   MachineBasicBlock::iterator InsertPos = SU->getInstr();
3024   if (!isTop)
3025     ++InsertPos;
3026   SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;
3027 
3028   // Find already scheduled copies with a single physreg dependence and move
3029   // them just above the scheduled instruction.
3030   for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end();
3031        I != E; ++I) {
3032     if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg()))
3033       continue;
3034     SUnit *DepSU = I->getSUnit();
3035     if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
3036       continue;
3037     MachineInstr *Copy = DepSU->getInstr();
3038     if (!Copy->isCopy())
3039       continue;
3040     DEBUG(dbgs() << "  Rescheduling physreg copy ";
3041           I->getSUnit()->dump(DAG));
3042     DAG->moveInstruction(Copy, InsertPos);
3043   }
3044 }
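// Sketch of the effect for the bottom-up case (instruction names
// hypothetical): if a CALL defines %EAX and a lone %vreg = COPY %EAX was
// scheduled earlier, the loop above moves the COPY to sit immediately after
// the CALL, shrinking the physreg live range to the adjacency regalloc
// expects (see biasPhysRegCopy).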
3045 
3046 /// Update the scheduler's state after scheduling a node. This is the same node
3047 /// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
3048 /// update its state based on the current cycle before MachineSchedStrategy
3049 /// does.
3050 ///
3051 /// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
3052 /// them here. See comments in biasPhysRegCopy.
3053 void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3054   if (IsTopNode) {
3055     SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3056     Top.bumpNode(SU);
3057     if (SU->hasPhysRegUses)
3058       reschedulePhysRegCopies(SU, true);
3059   } else {
3060     SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
3061     Bot.bumpNode(SU);
3062     if (SU->hasPhysRegDefs)
3063       reschedulePhysRegCopies(SU, false);
3064   }
3065 }
3066 
3067 /// Create the standard converging machine scheduler. This will be used as the
3068 /// default scheduler if the target does not set a default.
3069 static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C) {
3070   ScheduleDAGMILive *DAG = new ScheduleDAGMILive(C, make_unique<GenericScheduler>(C));
3071   // Register DAG post-processors.
3072   //
3073   // FIXME: extend the mutation API to allow earlier mutations to instantiate
3074   // data and pass it to later mutations. Have a single mutation that gathers
3075   // the interesting nodes in one pass.
3076   DAG->addMutation(make_unique<CopyConstrain>(DAG->TII, DAG->TRI));
3077   if (EnableMemOpCluster) {
3078     if (DAG->TII->enableClusterLoads())
3079       DAG->addMutation(make_unique<LoadClusterMutation>(DAG->TII, DAG->TRI));
3080     if (DAG->TII->enableClusterStores())
3081       DAG->addMutation(make_unique<StoreClusterMutation>(DAG->TII, DAG->TRI));
3082   }
3083   if (EnableMacroFusion)
3084     DAG->addMutation(make_unique<MacroFusion>(*DAG->TII, *DAG->TRI));
3085   return DAG;
3086 }
3087 
3088 static MachineSchedRegistry
3089 GenericSchedRegistry("converge", "Standard converging scheduler.",
3090                      createGenericSchedLive);
3091 
3092 //===----------------------------------------------------------------------===//
3093 // PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
3094 //===----------------------------------------------------------------------===//
3095 
3096 void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
3097   DAG = Dag;
3098   SchedModel = DAG->getSchedModel();
3099   TRI = DAG->TRI;
3100 
3101   Rem.init(DAG, SchedModel);
3102   Top.init(DAG, SchedModel, &Rem);
3103   BotRoots.clear();
3104 
3105   // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
3106   // or are disabled, then these HazardRecs will be disabled.
3107   const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
3108   if (!Top.HazardRec) {
3109     Top.HazardRec =
3110         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
3111             Itin, DAG);
3112   }
3113 }
3114 
3116 void PostGenericScheduler::registerRoots() {
3117   Rem.CriticalPath = DAG->ExitSU.getDepth();
3118 
3119   // Some roots may not feed into ExitSU. Check all of them in case.
3120   for (SmallVectorImpl<SUnit*>::const_iterator
3121          I = BotRoots.begin(), E = BotRoots.end(); I != E; ++I) {
3122     if ((*I)->getDepth() > Rem.CriticalPath)
3123       Rem.CriticalPath = (*I)->getDepth();
3124   }
3125   DEBUG(dbgs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << '\n');
3126   if (DumpCriticalPathLength) {
3127     errs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << " \n";
3128   }
3129 }
3130 
3131 /// Apply a set of heuristics to a new candidate for PostRA scheduling.
3132 ///
3133 /// \param Cand provides the policy and current best candidate.
3134 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
3135 void PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
3136                                         SchedCandidate &TryCand) {
3137 
3138   // Initialize the candidate if needed.
3139   if (!Cand.isValid()) {
3140     TryCand.Reason = NodeOrder;
3141     return;
3142   }
3143 
3144   // Prioritize instructions that read unbuffered resources by stall cycles.
3145   if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
3146               Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
3147     return;
3148 
3149   // Avoid critical resource consumption and balance the schedule.
3150   if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
3151               TryCand, Cand, ResourceReduce))
3152     return;
3153   if (tryGreater(TryCand.ResDelta.DemandedResources,
3154                  Cand.ResDelta.DemandedResources,
3155                  TryCand, Cand, ResourceDemand))
3156     return;
3157 
3158   // Avoid serializing long latency dependence chains.
3159   if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
3160     return;
3161   }
3162 
3163   // Fall through to original instruction order.
3164   if (TryCand.SU->NodeNum < Cand.SU->NodeNum)
3165     TryCand.Reason = NodeOrder;
3166 }
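// The post-RA cascade is the pressure-free subset of the pre-RA one:
// Stall > ResourceReduce > ResourceDemand > (latency) > NodeOrder.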
3167 
3168 void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
3169   ReadyQueue &Q = Top.Available;
3170 
3171   DEBUG(Q.dump());
3172 
3173   for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
3174     SchedCandidate TryCand(Cand.Policy);
3175     TryCand.SU = *I;
3176     TryCand.initResourceDelta(DAG, SchedModel);
3177     tryCandidate(Cand, TryCand);
3178     if (TryCand.Reason != NoCand) {
3179       Cand.setBest(TryCand);
3180       DEBUG(traceCandidate(Cand));
3181     }
3182   }
3183 }
3184 
3185 /// Pick the next node to schedule.
3186 SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
3187   if (DAG->top() == DAG->bottom()) {
3188     assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
3189     return nullptr;
3190   }
3191   SUnit *SU;
3192   do {
3193     SU = Top.pickOnlyChoice();
3194     if (!SU) {
3195       CandPolicy NoPolicy;
3196       SchedCandidate TopCand(NoPolicy);
3197       // Set the top-down policy based on the state of the current top zone and
3198       // the instructions outside the zone, including the bottom zone.
3199       setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
3200       pickNodeFromQueue(TopCand);
3201       assert(TopCand.Reason != NoCand && "failed to find a candidate");
3202       tracePick(TopCand, true);
3203       SU = TopCand.SU;
3204     }
3205   } while (SU->isScheduled);
3206 
3207   IsTopNode = true;
3208   Top.removeReady(SU);
3209 
3210   DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
3211   return SU;
3212 }
3213 
3214 /// Called after ScheduleDAGMI has scheduled an instruction and updated
3215 /// scheduled/remaining flags in the DAG nodes.
3216 void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3217   SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3218   Top.bumpNode(SU);
3219 }
3220 
3221 /// Create a generic scheduler with no vreg liveness or DAG mutation passes.
3222 static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C) {
3223   return new ScheduleDAGMI(C, make_unique<PostGenericScheduler>(C), /*IsPostRA=*/true);
3224 }
3225 
3226 //===----------------------------------------------------------------------===//
3227 // ILP Scheduler. Currently for experimental analysis of heuristics.
3228 //===----------------------------------------------------------------------===//
3229 
3230 namespace {
3231 /// \brief Order nodes by the ILP metric.
3232 struct ILPOrder {
3233   const SchedDFSResult *DFSResult;
3234   const BitVector *ScheduledTrees;
3235   bool MaximizeILP;
3236 
3237   ILPOrder(bool MaxILP)
3238     : DFSResult(nullptr), ScheduledTrees(nullptr), MaximizeILP(MaxILP) {}
3239 
3240   /// \brief Apply a less-than relation on node priority.
3241   ///
3242   /// (Return true if A comes after B in the Q.)
3243   bool operator()(const SUnit *A, const SUnit *B) const {
3244     unsigned SchedTreeA = DFSResult->getSubtreeID(A);
3245     unsigned SchedTreeB = DFSResult->getSubtreeID(B);
3246     if (SchedTreeA != SchedTreeB) {
3247       // Unscheduled trees have lower priority.
3248       if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
3249         return ScheduledTrees->test(SchedTreeB);
3250 
3251       // Trees with shallower connections have lower priority.
3252       if (DFSResult->getSubtreeLevel(SchedTreeA)
3253           != DFSResult->getSubtreeLevel(SchedTreeB)) {
3254         return DFSResult->getSubtreeLevel(SchedTreeA)
3255           < DFSResult->getSubtreeLevel(SchedTreeB);
3256       }
3257     }
3258     if (MaximizeILP)
3259       return DFSResult->getILP(A) < DFSResult->getILP(B);
3260     else
3261       return DFSResult->getILP(A) > DFSResult->getILP(B);
3262   }
3263 };
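// For instance, given nodes A and B in different subtrees where only B's
// subtree has begun scheduling, the comparator orders A after B regardless of
// ILP, so an in-flight subtree drains before a new one starts. Minimal usage
// sketch (DFSResult and ScheduledTrees must be wired first, as initialize()
// does below):
//   ILPOrder Cmp(/*MaxILP=*/true);
//   std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);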
3264 
3265 /// \brief Schedule based on the ILP metric.
3266 class ILPScheduler : public MachineSchedStrategy {
3267   ScheduleDAGMILive *DAG;
3268   ILPOrder Cmp;
3269 
3270   std::vector<SUnit*> ReadyQ;
3271 public:
3272   ILPScheduler(bool MaximizeILP): DAG(nullptr), Cmp(MaximizeILP) {}
3273 
3274   void initialize(ScheduleDAGMI *dag) override {
3275     assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
3276     DAG = static_cast<ScheduleDAGMILive*>(dag);
3277     DAG->computeDFSResult();
3278     Cmp.DFSResult = DAG->getDFSResult();
3279     Cmp.ScheduledTrees = &DAG->getScheduledTrees();
3280     ReadyQ.clear();
3281   }
3282 
3283   void registerRoots() override {
3284     // Restore the heap in ReadyQ with the updated DFS results.
3285     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3286   }
3287 
3288   /// Implement MachineSchedStrategy interface.
3289   /// -----------------------------------------
3290 
3291   /// Callback to select the highest priority node from the ready Q.
3292   SUnit *pickNode(bool &IsTopNode) override {
3293     if (ReadyQ.empty()) return nullptr;
3294     std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3295     SUnit *SU = ReadyQ.back();
3296     ReadyQ.pop_back();
3297     IsTopNode = false;
3298     DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") "
3299           << " ILP: " << DAG->getDFSResult()->getILP(SU)
3300           << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @"
3301           << DAG->getDFSResult()->getSubtreeLevel(
3302             DAG->getDFSResult()->getSubtreeID(SU)) << '\n'
3303           << "Scheduling " << *SU->getInstr());
3304     return SU;
3305   }
3306 
3307   /// \brief Scheduler callback to notify that a new subtree is scheduled.
3308   void scheduleTree(unsigned SubtreeID) override {
3309     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3310   }
3311 
3312   /// Callback after a node is scheduled. The priority Q is resorted when the
3313   /// DAG reports a newly scheduled subtree via scheduleTree() above.
3314   void schedNode(SUnit *SU, bool IsTopNode) override {
3315     assert(!IsTopNode && "SchedDFSResult needs bottom-up");
3316   }
3317 
3318   void releaseTopNode(SUnit *) override { /*only called for top roots*/ }
3319 
3320   void releaseBottomNode(SUnit *SU) override {
3321     ReadyQ.push_back(SU);
3322     std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3323   }
3324 };
3325 } // namespace
3326 
3327 static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
3328   return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(true));
3329 }
3330 static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
3331   return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(false));
3332 }
3333 static MachineSchedRegistry ILPMaxRegistry(
3334   "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
3335 static MachineSchedRegistry ILPMinRegistry(
3336   "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);
3337 
3338 //===----------------------------------------------------------------------===//
3339 // Machine Instruction Shuffler for Correctness Testing
3340 //===----------------------------------------------------------------------===//
3341 
3342 #ifndef NDEBUG
3343 namespace {
3344 /// Apply a less-than relation on the node order, which corresponds to the
3345 /// instruction order prior to scheduling. IsReverse implements greater-than.
3346 template<bool IsReverse>
3347 struct SUnitOrder {
3348   bool operator()(SUnit *A, SUnit *B) const {
3349     if (IsReverse)
3350       return A->NodeNum > B->NodeNum;
3351     else
3352       return A->NodeNum < B->NodeNum;
3353   }
3354 };
3355 
3356 /// Reorder instructions as much as possible.
3357 class InstructionShuffler : public MachineSchedStrategy {
3358   bool IsAlternating;
3359   bool IsTopDown;
3360 
3361   // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
3362   // gives nodes with a higher number higher priority causing the latest
3363   // instructions to be scheduled first.
3364   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
3365     TopQ;
3366   // When scheduling bottom-up, use greater-than as the queue priority.
3367   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
3368     BottomQ;
3369 public:
3370   InstructionShuffler(bool alternate, bool topdown)
3371     : IsAlternating(alternate), IsTopDown(topdown) {}
3372 
3373   void initialize(ScheduleDAGMI*) override {
3374     TopQ.clear();
3375     BottomQ.clear();
3376   }
3377 
3378   /// Implement MachineSchedStrategy interface.
3379   /// -----------------------------------------
3380 
3381   SUnit *pickNode(bool &IsTopNode) override {
3382     SUnit *SU;
3383     if (IsTopDown) {
3384       do {
3385         if (TopQ.empty()) return nullptr;
3386         SU = TopQ.top();
3387         TopQ.pop();
3388       } while (SU->isScheduled);
3389       IsTopNode = true;
3390     } else {
3391       do {
3392         if (BottomQ.empty()) return nullptr;
3393         SU = BottomQ.top();
3394         BottomQ.pop();
3395       } while (SU->isScheduled);
3396       IsTopNode = false;
3397     }
3398     if (IsAlternating)
3399       IsTopDown = !IsTopDown;
3400     return SU;
3401   }
3402 
3403   void schedNode(SUnit *SU, bool IsTopNode) override {}
3404 
3405   void releaseTopNode(SUnit *SU) override {
3406     TopQ.push(SU);
3407   }
3408   void releaseBottomNode(SUnit *SU) override {
3409     BottomQ.push(SU);
3410   }
3411 };
3412 } // namespace
3413 
3414 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
3415   bool Alternate = !ForceTopDown && !ForceBottomUp;
3416   bool TopDown = !ForceBottomUp;
3417   assert((TopDown || !ForceTopDown) &&
3418          "-misched-topdown incompatible with -misched-bottomup");
3419   return new ScheduleDAGMILive(C, make_unique<InstructionShuffler>(Alternate, TopDown));
3420 }
3421 static MachineSchedRegistry ShufflerRegistry(
3422   "shuffle", "Shuffle machine instructions alternating directions",
3423   createInstructionShuffler);
3424 #endif // !NDEBUG
3425 
3426 //===----------------------------------------------------------------------===//
3427 // GraphWriter support for ScheduleDAGMILive.
3428 //===----------------------------------------------------------------------===//
3429 
3430 #ifndef NDEBUG
3431 namespace llvm {
3432 
3433 template<> struct GraphTraits<
3434   ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};
3435 
3436 template<>
3437 struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
3438 
3439   DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
3440 
3441   static std::string getGraphName(const ScheduleDAG *G) {
3442     return G->MF.getName();
3443   }
3444 
3445   static bool renderGraphFromBottomUp() {
3446     return true;
3447   }
3448 
3449   static bool isNodeHidden(const SUnit *Node) {
3450     if (ViewMISchedCutoff == 0)
3451       return false;
3452     return (Node->Preds.size() > ViewMISchedCutoff
3453          || Node->Succs.size() > ViewMISchedCutoff);
3454   }
3455 
3456   /// If you want to override the dot attributes printed for a particular
3457   /// edge, override this method.
3458   static std::string getEdgeAttributes(const SUnit *Node,
3459                                        SUnitIterator EI,
3460                                        const ScheduleDAG *Graph) {
3461     if (EI.isArtificialDep())
3462       return "color=cyan,style=dashed";
3463     if (EI.isCtrlDep())
3464       return "color=blue,style=dashed";
3465     return "";
3466   }
3467 
3468   static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
3469     std::string Str;
3470     raw_string_ostream SS(Str);
3471     const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
3472     const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
3473       static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
3474     SS << "SU:" << SU->NodeNum;
3475     if (DFS)
3476       SS << " I:" << DFS->getNumInstrs(SU);
3477     return SS.str();
3478   }
3479   static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
3480     return G->getGraphNodeLabel(SU);
3481   }
3482 
3483   static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
3484     std::string Str("shape=Mrecord");
3485     const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
3486     const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
3487       static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
3488     if (DFS) {
3489       Str += ",style=filled,fillcolor=\"#";
3490       Str += DOT::getColorString(DFS->getSubtreeID(N));
3491       Str += '"';
3492     }
3493     return Str;
3494   }
3495 };
3496 } // namespace llvm
3497 #endif // NDEBUG
3498 
3499 /// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
3500 /// rendered using 'dot'.
3501 ///
3502 void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
3503 #ifndef NDEBUG
3504   ViewGraph(this, Name, false, Title);
3505 #else
3506   errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
3507          << "systems with Graphviz or gv!\n";
3508 #endif  // NDEBUG
3509 }
3510 
3511 /// Out-of-line implementation with no arguments is handy for gdb.
3512 void ScheduleDAGMI::viewGraph() {
3513   viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
3514 }
3515