//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "misched"

#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDAGILP.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/PriorityQueue.h"

#include <queue>

using namespace llvm;

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
}

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

// Threshold to very roughly model an out-of-order processor's instruction
// buffers. If the actual value of this threshold matters much in practice, then
// it can be specified by the machine model. For now, it's an experimental
// tuning knob to determine when and if it matters.
static cl::opt<unsigned> ILPWindow("ilp-window", cl::Hidden,
  cl::desc("Allow expected latency to exceed the critical path by N cycles "
           "before attempting to balance ILP"),
  cl::init(10U));

// Experimental heuristics
static cl::opt<bool> EnableLoadCluster("misched-cluster", cl::Hidden,
  cl::desc("Enable load clustering."));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."));

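// As a rough usage sketch (assuming a debug build, since some of these flags
// are compiled out under NDEBUG), the knobs above are exercised from llc
// along the lines of:
//
//   llc -enable-misched -misched-cluster -misched-cutoff=20 foo.ll
//
// The flag that actually enables the pass belongs to TargetPassConfig, so
// treat this invocation as illustrative rather than authoritative.
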
//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
    MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedContext,
                         public MachineFunctionPass {
public:
  MachineScheduler();

  virtual void getAnalysisUsage(AnalysisUsage &AU) const;

  virtual void releaseMemory() {}

  virtual bool runOnMachineFunction(MachineFunction&);

  virtual void print(raw_ostream &O, const Module* = 0) const;

  static char ID; // Class identification, replacement for typeinfo
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "misched",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "misched",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineFunctionPass(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory used to detect whether the scheduler
/// has been overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return 0;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);
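
// For reference, an out-of-tree scheduler would plug into this same registry.
// A minimal sketch, with invented names (createMySched and MyStrategy are
// not part of LLVM):
//
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
//     return new ScheduleDAGMI(C, new MyStrategy());
//   }
//   static MachineSchedRegistry
//   MySchedRegistry("my-sched", "Toy scheduler for illustration.",
//                   createMySched);
//
// which would then be selectable via -misched=my-sched.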

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C);


/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AliasAnalysis>();

  LIS = &getAnalysis<LiveIntervals>();
  const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();

  RegClassInfo->runOnMachineFunction(*MF);

  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor == useDefaultMachineSched) {
    // Get the default scheduler set by the target.
    Ctor = MachineSchedRegistry::getDefault();
    if (!Ctor) {
      Ctor = createConvergingSched;
      MachineSchedRegistry::setDefault(Ctor);
    }
  }
  // Instantiate the selected scheduler.
  OwningPtr<ScheduleDAGInstrs> Scheduler(Ctor(this));

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler->startBlock(MBB);

    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    unsigned RemainingInstrs = MBB->size();
    for (MachineBasicBlock::iterator RegionEnd = MBB->end();
         RegionEnd != MBB->begin(); RegionEnd = Scheduler->begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end()
          || TII->isSchedulingBoundary(llvm::prior(RegionEnd), MBB, *MF)) {
        --RegionEnd;
        // Count the boundary instruction.
        --RemainingInstrs;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      MachineBasicBlock::iterator I = RegionEnd;
      for (; I != MBB->begin(); --I, --RemainingInstrs) {
        if (TII->isSchedulingBoundary(llvm::prior(I), MBB, *MF))
          break;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler->enterRegion(MBB, I, RegionEnd, RemainingInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == llvm::prior(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler->exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << "\n  From: " << *I << "    To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " Remaining: " << RemainingInstrs << "\n");

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler->schedule();

      // Close the current region.
      Scheduler->exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler->begin();
    }
    assert(RemainingInstrs == 0 && "Instruction count mismatch!");
    Scheduler->finishBlock();
  }
  Scheduler->finalizeSchedule();
  DEBUG(LIS->print(dbgs()));
  return true;
}
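
// Positional illustration (instruction names are hypothetical): for a block
//   I0 I1 B I2 I3 T
// where B is a target scheduling boundary and T is the terminator, the loop
// above visits the regions [I2,I3] and then [I0,I1] bottom-up, counting B
// and T as boundary instructions but leaving them in place.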

void MachineScheduler::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ReadyQueue::dump() {
  dbgs() << Name << ": ";
  for (unsigned i = 0, e = Queue.size(); i < e; ++i)
    dbgs() << Queue[i]->NodeNum << " ";
  dbgs() << "\n";
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}
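
// Typical call from a DAG mutation (LoadClusterMutation below does exactly
// this):
//   if (DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
//     // The cluster edge was accepted without creating a cycle.
//   }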

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

void ScheduleDAGMI::moveInstruction(MachineInstr *MI,
                                    MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals
  LIS->handleMove(MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned endcount)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, endcount);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd =
    (RegionEnd == bb->end()) ? RegionEnd : llvm::next(RegionEnd);
}

// Set up the register pressure trackers for the top-scheduled and
// bottom-scheduled regions.
void ScheduleDAGMI::initRegPressure() {
  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.getPressure().dump(TRI));

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    BotRPTracker.recede();

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  std::vector<unsigned> RegionPressure = RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = TRI->getRegPressureSetLimit(i);
    DEBUG(dbgs() << TRI->getRegPressureSetName(i)
          << " Limit " << Limit
          << " Actual " << RegionPressure[i] << "\n");
    if (RegionPressure[i] > Limit)
      RegionCriticalPSets.push_back(PressureElement(i, 0));
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].PSetID) << " ";
        dbgs() << "\n");
}
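
// Worked example (numbers invented): if a pressure set has limit 16 but the
// region's max pressure reaches 18, the set is recorded above as
// PressureElement(i, 0); updateScheduledPressure below then remembers the
// maximum pressure the scheduled code actually reaches for that set.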

// FIXME: When the pressure tracker deals in pressure differences then we won't
// iterate over all RegionCriticalPSets[i].
void ScheduleDAGMI::
updateScheduledPressure(std::vector<unsigned> NewMaxPressure) {
  for (unsigned i = 0, e = RegionCriticalPSets.size(); i < e; ++i) {
    unsigned ID = RegionCriticalPSets[i].PSetID;
    int &MaxUnits = RegionCriticalPSets[i].UnitIncrease;
    if ((int)NewMaxPressure[ID] > MaxUnits)
      MaxUnits = NewMaxPressure[ID];
  }
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMI then it will want to override this virtual method in order to
/// update any specialized state.
void ScheduleDAGMI::schedule() {
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  if (ViewMISchedDAGs) viewGraph();

  initQueues();

  bool IsTopNode = false;
  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = top()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}
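
// As the comment above suggests, a new algorithm usually only needs a
// MachineSchedStrategy. A minimal sketch (InstructionOrderStrategy is an
// invented name, not part of the tree) that schedules bottom-up in release
// order:
//
//   struct InstructionOrderStrategy : public MachineSchedStrategy {
//     std::vector<SUnit*> Ready;
//     virtual void initialize(ScheduleDAGMI *DAG) { Ready.clear(); }
//     virtual SUnit *pickNode(bool &IsTopNode) {
//       IsTopNode = false;
//       if (Ready.empty()) return NULL;   // NULL ends scheduling.
//       SUnit *SU = Ready.back();
//       Ready.pop_back();
//       return SU;
//     }
//     virtual void schedNode(SUnit *SU, bool IsTopNode) {}
//     virtual void releaseTopNode(SUnit *SU) {}
//     virtual void releaseBottomNode(SUnit *SU) { Ready.push_back(SU); }
//   };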

/// Build the DAG and set up three register pressure trackers.
void ScheduleDAGMI::buildDAGWithRegPressure() {
  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker);
  if (ViewMISchedDAGs) viewGraph();

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
    Mutations[i]->apply(this);
  }
}

// Release all DAG roots for scheduling.
//
// Nodes with unreleased weak edges can still be roots.
void ScheduleDAGMI::releaseRoots() {
  SmallVector<SUnit*, 16> BotRoots;

  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    SUnit *SU = &(*I);
    // A SUnit is ready to top schedule if it has no predecessors.
    if (!I->NumPredsLeft && SU != &EntrySU)
      SchedImpl->releaseTopNode(SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!I->NumSuccsLeft && SU != &ExitSU)
      BotRoots.push_back(SU);
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I)
    SchedImpl->releaseBottomNode(*I);
}

/// Identify DAG roots and set up scheduler queues.
void ScheduleDAGMI::initQueues() {
  NextClusterSucc = NULL;
  NextClusterPred = NULL;

  // Initialize the strategy before modifying the DAG.
  SchedImpl->initialize(this);

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  releaseRoots();

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}

/// Move an instruction and update register pressure.
void ScheduleDAGMI::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    // Update top scheduled pressure.
    TopRPTracker.advance();
    assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
    updateScheduledPressure(TopRPTracker.getPressure().MaxSetPressure);
  }
  else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
    }
    // Update bottom scheduled pressure.
    BotRPTracker.recede();
    assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
    updateScheduledPressure(BotRPTracker.getPressure().MaxSetPressure);
  }
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;

  // Notify the scheduling strategy after updating the DAG.
  SchedImpl->schedNode(SU, IsTopNode);
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == llvm::prior(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = NULL;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// LoadClusterMutation - DAG post-processing to cluster loads.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between neighboring
/// loads.
class LoadClusterMutation : public ScheduleDAGMutation {
  struct LoadInfo {
    SUnit *SU;
    unsigned BaseReg;
    unsigned Offset;
    LoadInfo(SUnit *su, unsigned reg, unsigned ofs)
      : SU(su), BaseReg(reg), Offset(ofs) {}
  };
  static bool LoadInfoLess(const LoadClusterMutation::LoadInfo &LHS,
                           const LoadClusterMutation::LoadInfo &RHS);

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
public:
  LoadClusterMutation(const TargetInstrInfo *tii,
                      const TargetRegisterInfo *tri)
    : TII(tii), TRI(tri) {}

  virtual void apply(ScheduleDAGMI *DAG);
protected:
  void clusterNeighboringLoads(ArrayRef<SUnit*> Loads, ScheduleDAGMI *DAG);
};
} // anonymous

bool LoadClusterMutation::LoadInfoLess(
  const LoadClusterMutation::LoadInfo &LHS,
  const LoadClusterMutation::LoadInfo &RHS) {
  if (LHS.BaseReg != RHS.BaseReg)
    return LHS.BaseReg < RHS.BaseReg;
  return LHS.Offset < RHS.Offset;
}

void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
                                                  ScheduleDAGMI *DAG) {
  SmallVector<LoadClusterMutation::LoadInfo,32> LoadRecords;
  for (unsigned Idx = 0, End = Loads.size(); Idx != End; ++Idx) {
    SUnit *SU = Loads[Idx];
    unsigned BaseReg;
    unsigned Offset;
    if (TII->getLdStBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
      LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset));
  }
  if (LoadRecords.size() < 2)
    return;
  std::sort(LoadRecords.begin(), LoadRecords.end(), LoadInfoLess);
  unsigned ClusterLength = 1;
  for (unsigned Idx = 0, End = LoadRecords.size(); Idx < (End - 1); ++Idx) {
    if (LoadRecords[Idx].BaseReg != LoadRecords[Idx+1].BaseReg) {
      ClusterLength = 1;
      continue;
    }

    SUnit *SUa = LoadRecords[Idx].SU;
    SUnit *SUb = LoadRecords[Idx+1].SU;
    if (TII->shouldScheduleLoadsNear(SUa->getInstr(), SUb->getInstr(),
                                     ClusterLength)
        && DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {

      DEBUG(dbgs() << "Cluster loads SU(" << SUa->NodeNum << ") - SU("
            << SUb->NodeNum << ")\n");
      // Copy successor edges from SUa to SUb. Interleaving computation
      // dependent on SUa can prevent load combining due to register reuse.
      // Predecessor edges do not need to be copied from SUb to SUa since nearby
      // loads should have effectively the same inputs.
      for (SUnit::const_succ_iterator
             SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
        if (SI->getSUnit() == SUb)
          continue;
        DEBUG(dbgs() << "  Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
        DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
      }
      ++ClusterLength;
    }
    else
      ClusterLength = 1;
  }
}
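
// Hypothetical example (target and registers invented): given two loads
//   SU(0): %r1 = LDR %r0, 0
//   SU(2): %r2 = LDR %r0, 4
// both share base %r0 with adjacent offsets, so if the target's
// shouldScheduleLoadsNear hook agrees, the weak cluster edge SU(0)->SU(2)
// biases the scheduler to keep them back-to-back.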

/// \brief Callback from DAG postProcessing to create cluster edges for loads.
void LoadClusterMutation::apply(ScheduleDAGMI *DAG) {
  // Map DAG NodeNum to store chain ID.
  DenseMap<unsigned, unsigned> StoreChainIDs;
  // Map each store chain to a set of dependent loads.
  SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->mayLoad())
      continue;
    unsigned ChainPredID = DAG->SUnits.size();
    for (SUnit::const_pred_iterator
           PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
      if (PI->isCtrl()) {
        ChainPredID = PI->getSUnit()->NodeNum;
        break;
      }
    }
    // Check if this chain-like pred has been seen
    // before. ChainPredID==MaxNodeID for loads at the top of the schedule.
    unsigned NumChains = StoreChainDependents.size();
    std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
      StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
    if (Result.second)
      StoreChainDependents.resize(NumChains + 1);
    StoreChainDependents[Result.first->second].push_back(SU);
  }
  // Iterate over the store chains.
  for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
    clusterNeighboringLoads(StoreChainDependents[Idx], DAG);
}

//===----------------------------------------------------------------------===//
// MacroFusion - DAG post-processing to encourage fusion of macro ops.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between instructions
/// that may be fused by the processor into a single operation.
class MacroFusion : public ScheduleDAGMutation {
  const TargetInstrInfo *TII;
public:
  MacroFusion(const TargetInstrInfo *tii): TII(tii) {}

  virtual void apply(ScheduleDAGMI *DAG);
};
} // anonymous

/// \brief Callback from DAG postProcessing to create cluster edges to encourage
/// fused operations.
void MacroFusion::apply(ScheduleDAGMI *DAG) {
  // For now, assume targets can only fuse with the branch.
  MachineInstr *Branch = DAG->ExitSU.getInstr();
  if (!Branch)
    return;

  for (unsigned Idx = DAG->SUnits.size(); Idx > 0;) {
    SUnit *SU = &DAG->SUnits[--Idx];
    if (!TII->shouldScheduleAdjacent(SU->getInstr(), Branch))
      continue;

    // Create a single weak edge from SU to ExitSU. The only effect is to cause
    // bottom-up scheduling to heavily prioritize the clustered SU.  There is no
    // need to copy predecessor edges from ExitSU to SU, since top-down
    // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
    // of SU, we could create an artificial edge from the deepest root, but it
    // hasn't been needed yet.
    bool Success = DAG->addEdge(&DAG->ExitSU, SDep(SU, SDep::Cluster));
    (void)Success;
    assert(Success && "No DAG nodes should be reachable from ExitSU");

    DEBUG(dbgs() << "Macro Fuse SU(" << SU->NodeNum << ")\n");
    break;
  }
}
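
// Illustrative case (x86-flavored, details invented): if the region ends in
//   SU(n): CMP32rr %a, %b
//   ExitSU: JNE <bb>
// and shouldScheduleAdjacent approves the pair, the cluster edge pulls the
// compare next to the branch so hardware macro-fusion can combine them.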

//===----------------------------------------------------------------------===//
// ConvergingScheduler - Implementation of the standard MachineSchedStrategy.
//===----------------------------------------------------------------------===//

namespace {
/// ConvergingScheduler shrinks the unscheduled zone using heuristics to balance
/// the schedule.
class ConvergingScheduler : public MachineSchedStrategy {
public:
  /// Represent the type of SchedCandidate found within a single queue.
  /// pickNodeBidirectional depends on these listed by decreasing priority.
  enum CandReason {
    NoCand, SingleExcess, SingleCritical, Cluster,
    ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce,
    TopDepthReduce, TopPathReduce, SingleMax, MultiPressure, NextDefUse,
    NodeOrder};

#ifndef NDEBUG
  static const char *getReasonStr(ConvergingScheduler::CandReason Reason);
#endif

  /// Policy for scheduling the next instruction in the candidate's zone.
  struct CandPolicy {
    bool ReduceLatency;
    unsigned ReduceResIdx;
    unsigned DemandResIdx;

    CandPolicy(): ReduceLatency(false), ReduceResIdx(0), DemandResIdx(0) {}
  };

  /// Status of an instruction's critical resource consumption.
  struct SchedResourceDelta {
    // Count critical resources in the scheduled region required by SU.
    unsigned CritResources;

    // Count critical resources from another region consumed by SU.
    unsigned DemandedResources;

    SchedResourceDelta(): CritResources(0), DemandedResources(0) {}

    bool operator==(const SchedResourceDelta &RHS) const {
      return CritResources == RHS.CritResources
        && DemandedResources == RHS.DemandedResources;
    }
    bool operator!=(const SchedResourceDelta &RHS) const {
      return !operator==(RHS);
    }
  };

  /// Store the state used by ConvergingScheduler heuristics, required for the
  /// lifetime of one invocation of pickNode().
  struct SchedCandidate {
    CandPolicy Policy;

    // The best SUnit candidate.
    SUnit *SU;

    // The reason for this candidate.
    CandReason Reason;

    // Register pressure values for the best candidate.
    RegPressureDelta RPDelta;

    // Critical resource consumption of the best candidate.
    SchedResourceDelta ResDelta;

    SchedCandidate(const CandPolicy &policy)
    : Policy(policy), SU(NULL), Reason(NoCand) {}

    bool isValid() const { return SU; }

    // Copy the status of another candidate without changing policy.
    void setBest(SchedCandidate &Best) {
      assert(Best.Reason != NoCand && "uninitialized Sched candidate");
      SU = Best.SU;
      Reason = Best.Reason;
      RPDelta = Best.RPDelta;
      ResDelta = Best.ResDelta;
    }

    void initResourceDelta(const ScheduleDAGMI *DAG,
                           const TargetSchedModel *SchedModel);
  };

  /// Summarize the unscheduled region.
  struct SchedRemainder {
    // Critical path through the DAG in expected latency.
    unsigned CriticalPath;

    // Unscheduled resources
    SmallVector<unsigned, 16> RemainingCounts;
    // Critical resource for the unscheduled zone.
    unsigned CritResIdx;
    // Number of micro-ops left to schedule.
    unsigned RemainingMicroOps;
    // Is the unscheduled zone resource limited.
    bool IsResourceLimited;

    unsigned MaxRemainingCount;

    void reset() {
      CriticalPath = 0;
      RemainingCounts.clear();
      CritResIdx = 0;
      RemainingMicroOps = 0;
      IsResourceLimited = false;
      MaxRemainingCount = 0;
    }

    SchedRemainder() { reset(); }

    void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);
  };

972   /// current cycle in the direction of movement, and maintains the state
973   /// of "hazards" and other interlocks at the current cycle.
974   struct SchedBoundary {
975     ScheduleDAGMI *DAG;
976     const TargetSchedModel *SchedModel;
977     SchedRemainder *Rem;
978 
979     ReadyQueue Available;
980     ReadyQueue Pending;
981     bool CheckPending;
982 
983     // For heuristics, keep a list of the nodes that immediately depend on the
984     // most recently scheduled node.
985     SmallPtrSet<const SUnit*, 8> NextSUs;
986 
987     ScheduleHazardRecognizer *HazardRec;
988 
989     unsigned CurrCycle;
990     unsigned IssueCount;
991 
992     /// MinReadyCycle - Cycle of the soonest available instruction.
993     unsigned MinReadyCycle;
994 
995     // The expected latency of the critical path in this scheduled zone.
996     unsigned ExpectedLatency;
997 
998     // Resources used in the scheduled zone beyond this boundary.
999     SmallVector<unsigned, 16> ResourceCounts;
1000 
1001     // Cache the critical resources ID in this scheduled zone.
1002     unsigned CritResIdx;
1003 
1004     // Is the scheduled region resource limited vs. latency limited.
1005     bool IsResourceLimited;
1006 
1007     unsigned ExpectedCount;
1008 
1009     // Policy flag: attempt to find ILP until expected latency is covered.
1010     bool ShouldIncreaseILP;
1011 
1012 #ifndef NDEBUG
1013     // Remember the greatest min operand latency.
1014     unsigned MaxMinLatency;
1015 #endif
1016 
1017     void reset() {
1018       Available.clear();
1019       Pending.clear();
1020       CheckPending = false;
1021       NextSUs.clear();
1022       HazardRec = 0;
1023       CurrCycle = 0;
1024       IssueCount = 0;
1025       MinReadyCycle = UINT_MAX;
1026       ExpectedLatency = 0;
      // Reserve a zero-count for invalid CritResIdx.
      ResourceCounts.resize(1);
      assert(!ResourceCounts[0] && "nonzero count for bad resource");
      CritResIdx = 0;
      IsResourceLimited = false;
      ExpectedCount = 0;
      ShouldIncreaseILP = false;
#ifndef NDEBUG
      MaxMinLatency = 0;
#endif
    }

    /// Pending queues extend the ready queues with the same ID and the
    /// PendingFlag set.
    SchedBoundary(unsigned ID, const Twine &Name):
      DAG(0), SchedModel(0), Rem(0), Available(ID, Name+".A"),
      Pending(ID << ConvergingScheduler::LogMaxQID, Name+".P") {
      reset();
    }

    ~SchedBoundary() { delete HazardRec; }

    void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel,
              SchedRemainder *rem);

    bool isTop() const {
      return Available.getID() == ConvergingScheduler::TopQID;
    }

    unsigned getUnscheduledLatency(SUnit *SU) const {
      if (isTop())
        return SU->getHeight();
      return SU->getDepth();
    }

    unsigned getCriticalCount() const {
      return ResourceCounts[CritResIdx];
    }

    bool checkHazard(SUnit *SU);

    void checkILPPolicy();

    void releaseNode(SUnit *SU, unsigned ReadyCycle);

    void bumpCycle();

    void countResource(unsigned PIdx, unsigned Cycles);

    void bumpNode(SUnit *SU);

    void releasePending();

    void removeReady(SUnit *SU);

    SUnit *pickOnlyChoice();
  };

private:
  ScheduleDAGMI *DAG;
  const TargetSchedModel *SchedModel;
  const TargetRegisterInfo *TRI;

  // State of the top and bottom scheduled instruction boundaries.
  SchedRemainder Rem;
  SchedBoundary Top;
  SchedBoundary Bot;

public:
  /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
  enum {
    TopQID = 1,
    BotQID = 2,
    LogMaxQID = 2
  };

  ConvergingScheduler():
    DAG(0), SchedModel(0), TRI(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}

  virtual void initialize(ScheduleDAGMI *dag);

  virtual SUnit *pickNode(bool &IsTopNode);

  virtual void schedNode(SUnit *SU, bool IsTopNode);

  virtual void releaseTopNode(SUnit *SU);

  virtual void releaseBottomNode(SUnit *SU);

  virtual void registerRoots();

protected:
  void balanceZones(
    ConvergingScheduler::SchedBoundary &CriticalZone,
    ConvergingScheduler::SchedCandidate &CriticalCand,
    ConvergingScheduler::SchedBoundary &OppositeZone,
    ConvergingScheduler::SchedCandidate &OppositeCand);

  void checkResourceLimits(ConvergingScheduler::SchedCandidate &TopCand,
                           ConvergingScheduler::SchedCandidate &BotCand);

  void tryCandidate(SchedCandidate &Cand,
                    SchedCandidate &TryCand,
                    SchedBoundary &Zone,
                    const RegPressureTracker &RPTracker,
                    RegPressureTracker &TempTracker);

  SUnit *pickNodeBidirectional(bool &IsTopNode);

  void pickNodeFromQueue(SchedBoundary &Zone,
                         const RegPressureTracker &RPTracker,
                         SchedCandidate &Candidate);

#ifndef NDEBUG
  void traceCandidate(const SchedCandidate &Cand, const SchedBoundary &Zone);
#endif
};
} // namespace

void ConvergingScheduler::SchedRemainder::
init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
  reset();
  if (!SchedModel->hasInstrSchedModel())
    return;
  RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
  for (std::vector<SUnit>::iterator
         I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
    RemainingMicroOps += SchedModel->getNumMicroOps(I->getInstr(), SC);
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned PIdx = PI->ProcResourceIdx;
      unsigned Factor = SchedModel->getResourceFactor(PIdx);
      RemainingCounts[PIdx] += (Factor * PI->Cycles);
    }
  }
}

void ConvergingScheduler::SchedBoundary::
init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
  reset();
  DAG = dag;
  SchedModel = smodel;
  Rem = rem;
  if (SchedModel->hasInstrSchedModel())
    ResourceCounts.resize(SchedModel->getNumProcResourceKinds());
}

void ConvergingScheduler::initialize(ScheduleDAGMI *dag) {
  DAG = dag;
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;
  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  Bot.init(DAG, SchedModel, &Rem);

  // Initialize resource counts.

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
  // are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  const TargetMachine &TM = DAG->MF.getTarget();
  Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
  Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);

  assert((!ForceTopDown || !ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
}

void ConvergingScheduler::releaseTopNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle;
    unsigned MinLatency = I->getMinLatency();
#ifndef NDEBUG
    Top.MaxMinLatency = std::max(MinLatency, Top.MaxMinLatency);
#endif
    if (SU->TopReadyCycle < PredReadyCycle + MinLatency)
      SU->TopReadyCycle = PredReadyCycle + MinLatency;
  }
  Top.releaseNode(SU, SU->TopReadyCycle);
}

void ConvergingScheduler::releaseBottomNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  assert(SU->getInstr() && "Scheduled SUnit must have instr");

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isWeak())
      continue;
    unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
    unsigned MinLatency = I->getMinLatency();
#ifndef NDEBUG
    Bot.MaxMinLatency = std::max(MinLatency, Bot.MaxMinLatency);
#endif
    if (SU->BotReadyCycle < SuccReadyCycle + MinLatency)
      SU->BotReadyCycle = SuccReadyCycle + MinLatency;
  }
  Bot.releaseNode(SU, SU->BotReadyCycle);
}

void ConvergingScheduler::registerRoots() {
  Rem.CriticalPath = DAG->ExitSU.getDepth();
  // Some roots may not feed into ExitSU. Check all of them in case.
  for (std::vector<SUnit*>::const_iterator
         I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
    if ((*I)->getDepth() > Rem.CriticalPath)
      Rem.CriticalPath = (*I)->getDepth();
  }
  DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n');
}

/// Does this SU have a hazard within the current instruction group?
///
/// The scheduler supports two modes of hazard recognition. The first is the
/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
/// supports highly complicated in-order reservation tables
/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
///
/// The second is a streamlined mechanism that checks for hazards based on
/// simple counters that the scheduler itself maintains. It explicitly checks
/// for instruction dispatch limitations, including the number of micro-ops that
/// can dispatch per cycle.
///
/// TODO: Also check whether the SU must start a new group.
bool ConvergingScheduler::SchedBoundary::checkHazard(SUnit *SU) {
  if (HazardRec->isEnabled())
    return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard;

  unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
  if ((IssueCount > 0) && (IssueCount + uops > SchedModel->getIssueWidth())) {
    DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
          << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
    return true;
  }
  return false;
}
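
// Worked example (numbers invented): with IssueWidth = 4 and IssueCount = 3,
// an SU worth 2 micro-ops gives 3 + 2 > 4, so checkHazard reports a hazard
// and releaseNode parks the SU in the Pending queue until the cycle advances.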

/// If expected latency is covered, disable ILP policy.
void ConvergingScheduler::SchedBoundary::checkILPPolicy() {
  if (ShouldIncreaseILP
      && (IsResourceLimited || ExpectedLatency <= CurrCycle)) {
    ShouldIncreaseILP = false;
    DEBUG(dbgs() << "Disable ILP: " << Available.getName() << '\n');
  }
}

void ConvergingScheduler::SchedBoundary::releaseNode(SUnit *SU,
                                                     unsigned ReadyCycle) {

  if (ReadyCycle < MinReadyCycle)
    MinReadyCycle = ReadyCycle;

  // Check for interlocks first. For the purpose of other heuristics, an
  // instruction that cannot issue appears as if it's not in the ReadyQueue.
  if (ReadyCycle > CurrCycle || checkHazard(SU))
    Pending.push(SU);
  else
    Available.push(SU);

  // Record this node as an immediate dependent of the scheduled node.
  NextSUs.insert(SU);

  // If CriticalPath has been computed, then check if the unscheduled nodes
  // exceed the ILP window. Before registerRoots, CriticalPath==0.
  if (Rem->CriticalPath && (ExpectedLatency + getUnscheduledLatency(SU)
                            > Rem->CriticalPath + ILPWindow)) {
    ShouldIncreaseILP = true;
    DEBUG(dbgs() << "Increase ILP: " << Available.getName() << " "
          << ExpectedLatency << " + " << getUnscheduledLatency(SU) << '\n');
  }
}
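
// Worked example (numbers invented): with CriticalPath = 20 and the default
// ILPWindow = 10, releasing a node while ExpectedLatency = 25 and
// getUnscheduledLatency(SU) = 8 gives 33 > 30, so ShouldIncreaseILP is set
// and this zone's policy starts favoring latency reduction.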

/// Move the boundary of scheduled code by one cycle.
void ConvergingScheduler::SchedBoundary::bumpCycle() {
  unsigned Width = SchedModel->getIssueWidth();
  IssueCount = (IssueCount <= Width) ? 0 : IssueCount - Width;

  unsigned NextCycle = CurrCycle + 1;
  assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
  if (MinReadyCycle > NextCycle) {
    IssueCount = 0;
    NextCycle = MinReadyCycle;
  }

  if (!HazardRec->isEnabled()) {
    // Bypass HazardRec virtual calls.
    CurrCycle = NextCycle;
  }
  else {
    // Bypass getHazardType calls in case of long latency.
    for (; CurrCycle != NextCycle; ++CurrCycle) {
      if (isTop())
        HazardRec->AdvanceCycle();
      else
        HazardRec->RecedeCycle();
    }
  }
  CheckPending = true;
  IsResourceLimited = getCriticalCount() > std::max(ExpectedLatency, CurrCycle);

  DEBUG(dbgs() << "  *** " << Available.getName() << " cycle "
        << CurrCycle << '\n');
}

/// Add the given processor resource to this scheduled zone.
void ConvergingScheduler::SchedBoundary::countResource(unsigned PIdx,
                                                       unsigned Cycles) {
  unsigned Factor = SchedModel->getResourceFactor(PIdx);
  DEBUG(dbgs() << "  " << SchedModel->getProcResource(PIdx)->Name
        << " +(" << Cycles << "x" << Factor
        << ") / " << SchedModel->getLatencyFactor() << '\n');

  unsigned Count = Factor * Cycles;
  ResourceCounts[PIdx] += Count;
  assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
  Rem->RemainingCounts[PIdx] -= Count;

  // Reset MaxRemainingCount for sanity.
  Rem->MaxRemainingCount = 0;

  // Check if this resource exceeds the current critical resource by a full
  // cycle. If so, it becomes the critical resource.
  if ((int)(ResourceCounts[PIdx] - ResourceCounts[CritResIdx])
      >= (int)SchedModel->getLatencyFactor()) {
    CritResIdx = PIdx;
    DEBUG(dbgs() << "  *** Critical resource "
          << SchedModel->getProcResource(PIdx)->Name << " x"
          << ResourceCounts[PIdx] << '\n');
  }
}
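
// Worked example (numbers invented): with a latency factor of 2 and
// getResourceFactor(PIdx) == 2, counting Cycles = 1 adds 2 units to
// ResourceCounts[PIdx]. Once that count leads ResourceCounts[CritResIdx] by
// at least the latency factor, a full cycle's worth, PIdx takes over as the
// critical resource.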

/// Move the boundary of scheduled code by one SUnit.
void ConvergingScheduler::SchedBoundary::bumpNode(SUnit *SU) {
  // Update the reservation table.
  if (HazardRec->isEnabled()) {
    if (!isTop() && SU->isCall) {
      // Calls are scheduled with their preceding instructions. For bottom-up
      // scheduling, clear the pipeline state before emitting.
      HazardRec->Reset();
    }
    HazardRec->EmitInstruction(SU);
  }
  // Update resource counts and critical resource.
  if (SchedModel->hasInstrSchedModel()) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
    Rem->RemainingMicroOps -= SchedModel->getNumMicroOps(SU->getInstr(), SC);
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      countResource(PI->ProcResourceIdx, PI->Cycles);
    }
  }
  if (isTop()) {
    if (SU->getDepth() > ExpectedLatency)
      ExpectedLatency = SU->getDepth();
  }
  else {
    if (SU->getHeight() > ExpectedLatency)
      ExpectedLatency = SU->getHeight();
  }

  IsResourceLimited = getCriticalCount() > std::max(ExpectedLatency, CurrCycle);

  // Check the instruction group dispatch limit.
  // TODO: Check if this SU must end a dispatch group.
  IssueCount += SchedModel->getNumMicroOps(SU->getInstr());

  // checkHazard prevents scheduling multiple instructions per cycle that exceed
  // issue width. However, we commonly reach the maximum. In this case
  // opportunistically bump the cycle to avoid uselessly checking everything in
  // the readyQ. Furthermore, a single instruction may produce more than one
  // cycle's worth of micro-ops.
  if (IssueCount >= SchedModel->getIssueWidth()) {
    DEBUG(dbgs() << "  *** Max instrs at cycle " << CurrCycle << '\n');
    bumpCycle();
  }
}

/// Release pending ready nodes into the available queue. This makes them
/// visible to heuristics.
1416 void ConvergingScheduler::SchedBoundary::releasePending() {
1417   // If the available queue is empty, it is safe to reset MinReadyCycle.
1418   if (Available.empty())
1419     MinReadyCycle = UINT_MAX;
1420 
1421   // Check to see if any of the pending instructions are ready to issue.  If
1422   // so, add them to the available queue.
1423   for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
1424     SUnit *SU = *(Pending.begin()+i);
1425     unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
1426 
1427     if (ReadyCycle < MinReadyCycle)
1428       MinReadyCycle = ReadyCycle;
1429 
1430     if (ReadyCycle > CurrCycle)
1431       continue;
1432 
1433     if (checkHazard(SU))
1434       continue;
1435 
1436     Available.push(SU);
1437     Pending.remove(Pending.begin()+i);
1438     --i; --e;
1439   }
1440   DEBUG(if (!Pending.empty()) Pending.dump());
1441   CheckPending = false;
1442 }
1443 
1444 /// Remove SU from the ready set for this boundary.
1445 void ConvergingScheduler::SchedBoundary::removeReady(SUnit *SU) {
1446   if (Available.isInQueue(SU))
1447     Available.remove(Available.find(SU));
1448   else {
1449     assert(Pending.isInQueue(SU) && "bad ready count");
1450     Pending.remove(Pending.find(SU));
1451   }
1452 }
1453 
1454 /// If this queue only has one ready candidate, return it. As a side effect,
1455 /// defer any nodes that now hit a hazard, and advance the cycle until at least
1456 /// one node is ready. If multiple instructions are ready, return NULL.
1457 SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() {
1458   if (CheckPending)
1459     releasePending();
1460 
1461   if (IssueCount > 0) {
1462     // Defer any ready instrs that now have a hazard.
1463     for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
1464       if (checkHazard(*I)) {
1465         Pending.push(*I);
1466         I = Available.remove(I);
1467         continue;
1468       }
1469       ++I;
1470     }
1471   }
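       // If no node is available, advance the cycle, releasing any newly ready
       // pending nodes, until one becomes available. The assert catches hazards
       // that can never clear.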
1472   for (unsigned i = 0; Available.empty(); ++i) {
1473     assert(i <= (HazardRec->getMaxLookAhead() + MaxMinLatency) &&
1474            "permanent hazard"); (void)i;
1475     bumpCycle();
1476     releasePending();
1477   }
1478   if (Available.size() == 1)
1479     return *Available.begin();
1480   return NULL;
1481 }
1482 
1483 /// Record the candidate policy for opposite zones with different critical
1484 /// resources.
1485 ///
1486 /// If the CriticalZone is latency limited, don't force a policy for the
1487 /// candidates here. Instead, when releasing each candidate, releaseNode
1488 /// compares the region's critical path to the candidate's height or depth and
1489 /// the scheduled zone's expected latency, then sets ShouldIncreaseILP.
1490 void ConvergingScheduler::balanceZones(
1491   ConvergingScheduler::SchedBoundary &CriticalZone,
1492   ConvergingScheduler::SchedCandidate &CriticalCand,
1493   ConvergingScheduler::SchedBoundary &OppositeZone,
1494   ConvergingScheduler::SchedCandidate &OppositeCand) {
1495 
1496   if (!CriticalZone.IsResourceLimited)
1497     return;
1498 
1499   SchedRemainder *Rem = CriticalZone.Rem;
1500 
1501   // If the critical zone is overconsuming a resource relative to the
1502   // remainder, try to reduce it.
1503   unsigned RemainingCritCount =
1504     Rem->RemainingCounts[CriticalZone.CritResIdx];
1505   if ((int)(Rem->MaxRemainingCount - RemainingCritCount)
1506       > (int)SchedModel->getLatencyFactor()) {
1507     CriticalCand.Policy.ReduceResIdx = CriticalZone.CritResIdx;
1508     DEBUG(dbgs() << "Balance " << CriticalZone.Available.getName() << " reduce "
1509           << SchedModel->getProcResource(CriticalZone.CritResIdx)->Name
1510           << '\n');
1511   }
1512   // If the other zone is underconsuming a resource relative to the full zone,
1513   // try to increase it.
1514   unsigned OppositeCount =
1515     OppositeZone.ResourceCounts[CriticalZone.CritResIdx];
1516   if ((int)(OppositeZone.ExpectedCount - OppositeCount)
1517       > (int)SchedModel->getLatencyFactor()) {
1518     OppositeCand.Policy.DemandResIdx = CriticalZone.CritResIdx;
1519     DEBUG(dbgs() << "Balance " << OppositeZone.Available.getName() << " demand "
1520           << SchedModel->getProcResource(OppositeZone.CritResIdx)->Name
1521           << '\n');
1522   }
1523 }
1524 
1525 /// Determine if the scheduled zones exceed resource limits or the critical
1526 /// path, and set each candidate's ReduceLatency/ReduceResIdx policy accordingly.
1527 void ConvergingScheduler::checkResourceLimits(
1528   ConvergingScheduler::SchedCandidate &TopCand,
1529   ConvergingScheduler::SchedCandidate &BotCand) {
1530 
1531   Bot.checkILPPolicy();
1532   Top.checkILPPolicy();
1533   if (Bot.ShouldIncreaseILP)
1534     BotCand.Policy.ReduceLatency = true;
1535   if (Top.ShouldIncreaseILP)
1536     TopCand.Policy.ReduceLatency = true;
1537 
1538   // Handle resource-limited regions.
1539   if (Top.IsResourceLimited && Bot.IsResourceLimited
1540       && Top.CritResIdx == Bot.CritResIdx) {
1541     // If the scheduled critical resource in both zones is no longer the
1542     // critical remaining resource, attempt to reduce resource height both ways.
1543     if (Top.CritResIdx != Rem.CritResIdx) {
1544       TopCand.Policy.ReduceResIdx = Top.CritResIdx;
1545       BotCand.Policy.ReduceResIdx = Bot.CritResIdx;
1546       DEBUG(dbgs() << "Reduce scheduled "
1547             << SchedModel->getProcResource(Top.CritResIdx)->Name << '\n');
1548     }
1549     return;
1550   }
1551   // Handle latency-limited regions.
1552   if (!Top.IsResourceLimited && !Bot.IsResourceLimited) {
1553     // If the total scheduled expected latency exceeds the region's critical
1554     // path then reduce latency both ways.
1555     //
1556     // Just because a zone is not resource limited does not mean it is latency
1557     // limited. Unbuffered resources, such as the max micro-op limit, may
1558     // cause CurrCycle to exceed expected latency.
1559     if ((Top.ExpectedLatency + Bot.ExpectedLatency >= Rem.CriticalPath)
1560         && (Rem.CriticalPath > Top.CurrCycle + Bot.CurrCycle)) {
1561       TopCand.Policy.ReduceLatency = true;
1562       BotCand.Policy.ReduceLatency = true;
1563       DEBUG(dbgs() << "Reduce scheduled latency " << Top.ExpectedLatency
1564             << " + " << Bot.ExpectedLatency << '\n');
1565     }
1566     return;
1567   }
1568   // The critical resource is different in each zone, so request balancing.
1569 
1570   // Compute the cost of each zone.
1571   Rem.MaxRemainingCount = std::max(
1572     Rem.RemainingMicroOps * SchedModel->getMicroOpFactor(),
1573     Rem.RemainingCounts[Rem.CritResIdx]);
1574   Top.ExpectedCount = std::max(Top.ExpectedLatency, Top.CurrCycle);
1575   Top.ExpectedCount = std::max(
1576     Top.getCriticalCount(),
1577     Top.ExpectedCount * SchedModel->getLatencyFactor());
1578   Bot.ExpectedCount = std::max(Bot.ExpectedLatency, Bot.CurrCycle);
1579   Bot.ExpectedCount = std::max(
1580     Bot.getCriticalCount(),
1581     Bot.ExpectedCount * SchedModel->getLatencyFactor());
1582 
1583   balanceZones(Top, TopCand, Bot, BotCand);
1584   balanceZones(Bot, BotCand, Top, TopCand);
1585 }
1586 
1587 void ConvergingScheduler::SchedCandidate::
1588 initResourceDelta(const ScheduleDAGMI *DAG,
1589                   const TargetSchedModel *SchedModel) {
1590   if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
1591     return;
1592 
1593   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1594   for (TargetSchedModel::ProcResIter
1595          PI = SchedModel->getWriteProcResBegin(SC),
1596          PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1597     if (PI->ProcResourceIdx == Policy.ReduceResIdx)
1598       ResDelta.CritResources += PI->Cycles;
1599     if (PI->ProcResourceIdx == Policy.DemandResIdx)
1600       ResDelta.DemandedResources += PI->Cycles;
1601   }
1602 }
1603 
1604 /// Return true if this heuristic determines order.
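     /// Apart from NoCand, lower CandReason values denote stronger heuristics;
     /// when the incumbent candidate wins a comparison, its reason is
     /// strengthened to the deciding heuristic's level.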
1605 static bool tryLess(unsigned TryVal, unsigned CandVal,
1606                     ConvergingScheduler::SchedCandidate &TryCand,
1607                     ConvergingScheduler::SchedCandidate &Cand,
1608                     ConvergingScheduler::CandReason Reason) {
1609   if (TryVal < CandVal) {
1610     TryCand.Reason = Reason;
1611     return true;
1612   }
1613   if (TryVal > CandVal) {
1614     if (Cand.Reason > Reason)
1615       Cand.Reason = Reason;
1616     return true;
1617   }
1618   return false;
1619 }
1620 
1621 static bool tryGreater(unsigned TryVal, unsigned CandVal,
1622                        ConvergingScheduler::SchedCandidate &TryCand,
1623                        ConvergingScheduler::SchedCandidate &Cand,
1624                        ConvergingScheduler::CandReason Reason) {
1625   if (TryVal > CandVal) {
1626     TryCand.Reason = Reason;
1627     return true;
1628   }
1629   if (TryVal < CandVal) {
1630     if (Cand.Reason > Reason)
1631       Cand.Reason = Reason;
1632     return true;
1633   }
1634   return false;
1635 }
1636 
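     /// Return the number of SU's weak predecessors (top-down) or weak
     /// successors (bottom-up) that are still outstanding. Weak edges are
     /// advisory; currently they are used for clustering.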
1637 static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
1638   return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
1639 }
1640 
1641 /// Apply a set of heuristics to a new candidate. Heuristics are currently
1642 /// hierarchical. This may be more efficient than a graduated cost model because
1643 /// we don't need to evaluate all aspects of the model for each node in the
1644 /// queue. But it's really done to make the heuristics easier to debug and
1645 /// statistically analyze.
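     ///
     /// The cascade of checks below defines the priority: register-limit excess,
     /// critical-set pressure, clustering and weak edges, resource
     /// reduction/demand, latency, whole-region max pressure, def-use locality,
     /// and finally the original node order.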
1646 ///
1647 /// \param Cand provides the policy and current best candidate.
1648 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
1649 /// \param Zone describes the scheduled zone that we are extending.
1650 /// \param RPTracker describes reg pressure within the scheduled zone.
1651 /// \param TempTracker is a scratch pressure tracker to reuse in queries.
1652 void ConvergingScheduler::tryCandidate(SchedCandidate &Cand,
1653                                        SchedCandidate &TryCand,
1654                                        SchedBoundary &Zone,
1655                                        const RegPressureTracker &RPTracker,
1656                                        RegPressureTracker &TempTracker) {
1657 
1658   // Always initialize TryCand's RPDelta.
1659   TempTracker.getMaxPressureDelta(TryCand.SU->getInstr(), TryCand.RPDelta,
1660                                   DAG->getRegionCriticalPSets(),
1661                                   DAG->getRegPressure().MaxSetPressure);
1662 
1663   // Initialize the candidate if needed.
1664   if (!Cand.isValid()) {
1665     TryCand.Reason = NodeOrder;
1666     return;
1667   }
1668   // Avoid exceeding the target's limit.
1669   if (tryLess(TryCand.RPDelta.Excess.UnitIncrease,
1670               Cand.RPDelta.Excess.UnitIncrease, TryCand, Cand, SingleExcess))
1671     return;
1672   if (Cand.Reason == SingleExcess)
1673     Cand.Reason = MultiPressure;
1674 
1675   // Avoid increasing the max critical pressure in the scheduled region.
1676   if (tryLess(TryCand.RPDelta.CriticalMax.UnitIncrease,
1677               Cand.RPDelta.CriticalMax.UnitIncrease,
1678               TryCand, Cand, SingleCritical))
1679     return;
1680   if (Cand.Reason == SingleCritical)
1681     Cand.Reason = MultiPressure;
1682 
1683   // Keep clustered nodes together to encourage downstream peephole
1684   // optimizations which may reduce resource requirements.
1685   //
1686   // This is a best effort to set things up for a post-RA pass. Optimizations
1687   // like generating loads of multiple registers should ideally be done within
1688   // the scheduler pass by combining the loads during DAG postprocessing.
1689   const SUnit *NextClusterSU =
1690     Zone.isTop() ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
1691   if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU,
1692                  TryCand, Cand, Cluster))
1693     return;
1694   // Currently, weak edges are for clustering, so we hard-code that reason.
1695   // However, deferring the current TryCand will not change Cand's reason.
1696   CandReason OrigReason = Cand.Reason;
1697   if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()),
1698               getWeakLeft(Cand.SU, Zone.isTop()),
1699               TryCand, Cand, Cluster)) {
1700     Cand.Reason = OrigReason;
1701     return;
1702   }
1703   // Avoid critical resource consumption and balance the schedule.
1704   TryCand.initResourceDelta(DAG, SchedModel);
1705   if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
1706               TryCand, Cand, ResourceReduce))
1707     return;
1708   if (tryGreater(TryCand.ResDelta.DemandedResources,
1709                  Cand.ResDelta.DemandedResources,
1710                  TryCand, Cand, ResourceDemand))
1711     return;
1712 
1713   // Avoid serializing long latency dependence chains.
1714   if (Cand.Policy.ReduceLatency) {
1715     if (Zone.isTop()) {
1716       if (Cand.SU->getDepth() * SchedModel->getLatencyFactor()
1717           > Zone.ExpectedCount) {
1718         if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
1719                     TryCand, Cand, TopDepthReduce))
1720           return;
1721       }
1722       if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
1723                      TryCand, Cand, TopPathReduce))
1724         return;
1725     }
1726     else {
1727       if (Cand.SU->getHeight() * SchedModel->getLatencyFactor()
1728           > Zone.ExpectedCount) {
1729         if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
1730                     TryCand, Cand, BotHeightReduce))
1731           return;
1732       }
1733       if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
1734                      TryCand, Cand, BotPathReduce))
1735         return;
1736     }
1737   }
1738 
1739   // Avoid increasing the max pressure of the entire region.
1740   if (tryLess(TryCand.RPDelta.CurrentMax.UnitIncrease,
1741               Cand.RPDelta.CurrentMax.UnitIncrease, TryCand, Cand, SingleMax))
1742     return;
1743   if (Cand.Reason == SingleMax)
1744     Cand.Reason = MultiPressure;
1745 
1746   // Prefer immediate defs/users of the last scheduled instruction. This is a
1747   // nice pressure avoidance strategy that also conserves the processor's
1748   // register renaming resources and keeps the machine code readable.
1749   if (tryGreater(Zone.NextSUs.count(TryCand.SU), Zone.NextSUs.count(Cand.SU),
1750                  TryCand, Cand, NextDefUse))
1751     return;
1752 
1753   // Fall through to original instruction order.
1754   if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
1755       || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
1756     TryCand.Reason = NodeOrder;
1757   }
1758 }
1759 
1760 /// pickNodeBidirectional helper that returns true if the LHS reg pressure
1761 /// effect is more desirable than RHS from a scheduling standpoint.
1762 static bool compareRPDelta(const RegPressureDelta &LHS,
1763                            const RegPressureDelta &RHS) {
1764   // Compare each component of pressure in decreasing order of importance
1765   // without checking if any are valid. Invalid PressureElements are assumed to
1766   // have UnitIncrease==0, so are neutral.
1767 
1768   // Avoid exceeding the target's limit.
1769   if (LHS.Excess.UnitIncrease != RHS.Excess.UnitIncrease) {
1770     DEBUG(dbgs() << "RP excess top - bot: "
1771           << (LHS.Excess.UnitIncrease - RHS.Excess.UnitIncrease) << '\n');
1772     return LHS.Excess.UnitIncrease < RHS.Excess.UnitIncrease;
1773   }
1774   // Avoid increasing the max critical pressure in the scheduled region.
1775   if (LHS.CriticalMax.UnitIncrease != RHS.CriticalMax.UnitIncrease) {
1776     DEBUG(dbgs() << "RP critical top - bot: "
1777           << (LHS.CriticalMax.UnitIncrease - RHS.CriticalMax.UnitIncrease)
1778           << '\n');
1779     return LHS.CriticalMax.UnitIncrease < RHS.CriticalMax.UnitIncrease;
1780   }
1781   // Avoid increasing the max pressure of the entire region.
1782   if (LHS.CurrentMax.UnitIncrease != RHS.CurrentMax.UnitIncrease) {
1783     DEBUG(dbgs() << "RP current top - bot: "
1784           << (LHS.CurrentMax.UnitIncrease - RHS.CurrentMax.UnitIncrease)
1785           << '\n');
1786     return LHS.CurrentMax.UnitIncrease < RHS.CurrentMax.UnitIncrease;
1787   }
1788   return false;
1789 }
1790 
1791 #ifndef NDEBUG
1792 const char *ConvergingScheduler::getReasonStr(
1793   ConvergingScheduler::CandReason Reason) {
1794   switch (Reason) {
1795   case NoCand:         return "NOCAND    ";
1796   case SingleExcess:   return "REG-EXCESS";
1797   case SingleCritical: return "REG-CRIT  ";
1798   case Cluster:        return "CLUSTER   ";
1799   case SingleMax:      return "REG-MAX   ";
1800   case MultiPressure:  return "REG-MULTI ";
1801   case ResourceReduce: return "RES-REDUCE";
1802   case ResourceDemand: return "RES-DEMAND";
1803   case TopDepthReduce: return "TOP-DEPTH ";
1804   case TopPathReduce:  return "TOP-PATH  ";
1805   case BotHeightReduce:return "BOT-HEIGHT";
1806   case BotPathReduce:  return "BOT-PATH  ";
1807   case NextDefUse:     return "DEF-USE   ";
1808   case NodeOrder:      return "ORDER     ";
1809   }
1810   llvm_unreachable("Unknown reason!");
1811 }
1812 
1813 void ConvergingScheduler::traceCandidate(const SchedCandidate &Cand,
1814                                          const SchedBoundary &Zone) {
1815   const char *Label = getReasonStr(Cand.Reason);
1816   PressureElement P;
1817   unsigned ResIdx = 0;
1818   unsigned Latency = 0;
1819   switch (Cand.Reason) {
1820   default:
1821     break;
1822   case SingleExcess:
1823     P = Cand.RPDelta.Excess;
1824     break;
1825   case SingleCritical:
1826     P = Cand.RPDelta.CriticalMax;
1827     break;
1828   case SingleMax:
1829     P = Cand.RPDelta.CurrentMax;
1830     break;
1831   case ResourceReduce:
1832     ResIdx = Cand.Policy.ReduceResIdx;
1833     break;
1834   case ResourceDemand:
1835     ResIdx = Cand.Policy.DemandResIdx;
1836     break;
1837   case TopDepthReduce:
1838     Latency = Cand.SU->getDepth();
1839     break;
1840   case TopPathReduce:
1841     Latency = Cand.SU->getHeight();
1842     break;
1843   case BotHeightReduce:
1844     Latency = Cand.SU->getHeight();
1845     break;
1846   case BotPathReduce:
1847     Latency = Cand.SU->getDepth();
1848     break;
1849   }
1850   dbgs() << Label << " " << Zone.Available.getName() << " ";
1851   if (P.isValid())
1852     dbgs() << TRI->getRegPressureSetName(P.PSetID) << ":" << P.UnitIncrease
1853            << " ";
1854   else
1855     dbgs() << "     ";
1856   if (ResIdx)
1857     dbgs() << SchedModel->getProcResource(ResIdx)->Name << " ";
1858   else
1859     dbgs() << "        ";
1860   if (Latency)
1861     dbgs() << Latency << " cycles ";
1862   else
1863     dbgs() << "         ";
1864   Cand.SU->dump(DAG);
1865 }
1866 #endif
1867 
1868 /// Pick the best candidate from the given zone's queue.
1869 ///
1870 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
1871 /// DAG building. To adjust for the current scheduling location we need to
1872 /// maintain the number of vreg uses remaining to be top-scheduled.
1873 void ConvergingScheduler::pickNodeFromQueue(SchedBoundary &Zone,
1874                                             const RegPressureTracker &RPTracker,
1875                                             SchedCandidate &Cand) {
1876   ReadyQueue &Q = Zone.Available;
1877 
1878   DEBUG(Q.dump());
1879 
1880   // getMaxPressureDelta temporarily modifies the tracker.
1881   RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
1882 
1883   for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
1884 
1885     SchedCandidate TryCand(Cand.Policy);
1886     TryCand.SU = *I;
1887     tryCandidate(Cand, TryCand, Zone, RPTracker, TempTracker);
1888     if (TryCand.Reason != NoCand) {
1889       // Initialize resource delta if needed in case future heuristics query it.
1890       if (TryCand.ResDelta == SchedResourceDelta())
1891         TryCand.initResourceDelta(DAG, SchedModel);
1892       Cand.setBest(TryCand);
1893       DEBUG(traceCandidate(Cand, Zone));
1894     }
1896   }
1897 }
1898 
1899 static void tracePick(const ConvergingScheduler::SchedCandidate &Cand,
1900                       bool IsTop) {
1901   DEBUG(dbgs() << "Pick " << (IsTop ? "top" : "bot")
1902         << " SU(" << Cand.SU->NodeNum << ") "
1903         << ConvergingScheduler::getReasonStr(Cand.Reason) << '\n');
1904 }
1905 
1906 /// Pick the best candidate node from either the top or bottom queue.
1907 SUnit *ConvergingScheduler::pickNodeBidirectional(bool &IsTopNode) {
1908   // Schedule as far as possible in the direction of no choice. This is most
1909   // efficient, but also provides the best heuristics for CriticalPSets.
1910   if (SUnit *SU = Bot.pickOnlyChoice()) {
1911     IsTopNode = false;
1912     return SU;
1913   }
1914   if (SUnit *SU = Top.pickOnlyChoice()) {
1915     IsTopNode = true;
1916     return SU;
1917   }
1918   CandPolicy NoPolicy;
1919   SchedCandidate BotCand(NoPolicy);
1920   SchedCandidate TopCand(NoPolicy);
1921   checkResourceLimits(TopCand, BotCand);
1922 
1923   // Prefer bottom scheduling when heuristics are silent.
1924   pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
1925   assert(BotCand.Reason != NoCand && "failed to find the first candidate");
1926 
1927   // If either Q has a single candidate that provides the least increase in
1928   // Excess pressure, we can immediately schedule from that Q.
1929   //
1930   // RegionCriticalPSets summarizes the pressure within the scheduled region and
1931   // affects picking from either Q. If scheduling in one direction must
1932   // increase pressure for one of the excess PSets, then schedule in that
1933   // direction first to provide more freedom in the other direction.
1934   if (BotCand.Reason == SingleExcess || BotCand.Reason == SingleCritical) {
1935     IsTopNode = false;
1936     tracePick(BotCand, IsTopNode);
1937     return BotCand.SU;
1938   }
1939   // Check if the top Q has a better candidate.
1940   pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
1941   assert(TopCand.Reason != NoCand && "failed to find the first candidate");
1942 
1943   // If either Q has a single candidate that minimizes pressure above the
1944   // original region's pressure, pick it.
1945   if (TopCand.Reason <= SingleMax || BotCand.Reason <= SingleMax) {
1946     if (TopCand.Reason < BotCand.Reason) {
1947       IsTopNode = true;
1948       tracePick(TopCand, IsTopNode);
1949       return TopCand.SU;
1950     }
1951     IsTopNode = false;
1952     tracePick(BotCand, IsTopNode);
1953     return BotCand.SU;
1954   }
1955   // Check for a salient pressure difference and pick the best from either side.
1956   if (compareRPDelta(TopCand.RPDelta, BotCand.RPDelta)) {
1957     IsTopNode = true;
1958     tracePick(TopCand, IsTopNode);
1959     return TopCand.SU;
1960   }
1961   // Otherwise prefer the bottom candidate, in node order if all else failed.
1962   if (TopCand.Reason < BotCand.Reason) {
1963     IsTopNode = true;
1964     tracePick(TopCand, IsTopNode);
1965     return TopCand.SU;
1966   }
1967   IsTopNode = false;
1968   tracePick(BotCand, IsTopNode);
1969   return BotCand.SU;
1970 }
1971 
1972 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
1973 SUnit *ConvergingScheduler::pickNode(bool &IsTopNode) {
1974   if (DAG->top() == DAG->bottom()) {
1975     assert(Top.Available.empty() && Top.Pending.empty() &&
1976            Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
1977     return NULL;
1978   }
1979   SUnit *SU;
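       // A node may remain queued at one boundary after being scheduled from the
       // other, so loop until an unscheduled node is picked.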
1980   do {
1981     if (ForceTopDown) {
1982       SU = Top.pickOnlyChoice();
1983       if (!SU) {
1984         CandPolicy NoPolicy;
1985         SchedCandidate TopCand(NoPolicy);
1986         pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
1987         assert(TopCand.Reason != NoCand && "failed to find the first candidate");
1988         SU = TopCand.SU;
1989       }
1990       IsTopNode = true;
1991     }
1992     else if (ForceBottomUp) {
1993       SU = Bot.pickOnlyChoice();
1994       if (!SU) {
1995         CandPolicy NoPolicy;
1996         SchedCandidate BotCand(NoPolicy);
1997         pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
1998         assert(BotCand.Reason != NoCand && "failed to find the first candidate");
1999         SU = BotCand.SU;
2000       }
2001       IsTopNode = false;
2002     }
2003     else {
2004       SU = pickNodeBidirectional(IsTopNode);
2005     }
2006   } while (SU->isScheduled);
2007 
2008   if (SU->isTopReady())
2009     Top.removeReady(SU);
2010   if (SU->isBottomReady())
2011     Bot.removeReady(SU);
2012 
2013   DEBUG(dbgs() << "*** " << (IsTopNode ? "Top" : "Bottom")
2014         << " Scheduling Instruction in cycle "
2015         << (IsTopNode ? Top.CurrCycle : Bot.CurrCycle) << '\n';
2016         SU->dump(DAG));
2017   return SU;
2018 }
2019 
2020 /// Update the scheduler's state after scheduling a node. This is the same node
2021 /// that was just returned by pickNode(). However, ScheduleDAGMI needs to update
2022 /// its state based on the current cycle before MachineSchedStrategy does.
2023 void ConvergingScheduler::schedNode(SUnit *SU, bool IsTopNode) {
2024   if (IsTopNode) {
2025     SU->TopReadyCycle = Top.CurrCycle;
2026     Top.bumpNode(SU);
2027   }
2028   else {
2029     SU->BotReadyCycle = Bot.CurrCycle;
2030     Bot.bumpNode(SU);
2031   }
2032 }
2033 
2034 /// Create the standard converging machine scheduler. This will be used as the
2035 /// default scheduler if the target does not set a default.
2036 static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
2037   assert((!ForceTopDown || !ForceBottomUp) &&
2038          "-misched-topdown incompatible with -misched-bottomup");
2039   ScheduleDAGMI *DAG = new ScheduleDAGMI(C, new ConvergingScheduler());
2040   // Register DAG post-processors.
2041   if (EnableLoadCluster)
2042     DAG->addMutation(new LoadClusterMutation(DAG->TII, DAG->TRI));
2043   if (EnableMacroFusion)
2044     DAG->addMutation(new MacroFusion(DAG->TII));
2045   return DAG;
2046 }
2047 static MachineSchedRegistry
2048 ConvergingSchedRegistry("converge", "Standard converging scheduler.",
2049                         createConvergingSched);
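     // The registered name is selected at run time through the "misched"
     // registry option defined earlier in this file, e.g. "llc -misched=converge"
     // (illustrative invocation).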
2050 
2051 //===----------------------------------------------------------------------===//
2052 // ILP Scheduler. Currently for experimental analysis of heuristics.
2053 //===----------------------------------------------------------------------===//
2054 
2055 namespace {
2056 /// \brief Order nodes by the ILP metric.
2057 struct ILPOrder {
2058   ScheduleDAGILP *ILP;
2059   bool MaximizeILP;
2060 
2061   ILPOrder(ScheduleDAGILP *ilp, bool MaxILP): ILP(ilp), MaximizeILP(MaxILP) {}
2062 
2063   /// \brief Apply a less-than relation on node priority.
2064   bool operator()(const SUnit *A, const SUnit *B) const {
2065     // Return true if A comes after B in the Q.
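         // std::push_heap/pop_heap keep the highest-priority node at the heap's
         // front, so returning true here gives B priority over A.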
2066     if (MaximizeILP)
2067       return ILP->getILP(A) < ILP->getILP(B);
2068     else
2069       return ILP->getILP(A) > ILP->getILP(B);
2070   }
2071 };
2072 
2073 /// \brief Schedule based on the ILP metric.
2074 class ILPScheduler : public MachineSchedStrategy {
2075   ScheduleDAGILP ILP;
2076   ILPOrder Cmp;
2077 
2078   std::vector<SUnit*> ReadyQ;
2079 public:
2080   ILPScheduler(bool MaximizeILP)
2081   : ILP(/*BottomUp=*/true), Cmp(&ILP, MaximizeILP) {}
2082 
2083   virtual void initialize(ScheduleDAGMI *DAG) {
2084     ReadyQ.clear();
2085     ILP.resize(DAG->SUnits.size());
2086   }
2087 
2088   virtual void registerRoots() {
2089     for (std::vector<SUnit*>::const_iterator
2090            I = ReadyQ.begin(), E = ReadyQ.end(); I != E; ++I) {
2091       ILP.computeILP(*I);
2092     }
2093   }
2094 
2095   /// Implement MachineSchedStrategy interface.
2096   /// -----------------------------------------
2097 
2098   virtual SUnit *pickNode(bool &IsTopNode) {
2099     if (ReadyQ.empty()) return NULL;
2100     std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2101     SUnit *SU = ReadyQ.back();
2102     ReadyQ.pop_back();
2103     IsTopNode = false;
2104     DEBUG(dbgs() << "*** Scheduling " << *SU->getInstr()
2105           << " ILP: " << ILP.getILP(SU) << '\n');
2106     return SU;
2107   }
2108 
2109   virtual void schedNode(SUnit *, bool) {}
2110 
2111   virtual void releaseTopNode(SUnit *) { /*only called for top roots*/ }
2112 
2113   virtual void releaseBottomNode(SUnit *SU) {
2114     ReadyQ.push_back(SU);
2115     std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2116   }
2117 };
2118 } // namespace
2119 
2120 static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
2121   return new ScheduleDAGMI(C, new ILPScheduler(true));
2122 }
2123 static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
2124   return new ScheduleDAGMI(C, new ILPScheduler(false));
2125 }
2126 static MachineSchedRegistry ILPMaxRegistry(
2127   "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
2128 static MachineSchedRegistry ILPMinRegistry(
2129   "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);
2130 
2131 //===----------------------------------------------------------------------===//
2132 // Machine Instruction Shuffler for Correctness Testing
2133 //===----------------------------------------------------------------------===//
2134 
2135 #ifndef NDEBUG
2136 namespace {
2137 /// Apply a less-than relation on the node order, which corresponds to the
2138 /// instruction order prior to scheduling. IsReverse implements greater-than.
2139 template<bool IsReverse>
2140 struct SUnitOrder {
2141   bool operator()(SUnit *A, SUnit *B) const {
2142     if (IsReverse)
2143       return A->NodeNum > B->NodeNum;
2144     else
2145       return A->NodeNum < B->NodeNum;
2146   }
2147 };
2148 
2149 /// Reorder instructions as much as possible.
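     /// There is no performance goal here; extreme but legal orderings help
     /// expose missing DAG dependences during correctness testing.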
2150 class InstructionShuffler : public MachineSchedStrategy {
2151   bool IsAlternating;
2152   bool IsTopDown;
2153 
2154   // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
2155   // gives nodes with a higher number higher priority, causing the latest
2156   // instructions to be scheduled first.
2157   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
2158     TopQ;
2159   // When scheduling bottom-up, use greater-than as the queue priority.
2160   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
2161     BottomQ;
2162 public:
2163   InstructionShuffler(bool alternate, bool topdown)
2164     : IsAlternating(alternate), IsTopDown(topdown) {}
2165 
2166   virtual void initialize(ScheduleDAGMI *) {
2167     TopQ.clear();
2168     BottomQ.clear();
2169   }
2170 
2171   /// Implement MachineSchedStrategy interface.
2172   /// -----------------------------------------
2173 
2174   virtual SUnit *pickNode(bool &IsTopNode) {
2175     SUnit *SU;
2176     if (IsTopDown) {
2177       do {
2178         if (TopQ.empty()) return NULL;
2179         SU = TopQ.top();
2180         TopQ.pop();
2181       } while (SU->isScheduled);
2182       IsTopNode = true;
2183     }
2184     else {
2185       do {
2186         if (BottomQ.empty()) return NULL;
2187         SU = BottomQ.top();
2188         BottomQ.pop();
2189       } while (SU->isScheduled);
2190       IsTopNode = false;
2191     }
2192     if (IsAlternating)
2193       IsTopDown = !IsTopDown;
2194     return SU;
2195   }
2196 
2197   virtual void schedNode(SUnit *SU, bool IsTopNode) {}
2198 
2199   virtual void releaseTopNode(SUnit *SU) {
2200     TopQ.push(SU);
2201   }
2202   virtual void releaseBottomNode(SUnit *SU) {
2203     BottomQ.push(SU);
2204   }
2205 };
2206 } // namespace
2207 
2208 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
2209   bool Alternate = !ForceTopDown && !ForceBottomUp;
2210   bool TopDown = !ForceBottomUp;
2211   assert((TopDown || !ForceTopDown) &&
2212          "-misched-topdown incompatible with -misched-bottomup");
2213   return new ScheduleDAGMI(C, new InstructionShuffler(Alternate, TopDown));
2214 }
2215 static MachineSchedRegistry ShufflerRegistry(
2216   "shuffle", "Shuffle machine instructions alternating directions",
2217   createInstructionShuffler);
2218 #endif // !NDEBUG
2219