//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "misched"

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include <queue>

using namespace llvm;

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
}

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

// FIXME: remove this flag after initial testing. It should always be a good
// thing.
static cl::opt<bool> EnableCopyConstrain("misched-vcopy", cl::Hidden,
    cl::desc("Constrain vreg copies."), cl::init(true));

static cl::opt<bool> EnableLoadCluster("misched-cluster", cl::Hidden,
  cl::desc("Enable load clustering."), cl::init(true));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
    MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedContext,
                         public MachineFunctionPass {
public:
  MachineScheduler();

  virtual void getAnalysisUsage(AnalysisUsage &AU) const;

  virtual void releaseMemory() {}

  virtual bool runOnMachineFunction(MachineFunction&);

  virtual void print(raw_ostream &O, const Module* = 0) const;

  static char ID; // Class identification, replacement for typeinfo
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "misched",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "misched",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineFunctionPass(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return 0;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);
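
// The registry is exposed on the command line through MachineSchedOpt above.
// For example (illustrative; the scheduler pass itself must also be enabled,
// e.g. via llc's -enable-misched flag in builds of this era):
//
//   llc -enable-misched -misched=default foo.ll
//
// selects useDefaultMachineSched, which defers to the target's registered
// default and ultimately to createConvergingSched below.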

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C);


/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
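///
/// Illustrative region layout (assumed instructions; boundaries are whatever
/// the target's isSchedulingBoundary() reports, and terminators always are):
///
///   %v0 = ...            \  region 2: visited second
///   %v1 = ...            /
///   <boundary instr>        never part of a scheduling DAG
///   %v2 = ...            \  region 1: visited first, since regions are
///   %v3 = ...            /  visited bottom-up within the block
///   JMP ...                 terminator, also a boundary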
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AliasAnalysis>();

  LIS = &getAnalysis<LiveIntervals>();
  const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();

  if (VerifyScheduling) {
    DEBUG(LIS->print(dbgs()));
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor == useDefaultMachineSched) {
    // Get the default scheduler set by the target.
    Ctor = MachineSchedRegistry::getDefault();
    if (!Ctor) {
      Ctor = createConvergingSched;
      MachineSchedRegistry::setDefault(Ctor);
    }
  }
  // Instantiate the selected scheduler.
  OwningPtr<ScheduleDAGInstrs> Scheduler(Ctor(this));

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler->startBlock(MBB);

    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    unsigned RemainingInstrs = MBB->size();
    for (MachineBasicBlock::iterator RegionEnd = MBB->end();
         RegionEnd != MBB->begin(); RegionEnd = Scheduler->begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end()
          || TII->isSchedulingBoundary(llvm::prior(RegionEnd), MBB, *MF)) {
        --RegionEnd;
        // Count the boundary instruction.
        --RemainingInstrs;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      MachineBasicBlock::iterator I = RegionEnd;
      for (; I != MBB->begin(); --I, --RemainingInstrs) {
        if (TII->isSchedulingBoundary(llvm::prior(I), MBB, *MF))
          break;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler->enterRegion(MBB, I, RegionEnd, RemainingInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == llvm::prior(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler->exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n  From: " << *I << "    To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " Remaining: " << RemainingInstrs << "\n");

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler->schedule();

      // Close the current region.
      Scheduler->exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler->begin();
    }
    assert(RemainingInstrs == 0 && "Instruction count mismatch!");
    Scheduler->finishBlock();
  }
  Scheduler->finalizeSchedule();
  DEBUG(LIS->print(dbgs()));
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

void MachineScheduler::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ReadyQueue::dump() {
  dbgs() << "  " << Name << ": ";
  for (unsigned i = 0, e = Queue.size(); i < e; ++i)
    dbgs() << Queue[i]->NodeNum << " ";
  dbgs() << "\n";
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMI::~ScheduleDAGMI() {
  delete DFSResult;
  DeleteContainerPointers(Mutations);
  delete SchedImpl;
}

bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(MachineInstr *MI,
                                    MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals
  LIS->handleMove(MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}
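
// Typical debugging use of the cutoff (illustrative invocation; requires an
// assertions build, since MISchedCutoff is compiled out under NDEBUG):
//
//   llc -enable-misched -misched-cutoff=42 foo.ll
//
// stops reordering after 42 instructions, which allows a miscompile to be
// bisected down to an individual scheduling decision.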

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned endcount)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, endcount);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd =
    (RegionEnd == bb->end()) ? RegionEnd : llvm::next(RegionEnd);
}

// Setup the register pressure trackers for the top-scheduled and
// bottom-scheduled regions.
void ScheduleDAGMI::initRegPressure() {
  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.getPressure().dump(TRI));

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    BotRPTracker.recede();

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = TRI->getRegPressureSetLimit(i);
    DEBUG(dbgs() << TRI->getRegPressureSetName(i)
497           << "Limit " << Limit
498           << " Actual " << RegionPressure[i] << "\n");
499     if (RegionPressure[i] > Limit)
500       RegionCriticalPSets.push_back(PressureElement(i, 0));
501   }
502   DEBUG(dbgs() << "Excess PSets: ";
503         for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
504           dbgs() << TRI->getRegPressureSetName(
505             RegionCriticalPSets[i].PSetID) << " ";
506         dbgs() << "\n");
507 }
508 
509 // FIXME: When the pressure tracker deals in pressure differences then we won't
510 // iterate over all RegionCriticalPSets[i].
511 void ScheduleDAGMI::
512 updateScheduledPressure(const std::vector<unsigned> &NewMaxPressure) {
513   for (unsigned i = 0, e = RegionCriticalPSets.size(); i < e; ++i) {
514     unsigned ID = RegionCriticalPSets[i].PSetID;
515     int &MaxUnits = RegionCriticalPSets[i].UnitIncrease;
516     if ((int)NewMaxPressure[ID] > MaxUnits)
517       MaxUnits = NewMaxPressure[ID];
518   }
519   DEBUG(
520     for (unsigned i = 0, e = NewMaxPressure.size(); i < e; ++i) {
521       unsigned Limit = TRI->getRegPressureSetLimit(i);
522       if (NewMaxPressure[i] > Limit ) {
523         dbgs() << "  " << TRI->getRegPressureSetName(i) << ": "
524                << NewMaxPressure[i] << " > " << Limit << "\n";
525       }
526     });
527 }
528 
529 /// schedule - Called back from MachineScheduler::runOnMachineFunction
530 /// after setting up the current scheduling region. [RegionBegin, RegionEnd)
531 /// only includes instructions that have DAG nodes, not scheduling boundaries.
532 ///
533 /// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMI then it will want to override this virtual method in order to
/// update any specialized state.
void ScheduleDAGMI::schedule() {
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}
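
// A minimal sketch of a custom strategy (hypothetical code, not part of this
// file), assuming only the MachineSchedStrategy callbacks exercised by
// schedule() above. It picks ready nodes top-down in source order:
//
//   namespace {
//   struct SourceOrderStrategy : public MachineSchedStrategy {
//     std::vector<SUnit*> ReadyQ; // nodes whose preds are all scheduled
//
//     virtual void initialize(ScheduleDAGMI *DAG) { ReadyQ.clear(); }
//     virtual void registerRoots() {}
//
//     virtual SUnit *pickNode(bool &IsTopNode) {
//       IsTopNode = true;                // drive scheduling top-down only
//       if (ReadyQ.empty()) return NULL; // NULL terminates the driver loop
//       // Lowest NodeNum approximates original source order.
//       std::vector<SUnit*>::iterator Best = ReadyQ.begin();
//       for (std::vector<SUnit*>::iterator I = ReadyQ.begin(),
//              E = ReadyQ.end(); I != E; ++I)
//         if ((*I)->NodeNum < (*Best)->NodeNum)
//           Best = I;
//       SUnit *SU = *Best;
//       ReadyQ.erase(Best);
//       return SU;
//     }
//
//     virtual void schedNode(SUnit *SU, bool IsTopNode) {}
//     // Called by releaseSucc() when a node's NumPredsLeft reaches zero.
//     virtual void releaseTopNode(SUnit *SU) { ReadyQ.push_back(SU); }
//     virtual void releaseBottomNode(SUnit *SU) {} // unused: top-down only
//   };
//   } // anonymous
//
// Such a strategy would be wrapped in a factory and registered through
// MachineSchedRegistry, just like DefaultSchedRegistry above.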

/// Build the DAG and setup three register pressure trackers.
void ScheduleDAGMI::buildDAGWithRegPressure() {
  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
    Mutations[i]->apply(this);
  }
}

void ScheduleDAGMI::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

void ScheduleDAGMI::findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                                          SmallVectorImpl<SUnit*> &BotRoots) {
  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    SUnit *SU = &(*I);
    assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU->biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!I->NumPredsLeft)
      TopRoots.push_back(SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!I->NumSuccsLeft)
      BotRoots.push_back(SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = NULL;
  NextClusterPred = NULL;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
    SchedImpl->releaseTopNode(*I);
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  TopRPTracker.setPos(CurrentTop);

  CurrentBottom = RegionEnd;
}

/// Move an instruction and update register pressure.
void ScheduleDAGMI::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    // Update top scheduled pressure.
    TopRPTracker.advance();
    assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
    updateScheduledPressure(TopRPTracker.getPressure().MaxSetPressure);
  }
  else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
    }
    // Update bottom scheduled pressure.
    BotRPTracker.recede();
    assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
    updateScheduledPressure(BotRPTracker.getPressure().MaxSetPressure);
  }
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;

  if (DFSResult) {
    unsigned SubtreeID = DFSResult->getSubtreeID(SU);
    if (!ScheduledTrees.test(SubtreeID)) {
      ScheduledTrees.set(SubtreeID);
      DFSResult->scheduleTree(SubtreeID);
      SchedImpl->scheduleTree(SubtreeID);
    }
  }

  // Notify the scheduling strategy after updating the DAG.
  SchedImpl->schedNode(SU, IsTopNode);
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == llvm::prior(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = NULL;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// LoadClusterMutation - DAG post-processing to cluster loads.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between neighboring
/// loads.
class LoadClusterMutation : public ScheduleDAGMutation {
  struct LoadInfo {
    SUnit *SU;
    unsigned BaseReg;
    unsigned Offset;
    LoadInfo(SUnit *su, unsigned reg, unsigned ofs)
      : SU(su), BaseReg(reg), Offset(ofs) {}
  };
  static bool LoadInfoLess(const LoadClusterMutation::LoadInfo &LHS,
                           const LoadClusterMutation::LoadInfo &RHS);

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
public:
  LoadClusterMutation(const TargetInstrInfo *tii,
                      const TargetRegisterInfo *tri)
    : TII(tii), TRI(tri) {}

  virtual void apply(ScheduleDAGMI *DAG);
protected:
  void clusterNeighboringLoads(ArrayRef<SUnit*> Loads, ScheduleDAGMI *DAG);
};
} // anonymous

bool LoadClusterMutation::LoadInfoLess(
  const LoadClusterMutation::LoadInfo &LHS,
  const LoadClusterMutation::LoadInfo &RHS) {
  if (LHS.BaseReg != RHS.BaseReg)
    return LHS.BaseReg < RHS.BaseReg;
  return LHS.Offset < RHS.Offset;
}

void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
                                                  ScheduleDAGMI *DAG) {
  SmallVector<LoadClusterMutation::LoadInfo,32> LoadRecords;
  for (unsigned Idx = 0, End = Loads.size(); Idx != End; ++Idx) {
    SUnit *SU = Loads[Idx];
    unsigned BaseReg;
    unsigned Offset;
    if (TII->getLdStBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
      LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset));
  }
  if (LoadRecords.size() < 2)
    return;
  std::sort(LoadRecords.begin(), LoadRecords.end(), LoadInfoLess);
  unsigned ClusterLength = 1;
  for (unsigned Idx = 0, End = LoadRecords.size(); Idx < (End - 1); ++Idx) {
    if (LoadRecords[Idx].BaseReg != LoadRecords[Idx+1].BaseReg) {
      ClusterLength = 1;
      continue;
    }

    SUnit *SUa = LoadRecords[Idx].SU;
    SUnit *SUb = LoadRecords[Idx+1].SU;
    if (TII->shouldClusterLoads(SUa->getInstr(), SUb->getInstr(), ClusterLength)
        && DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {

      DEBUG(dbgs() << "Cluster loads SU(" << SUa->NodeNum << ") - SU("
            << SUb->NodeNum << ")\n");
      // Copy successor edges from SUa to SUb. Interleaving computation
      // dependent on SUa can prevent load combining due to register reuse.
      // Predecessor edges do not need to be copied from SUb to SUa since nearby
      // loads should have effectively the same inputs.
      for (SUnit::const_succ_iterator
             SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
        if (SI->getSUnit() == SUb)
          continue;
        DEBUG(dbgs() << "  Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
        DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
      }
      ++ClusterLength;
    }
    else
      ClusterLength = 1;
  }
}
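
// Illustrative walk-through with hypothetical records: after sorting, loads
// at (r0,+0), (r0,+4), (r1,+0) appear in that order. The r0 pair shares a
// BaseReg, so if shouldClusterLoads() agrees it gets a weak Cluster edge
// (r0,+0) -> (r0,+4) and ClusterLength grows to 2; the r1 load then resets
// ClusterLength to 1.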

/// \brief Callback from DAG postProcessing to create cluster edges for loads.
void LoadClusterMutation::apply(ScheduleDAGMI *DAG) {
  // Map DAG NodeNum to store chain ID.
  DenseMap<unsigned, unsigned> StoreChainIDs;
  // Map each store chain to a set of dependent loads.
  SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->mayLoad())
      continue;
    unsigned ChainPredID = DAG->SUnits.size();
    for (SUnit::const_pred_iterator
           PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
      if (PI->isCtrl()) {
        ChainPredID = PI->getSUnit()->NodeNum;
        break;
      }
    }
    // Check if this chain-like pred has been seen
    // before. ChainPredID==MaxNodeID for loads at the top of the schedule.
    unsigned NumChains = StoreChainDependents.size();
    std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
      StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
    if (Result.second)
      StoreChainDependents.resize(NumChains + 1);
    StoreChainDependents[Result.first->second].push_back(SU);
  }
  // Iterate over the store chains.
  for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
    clusterNeighboringLoads(StoreChainDependents[Idx], DAG);
}

//===----------------------------------------------------------------------===//
// MacroFusion - DAG post-processing to encourage fusion of macro ops.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between instructions
/// that may be fused by the processor into a single operation.
class MacroFusion : public ScheduleDAGMutation {
  const TargetInstrInfo *TII;
public:
  MacroFusion(const TargetInstrInfo *tii): TII(tii) {}

  virtual void apply(ScheduleDAGMI *DAG);
};
} // anonymous

/// \brief Callback from DAG postProcessing to create cluster edges to encourage
/// fused operations.
void MacroFusion::apply(ScheduleDAGMI *DAG) {
  // For now, assume targets can only fuse with the branch.
  MachineInstr *Branch = DAG->ExitSU.getInstr();
  if (!Branch)
    return;

  for (unsigned Idx = DAG->SUnits.size(); Idx > 0;) {
    SUnit *SU = &DAG->SUnits[--Idx];
    if (!TII->shouldScheduleAdjacent(SU->getInstr(), Branch))
      continue;

    // Create a single weak edge from SU to ExitSU. The only effect is to cause
    // bottom-up scheduling to heavily prioritize the clustered SU.  There is no
    // need to copy predecessor edges from ExitSU to SU, since top-down
    // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
    // of SU, we could create an artificial edge from the deepest root, but it
    // hasn't been needed yet.
    bool Success = DAG->addEdge(&DAG->ExitSU, SDep(SU, SDep::Cluster));
    (void)Success;
    assert(Success && "No DAG nodes should be reachable from ExitSU");

    DEBUG(dbgs() << "Macro Fuse SU(" << SU->NodeNum << ")\n");
    break;
  }
}
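
// A concrete target example (the policy lives entirely in
// shouldScheduleAdjacent): x86 uses this hook to keep a compare or test
// adjacent to the conditional branch so the pair can macro-fuse into a
// single uop on processors that support it.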

//===----------------------------------------------------------------------===//
// CopyConstrain - DAG post-processing to encourage copy elimination.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create weak edges from all uses of a copy to
/// the one use that defines the copy's source vreg, most likely an induction
/// variable increment.
class CopyConstrain : public ScheduleDAGMutation {
  // Transient state.
  SlotIndex RegionBeginIdx;
  // RegionEndIdx is the slot index of the last non-debug instruction in the
  // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
  SlotIndex RegionEndIdx;
public:
  CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}

  virtual void apply(ScheduleDAGMI *DAG);

protected:
  void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMI *DAG);
};
} // anonymous

/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
/// I0:     = dst
/// I1: src = ...
/// I2:     = dst
/// I3: dst = src (copy)
/// (create pred->succ edges I0->I1, I2->I1)
///
/// 2) Local copy:
/// I0: dst = src (copy)
/// I1:     = dst
/// I2: src = ...
/// I3:     = dst
/// (create pred->succ edges I1->I2, I3->I2)
///
/// Although the MachineScheduler is currently constrained to single blocks,
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMI *DAG) {
  LiveIntervals *LIS = DAG->getLIS();
  MachineInstr *Copy = CopySU->getInstr();

  // Check for pure vreg copies.
  unsigned SrcReg = Copy->getOperand(1).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
    return;

  unsigned DstReg = Copy->getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(DstReg))
    return;

  // Check if either the dest or source is local. If it's live across a back
  // edge, it's not local. Note that if both vregs are live across the back
  // edge, we cannot successfully constrain the copy without cyclic scheduling.
  unsigned LocalReg = DstReg;
  unsigned GlobalReg = SrcReg;
  LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
  if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
    LocalReg = SrcReg;
    GlobalReg = DstReg;
    LocalLI = &LIS->getInterval(LocalReg);
    if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
      return;
  }
  LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);

  // Find the global segment after the start of the local LI.
  LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
  // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
  // local live range. We could create edges from other global uses to the local
  // start, but the coalescer should have already eliminated these cases, so
  // don't bother dealing with it.
  if (GlobalSegment == GlobalLI->end())
    return;

  // If GlobalSegment is killed at the LocalLI->start, the call to find()
  // returned the next global segment. But if GlobalSegment overlaps with
  // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
  // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
  if (GlobalSegment->contains(LocalLI->beginIndex()))
    ++GlobalSegment;

  if (GlobalSegment == GlobalLI->end())
    return;

  // Check if GlobalLI contains a hole in the vicinity of LocalLI.
  if (GlobalSegment != GlobalLI->begin()) {
    // Two address defs have no hole.
    if (SlotIndex::isSameInstr(llvm::prior(GlobalSegment)->end,
                               GlobalSegment->start)) {
      return;
    }
    // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
    // it would be a disconnected component in the live range.
    assert(llvm::prior(GlobalSegment)->start < LocalLI->beginIndex() &&
           "Disconnected LRG within the scheduling region.");
  }
  MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
  if (!GlobalDef)
    return;

  SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
  if (!GlobalSU)
    return;

  // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
  // constraining the uses of the last local def to precede GlobalDef.
  SmallVector<SUnit*,8> LocalUses;
  const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
  MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
  SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
  for (SUnit::const_succ_iterator
         I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
       I != E; ++I) {
    if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
      continue;
    if (I->getSUnit() == GlobalSU)
      continue;
    if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
      return;
    LocalUses.push_back(I->getSUnit());
  }
  // Open the top of the GlobalLI hole by constraining any earlier global uses
  // to precede the start of LocalLI.
  SmallVector<SUnit*,8> GlobalUses;
  MachineInstr *FirstLocalDef =
    LIS->getInstructionFromIndex(LocalLI->beginIndex());
  SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
  for (SUnit::const_pred_iterator
         I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
    if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
      continue;
    if (I->getSUnit() == FirstLocalSU)
      continue;
    if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
      return;
    GlobalUses.push_back(I->getSUnit());
  }
  DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
  // Add the weak edges.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Local use SU(" << (*I)->NodeNum << ") -> SU("
          << GlobalSU->NodeNum << ")\n");
    DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
  }
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Global use SU(" << (*I)->NodeNum << ") -> SU("
          << FirstLocalSU->NodeNum << ")\n");
    DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
  }
}

/// \brief Callback from DAG postProcessing to create weak edges to encourage
/// copy elimination.
void CopyConstrain::apply(ScheduleDAGMI *DAG) {
  MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
  if (FirstPos == DAG->end())
    return;
  RegionBeginIdx = DAG->getLIS()->getInstructionIndex(&*FirstPos);
  RegionEndIdx = DAG->getLIS()->getInstructionIndex(
    &*priorNonDebug(DAG->end(), DAG->begin()));

  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->isCopy())
      continue;

    constrainLocalCopy(SU, DAG);
  }
}

//===----------------------------------------------------------------------===//
// ConvergingScheduler - Implementation of the standard MachineSchedStrategy.
//===----------------------------------------------------------------------===//

namespace {
/// ConvergingScheduler shrinks the unscheduled zone using heuristics to balance
/// the schedule.
class ConvergingScheduler : public MachineSchedStrategy {
public:
  /// Represent the type of SchedCandidate found within a single queue.
  /// pickNodeBidirectional depends on these listed by decreasing priority.
  enum CandReason {
    NoCand, PhysRegCopy, SingleExcess, SingleCritical, Cluster, Weak,
    ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce,
    TopDepthReduce, TopPathReduce, SingleMax, MultiPressure, NextDefUse,
    NodeOrder};

#ifndef NDEBUG
  static const char *getReasonStr(ConvergingScheduler::CandReason Reason);
#endif

  /// Policy for scheduling the next instruction in the candidate's zone.
  struct CandPolicy {
    bool ReduceLatency;
    unsigned ReduceResIdx;
    unsigned DemandResIdx;

    CandPolicy(): ReduceLatency(false), ReduceResIdx(0), DemandResIdx(0) {}
  };

  /// Status of an instruction's critical resource consumption.
  struct SchedResourceDelta {
    // Count critical resources in the scheduled region required by SU.
    unsigned CritResources;

    // Count critical resources from another region consumed by SU.
    unsigned DemandedResources;

    SchedResourceDelta(): CritResources(0), DemandedResources(0) {}

    bool operator==(const SchedResourceDelta &RHS) const {
      return CritResources == RHS.CritResources
        && DemandedResources == RHS.DemandedResources;
    }
    bool operator!=(const SchedResourceDelta &RHS) const {
      return !operator==(RHS);
    }
  };

  /// Store the state used by ConvergingScheduler heuristics, required for the
  /// lifetime of one invocation of pickNode().
  struct SchedCandidate {
    CandPolicy Policy;

    // The best SUnit candidate.
    SUnit *SU;

    // The reason for this candidate.
    CandReason Reason;

    // Register pressure values for the best candidate.
    RegPressureDelta RPDelta;

    // Critical resource consumption of the best candidate.
    SchedResourceDelta ResDelta;

    SchedCandidate(const CandPolicy &policy)
    : Policy(policy), SU(NULL), Reason(NoCand) {}

    bool isValid() const { return SU; }

    // Copy the status of another candidate without changing policy.
    void setBest(SchedCandidate &Best) {
      assert(Best.Reason != NoCand && "uninitialized Sched candidate");
      SU = Best.SU;
      Reason = Best.Reason;
      RPDelta = Best.RPDelta;
      ResDelta = Best.ResDelta;
    }

    void initResourceDelta(const ScheduleDAGMI *DAG,
                           const TargetSchedModel *SchedModel);
  };

  /// Summarize the unscheduled region.
  struct SchedRemainder {
    // Critical path through the DAG in expected latency.
    unsigned CriticalPath;

    // Unscheduled resources
    SmallVector<unsigned, 16> RemainingCounts;
    // Critical resource for the unscheduled zone.
    unsigned CritResIdx;
    // Number of micro-ops left to schedule.
    unsigned RemainingMicroOps;

    void reset() {
      CriticalPath = 0;
      RemainingCounts.clear();
      CritResIdx = 0;
      RemainingMicroOps = 0;
    }

    SchedRemainder() { reset(); }

    void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);

    unsigned getMaxRemainingCount(const TargetSchedModel *SchedModel) const {
      if (!SchedModel->hasInstrSchedModel())
        return 0;

      return std::max(
        RemainingMicroOps * SchedModel->getMicroOpFactor(),
        RemainingCounts[CritResIdx]);
    }
  };

  /// Each Scheduling boundary is associated with ready queues. It tracks the
  /// current cycle in the direction of movement, and maintains the state
  /// of "hazards" and other interlocks at the current cycle.
  struct SchedBoundary {
    ScheduleDAGMI *DAG;
    const TargetSchedModel *SchedModel;
    SchedRemainder *Rem;

    ReadyQueue Available;
    ReadyQueue Pending;
    bool CheckPending;

    // For heuristics, keep a list of the nodes that immediately depend on the
    // most recently scheduled node.
    SmallPtrSet<const SUnit*, 8> NextSUs;

    ScheduleHazardRecognizer *HazardRec;

    unsigned CurrCycle;
    unsigned IssueCount;

    /// MinReadyCycle - Cycle of the soonest available instruction.
    unsigned MinReadyCycle;

    // The expected latency of the critical path in this scheduled zone.
    unsigned ExpectedLatency;

    // Resources used in the scheduled zone beyond this boundary.
    SmallVector<unsigned, 16> ResourceCounts;

    // Cache the critical resources ID in this scheduled zone.
    unsigned CritResIdx;
    // Whether the scheduled zone is resource limited vs. latency limited.
    bool IsResourceLimited;

    unsigned ExpectedCount;

#ifndef NDEBUG
    // Remember the greatest min operand latency.
    unsigned MaxMinLatency;
#endif

    void reset() {
      // A new HazardRec is created for each DAG and owned by SchedBoundary.
      delete HazardRec;

      Available.clear();
      Pending.clear();
      CheckPending = false;
      NextSUs.clear();
      HazardRec = 0;
      CurrCycle = 0;
      IssueCount = 0;
      MinReadyCycle = UINT_MAX;
      ExpectedLatency = 0;
      // Reserve a zero-count for invalid CritResIdx.
      ResourceCounts.resize(1);
      assert(!ResourceCounts[0] && "nonzero count for bad resource");
      CritResIdx = 0;
      IsResourceLimited = false;
      ExpectedCount = 0;
#ifndef NDEBUG
      MaxMinLatency = 0;
#endif
    }

    /// Pending queues extend the ready queues with the same ID and the
    /// PendingFlag set.
    SchedBoundary(unsigned ID, const Twine &Name):
      DAG(0), SchedModel(0), Rem(0), Available(ID, Name+".A"),
      Pending(ID << ConvergingScheduler::LogMaxQID, Name+".P"),
      HazardRec(0) {
      reset();
    }

    ~SchedBoundary() { delete HazardRec; }

    void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel,
              SchedRemainder *rem);

    bool isTop() const {
      return Available.getID() == ConvergingScheduler::TopQID;
    }

    unsigned getUnscheduledLatency(SUnit *SU) const {
      if (isTop())
        return SU->getHeight();
      return SU->getDepth() + SU->Latency;
    }

    unsigned getCriticalCount() const {
      return ResourceCounts[CritResIdx];
    }

    bool checkHazard(SUnit *SU);

    void setLatencyPolicy(CandPolicy &Policy);

    void releaseNode(SUnit *SU, unsigned ReadyCycle);

    void bumpCycle();

    void countResource(unsigned PIdx, unsigned Cycles);

    void bumpNode(SUnit *SU);

    void releasePending();

    void removeReady(SUnit *SU);

    SUnit *pickOnlyChoice();
  };

private:
  ScheduleDAGMI *DAG;
  const TargetSchedModel *SchedModel;
  const TargetRegisterInfo *TRI;

  // State of the top and bottom scheduled instruction boundaries.
  SchedRemainder Rem;
  SchedBoundary Top;
  SchedBoundary Bot;

public:
  /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
  enum {
    TopQID = 1,
    BotQID = 2,
    LogMaxQID = 2
  };
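
  // Given these IDs, the SchedBoundary constructor above derives distinct IDs
  // for all four queues: Top.Available == TopQID == 1, Bot.Available ==
  // BotQID == 2, Top.Pending == TopQID << LogMaxQID == 4, and Bot.Pending ==
  // BotQID << LogMaxQID == 8.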

  ConvergingScheduler():
    DAG(0), SchedModel(0), TRI(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}

  virtual void initialize(ScheduleDAGMI *dag);

  virtual SUnit *pickNode(bool &IsTopNode);

  virtual void schedNode(SUnit *SU, bool IsTopNode);

  virtual void releaseTopNode(SUnit *SU);

  virtual void releaseBottomNode(SUnit *SU);

  virtual void registerRoots();

protected:
  void balanceZones(
    ConvergingScheduler::SchedBoundary &CriticalZone,
    ConvergingScheduler::SchedCandidate &CriticalCand,
    ConvergingScheduler::SchedBoundary &OppositeZone,
    ConvergingScheduler::SchedCandidate &OppositeCand);

  void checkResourceLimits(ConvergingScheduler::SchedCandidate &TopCand,
                           ConvergingScheduler::SchedCandidate &BotCand);

  void tryCandidate(SchedCandidate &Cand,
                    SchedCandidate &TryCand,
                    SchedBoundary &Zone,
                    const RegPressureTracker &RPTracker,
                    RegPressureTracker &TempTracker);

  SUnit *pickNodeBidirectional(bool &IsTopNode);

  void pickNodeFromQueue(SchedBoundary &Zone,
                         const RegPressureTracker &RPTracker,
                         SchedCandidate &Candidate);

  void reschedulePhysRegCopies(SUnit *SU, bool isTop);

#ifndef NDEBUG
  void traceCandidate(const SchedCandidate &Cand);
#endif
};
} // namespace

void ConvergingScheduler::SchedRemainder::
init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
  reset();
  if (!SchedModel->hasInstrSchedModel())
    return;
  RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
  for (std::vector<SUnit>::iterator
         I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
    RemainingMicroOps += SchedModel->getNumMicroOps(I->getInstr(), SC);
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned PIdx = PI->ProcResourceIdx;
      unsigned Factor = SchedModel->getResourceFactor(PIdx);
      RemainingCounts[PIdx] += (Factor * PI->Cycles);
    }
  }
  for (unsigned PIdx = 0, PEnd = SchedModel->getNumProcResourceKinds();
       PIdx != PEnd; ++PIdx) {
    if ((int)(RemainingCounts[PIdx] - RemainingCounts[CritResIdx])
        >= (int)SchedModel->getLatencyFactor()) {
      CritResIdx = PIdx;
    }
  }
  DEBUG(dbgs() << "Critical Resource: "
        << SchedModel->getProcResource(CritResIdx)->Name
        << ": " << RemainingCounts[CritResIdx]
        << " / " << SchedModel->getLatencyFactor() << '\n');
}
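
// Worked example of the critical-resource selection above (hypothetical
// machine model): with RemainingCounts == {0, ALU: 90, LSU: 40} and a latency
// factor of 10, ALU displaces the initial CritResIdx (90 - 0 >= 10) but LSU
// does not displace ALU (40 - 90 < 10), so ALU is reported as critical.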
1429 
1430 void ConvergingScheduler::SchedBoundary::
1431 init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
1432   reset();
1433   DAG = dag;
1434   SchedModel = smodel;
1435   Rem = rem;
1436   if (SchedModel->hasInstrSchedModel())
1437     ResourceCounts.resize(SchedModel->getNumProcResourceKinds());
1438 }
1439 
1440 void ConvergingScheduler::initialize(ScheduleDAGMI *dag) {
1441   DAG = dag;
1442   SchedModel = DAG->getSchedModel();
1443   TRI = DAG->TRI;
1444 
1445   Rem.init(DAG, SchedModel);
1446   Top.init(DAG, SchedModel, &Rem);
1447   Bot.init(DAG, SchedModel, &Rem);
1448 
1449   // Initialize resource counts.
1450 
1451   // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
1452   // are disabled, then these HazardRecs will be disabled.
1453   const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
1454   const TargetMachine &TM = DAG->MF.getTarget();
1455   Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
1456   Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
1457 
1458   assert((!ForceTopDown || !ForceBottomUp) &&
1459          "-misched-topdown incompatible with -misched-bottomup");
1460 }
1461 
1462 void ConvergingScheduler::releaseTopNode(SUnit *SU) {
1463   if (SU->isScheduled)
1464     return;
1465 
1466   for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1467        I != E; ++I) {
1468     unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle;
1469     unsigned MinLatency = I->getMinLatency();
1470 #ifndef NDEBUG
1471     Top.MaxMinLatency = std::max(MinLatency, Top.MaxMinLatency);
1472 #endif
1473     if (SU->TopReadyCycle < PredReadyCycle + MinLatency)
1474       SU->TopReadyCycle = PredReadyCycle + MinLatency;
1475   }
1476   Top.releaseNode(SU, SU->TopReadyCycle);
1477 }
1478 
1479 void ConvergingScheduler::releaseBottomNode(SUnit *SU) {
1480   if (SU->isScheduled)
1481     return;
1482 
1483   assert(SU->getInstr() && "Scheduled SUnit must have instr");
1484 
1485   for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1486        I != E; ++I) {
1487     if (I->isWeak())
1488       continue;
1489     unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
1490     unsigned MinLatency = I->getMinLatency();
1491 #ifndef NDEBUG
1492     Bot.MaxMinLatency = std::max(MinLatency, Bot.MaxMinLatency);
1493 #endif
1494     if (SU->BotReadyCycle < SuccReadyCycle + MinLatency)
1495       SU->BotReadyCycle = SuccReadyCycle + MinLatency;
1496   }
1497   Bot.releaseNode(SU, SU->BotReadyCycle);
1498 }
1499 
1500 void ConvergingScheduler::registerRoots() {
1501   Rem.CriticalPath = DAG->ExitSU.getDepth();
1502   // Some roots may not feed into ExitSU. Check all of them just in case.
1503   for (std::vector<SUnit*>::const_iterator
1504          I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
1505     if ((*I)->getDepth() > Rem.CriticalPath)
1506       Rem.CriticalPath = (*I)->getDepth();
1507   }
1508   DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n');
1509 }
1510 
1511 /// Does this SU have a hazard within the current instruction group?
1512 ///
1513 /// The scheduler supports two modes of hazard recognition. The first is the
1514 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
1515 /// supports highly complicated in-order reservation tables
1516 /// (ScoreboardHazardRecognizer) and arbitraty target-specific logic.
1517 ///
1518 /// The second is a streamlined mechanism that checks for hazards based on
1519 /// simple counters that the scheduler itself maintains. It explicitly checks
1520 /// for instruction dispatch limitations, including the number of micro-ops that
1521 /// can dispatch per cycle.
1522 ///
1523 /// TODO: Also check whether the SU must start a new group.
1524 bool ConvergingScheduler::SchedBoundary::checkHazard(SUnit *SU) {
1525   if (HazardRec->isEnabled())
1526     return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard;
1527 
1528   unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
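       // Illustrative example (made-up numbers): with an issue width of 4,
       // IssueCount=3, and uops=2, issuing SU would overflow the current
       // instruction group, so it is reported as a hazard.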
1529   if ((IssueCount > 0) && (IssueCount + uops > SchedModel->getIssueWidth())) {
1530     DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops=" << uops << '\n');
1532     return true;
1533   }
1534   return false;
1535 }
1536 
1537 /// Compute the remaining latency to determine whether ILP should be increased.
1538 void ConvergingScheduler::SchedBoundary::setLatencyPolicy(CandPolicy &Policy) {
1539   // FIXME: compile time. In all, we visit four queues here; we should only
1540   // need to visit the one that was last popped, if we cache the result.
1541   unsigned RemLatency = 0;
1542   for (ReadyQueue::iterator I = Available.begin(), E = Available.end();
1543        I != E; ++I) {
1544     unsigned L = getUnscheduledLatency(*I);
1545     DEBUG(dbgs() << "  " << Available.getName()
1546           << " RemLatency SU(" << (*I)->NodeNum << ") " << L << '\n');
1547     if (L > RemLatency)
1548       RemLatency = L;
1549   }
1550   for (ReadyQueue::iterator I = Pending.begin(), E = Pending.end();
1551        I != E; ++I) {
1552     unsigned L = getUnscheduledLatency(*I);
1553     if (L > RemLatency)
1554       RemLatency = L;
1555   }
1556   unsigned CriticalPathLimit = Rem->CriticalPath + SchedModel->getILPWindow();
1557   DEBUG(dbgs() << "  " << Available.getName()
1558         << " ExpectedLatency " << ExpectedLatency
1559         << " CP Limit " << CriticalPathLimit << '\n');
1560   if (RemLatency + ExpectedLatency >= CriticalPathLimit
1561       && RemLatency > Rem->getMaxRemainingCount(SchedModel)) {
1562     Policy.ReduceLatency = true;
1563     DEBUG(dbgs() << "  Increase ILP: " << Available.getName() << '\n');
1564   }
1565 }
1566 
1567 void ConvergingScheduler::SchedBoundary::releaseNode(SUnit *SU,
1568                                                      unsigned ReadyCycle) {
1569 
1570   if (ReadyCycle < MinReadyCycle)
1571     MinReadyCycle = ReadyCycle;
1572 
1573   // Check for interlocks first. For the purpose of other heuristics, an
1574   // instruction that cannot issue appears as if it's not in the ReadyQueue.
1575   if (ReadyCycle > CurrCycle || checkHazard(SU))
1576     Pending.push(SU);
1577   else
1578     Available.push(SU);
1579 
1580   // Record this node as an immediate dependent of the scheduled node.
1581   NextSUs.insert(SU);
1582 }
1583 
1584 /// Move the boundary of scheduled code by one cycle.
1585 void ConvergingScheduler::SchedBoundary::bumpCycle() {
1586   unsigned Width = SchedModel->getIssueWidth();
1587   IssueCount = (IssueCount <= Width) ? 0 : IssueCount - Width;
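       // Micro-ops beyond the issue width carry over: e.g. (illustrative
       // numbers) with Width=4 and IssueCount=6, two micro-ops spill into the
       // new cycle, while IssueCount <= 4 leaves the new cycle empty.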
1588 
1589   unsigned NextCycle = CurrCycle + 1;
1590   assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
1591   if (MinReadyCycle > NextCycle) {
1592     IssueCount = 0;
1593     NextCycle = MinReadyCycle;
1594   }
1595 
1596   if (!HazardRec->isEnabled()) {
1597     // Bypass HazardRec virtual calls.
1598     CurrCycle = NextCycle;
1599   }
1600   else {
1601     // Bypass getHazardType calls in case of long latency.
1602     for (; CurrCycle != NextCycle; ++CurrCycle) {
1603       if (isTop())
1604         HazardRec->AdvanceCycle();
1605       else
1606         HazardRec->RecedeCycle();
1607     }
1608   }
1609   CheckPending = true;
1610   IsResourceLimited = getCriticalCount() > std::max(ExpectedLatency, CurrCycle);
1611 
1612   DEBUG(dbgs() << "  " << Available.getName()
1613         << " Cycle: " << CurrCycle << '\n');
1614 }
1615 
1616 /// Add the given processor resource to this scheduled zone.
1617 void ConvergingScheduler::SchedBoundary::countResource(unsigned PIdx,
1618                                                        unsigned Cycles) {
1619   unsigned Factor = SchedModel->getResourceFactor(PIdx);
1620   DEBUG(dbgs() << "  " << SchedModel->getProcResource(PIdx)->Name
1621         << " +(" << Cycles << "x" << Factor
1622         << ") / " << SchedModel->getLatencyFactor() << '\n');
1623 
1624   unsigned Count = Factor * Cycles;
1625   ResourceCounts[PIdx] += Count;
1626   assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
1627   Rem->RemainingCounts[PIdx] -= Count;
1628 
1629   // Check if this resource exceeds the current critical resource by a full
1630   // cycle. If so, it becomes the critical resource.
1631   if ((int)(ResourceCounts[PIdx] - ResourceCounts[CritResIdx])
1632       >= (int)SchedModel->getLatencyFactor()) {
1633     CritResIdx = PIdx;
1634     DEBUG(dbgs() << "  *** Critical resource "
1635           << SchedModel->getProcResource(PIdx)->Name << " x"
1636           << ResourceCounts[PIdx] << '\n');
1637   }
1638 }
1639 
1640 /// Move the boundary of scheduled code by one SUnit.
1641 void ConvergingScheduler::SchedBoundary::bumpNode(SUnit *SU) {
1642   // Update the reservation table.
1643   if (HazardRec->isEnabled()) {
1644     if (!isTop() && SU->isCall) {
1645       // Calls are scheduled with their preceding instructions. For bottom-up
1646       // scheduling, clear the pipeline state before emitting.
1647       HazardRec->Reset();
1648     }
1649     HazardRec->EmitInstruction(SU);
1650   }
1651   // Update resource counts and critical resource.
1652   if (SchedModel->hasInstrSchedModel()) {
1653     const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1654     Rem->RemainingMicroOps -= SchedModel->getNumMicroOps(SU->getInstr(), SC);
1655     for (TargetSchedModel::ProcResIter
1656            PI = SchedModel->getWriteProcResBegin(SC),
1657            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1658       countResource(PI->ProcResourceIdx, PI->Cycles);
1659     }
1660   }
1661   if (isTop()) {
1662     if (SU->getDepth() > ExpectedLatency)
1663       ExpectedLatency = SU->getDepth();
1664   }
1665   else {
1666     if (SU->getHeight() > ExpectedLatency)
1667       ExpectedLatency = SU->getHeight();
1668   }
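       // ExpectedLatency now reflects the zone's critical path so far:
       // instruction depth when scheduling top-down, height when bottom-up.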
1669 
1670   IsResourceLimited = getCriticalCount() > std::max(ExpectedLatency, CurrCycle);
1671 
1672   // Check the instruction group dispatch limit.
1673   // TODO: Check if this SU must end a dispatch group.
1674   IssueCount += SchedModel->getNumMicroOps(SU->getInstr());
1675 
1676   // checkHazard ensures that the micro-ops issued in one cycle do not exceed
1677   // the issue width. However, we commonly reach the maximum. In this case
1678   // opportunistically bump the cycle to avoid uselessly checking everything in
1679   // the ready queue. Furthermore, a single instruction may produce more than
1680   // one cycle's worth of micro-ops.
1681   if (IssueCount >= SchedModel->getIssueWidth()) {
1682     DEBUG(dbgs() << "  *** Max instrs at cycle " << CurrCycle << '\n');
1683     bumpCycle();
1684   }
1685 }
1686 
1687 /// Release pending ready nodes into the available queue. This makes them
1688 /// visible to heuristics.
1689 void ConvergingScheduler::SchedBoundary::releasePending() {
1690   // If the available queue is empty, it is safe to reset MinReadyCycle.
1691   if (Available.empty())
1692     MinReadyCycle = UINT_MAX;
1693 
1694   // Check to see if any of the pending instructions are ready to issue.  If
1695   // so, add them to the available queue.
1696   for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
1697     SUnit *SU = *(Pending.begin()+i);
1698     unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
1699 
1700     if (ReadyCycle < MinReadyCycle)
1701       MinReadyCycle = ReadyCycle;
1702 
1703     if (ReadyCycle > CurrCycle)
1704       continue;
1705 
1706     if (checkHazard(SU))
1707       continue;
1708 
1709     Available.push(SU);
1710     Pending.remove(Pending.begin()+i);
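         // Compensate for the element just removed from Pending.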
1711     --i; --e;
1712   }
1713   DEBUG(if (!Pending.empty()) Pending.dump());
1714   CheckPending = false;
1715 }
1716 
1717 /// Remove SU from the ready set for this boundary.
1718 void ConvergingScheduler::SchedBoundary::removeReady(SUnit *SU) {
1719   if (Available.isInQueue(SU))
1720     Available.remove(Available.find(SU));
1721   else {
1722     assert(Pending.isInQueue(SU) && "bad ready count");
1723     Pending.remove(Pending.find(SU));
1724   }
1725 }
1726 
1727 /// If this queue only has one ready candidate, return it. As a side effect,
1728 /// defer any nodes that now hit a hazard, and advance the cycle until at least
1729 /// one node is ready. If multiple instructions are ready, return NULL.
1730 SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() {
1731   if (CheckPending)
1732     releasePending();
1733 
1734   if (IssueCount > 0) {
1735     // Defer any ready instrs that now have a hazard.
1736     for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
1737       if (checkHazard(*I)) {
1738         Pending.push(*I);
1739         I = Available.remove(I);
1740         continue;
1741       }
1742       ++I;
1743     }
1744   }
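       // Advance the cycle, releasing any newly ready nodes, until at least
       // one node is available; the assert guards against spinning forever on
       // a permanent hazard.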
1745   for (unsigned i = 0; Available.empty(); ++i) {
1746     assert(i <= (HazardRec->getMaxLookAhead() + MaxMinLatency) &&
1747            "permanent hazard"); (void)i;
1748     bumpCycle();
1749     releasePending();
1750   }
1751   if (Available.size() == 1)
1752     return *Available.begin();
1753   return NULL;
1754 }
1755 
1756 /// Record the candidate policy for opposite zones with different critical
1757 /// resources.
1758 ///
1759 /// If the CriticalZone is latency limited, don't force a policy for the
1760 /// candidates here. Instead, setLatencyPolicy sets ReduceLatency if needed.
1761 void ConvergingScheduler::balanceZones(
1762   ConvergingScheduler::SchedBoundary &CriticalZone,
1763   ConvergingScheduler::SchedCandidate &CriticalCand,
1764   ConvergingScheduler::SchedBoundary &OppositeZone,
1765   ConvergingScheduler::SchedCandidate &OppositeCand) {
1766 
1767   if (!CriticalZone.IsResourceLimited)
1768     return;
1769   assert(SchedModel->hasInstrSchedModel() && "required schedmodel");
1770 
1771   SchedRemainder *Rem = CriticalZone.Rem;
1772 
1773   // If the critical zone is overconsuming a resource relative to the
1774   // remainder, try to reduce it.
1775   unsigned RemainingCritCount =
1776     Rem->RemainingCounts[CriticalZone.CritResIdx];
1777   if ((int)(Rem->getMaxRemainingCount(SchedModel) - RemainingCritCount)
1778       > (int)SchedModel->getLatencyFactor()) {
1779     CriticalCand.Policy.ReduceResIdx = CriticalZone.CritResIdx;
1780     DEBUG(dbgs() << "  Balance " << CriticalZone.Available.getName()
1781           << " reduce "
1782           << SchedModel->getProcResource(CriticalZone.CritResIdx)->Name
1783           << '\n');
1784   }
1785   // If the other zone is underconsuming a resource relative to the full zone,
1786   // try to increase it.
1787   unsigned OppositeCount =
1788     OppositeZone.ResourceCounts[CriticalZone.CritResIdx];
1789   if ((int)(OppositeZone.ExpectedCount - OppositeCount)
1790       > (int)SchedModel->getLatencyFactor()) {
1791     OppositeCand.Policy.DemandResIdx = CriticalZone.CritResIdx;
1792     DEBUG(dbgs() << "  Balance " << OppositeZone.Available.getName()
1793           << " demand "
1794           << SchedModel->getProcResource(CriticalZone.CritResIdx)->Name
1795           << '\n');
1796   }
1797 }
1798 
1799 /// Determine if the scheduled zones exceed resource limits or critical path and
1800 /// set each candidate's ReduceHeight policy accordingly.
1801 void ConvergingScheduler::checkResourceLimits(
1802   ConvergingScheduler::SchedCandidate &TopCand,
1803   ConvergingScheduler::SchedCandidate &BotCand) {
1804 
1805   // Set ReduceLatency to true if needed.
1806   Bot.setLatencyPolicy(BotCand.Policy);
1807   Top.setLatencyPolicy(TopCand.Policy);
1808 
1809   // Handle resource-limited regions.
1810   if (Top.IsResourceLimited && Bot.IsResourceLimited
1811       && Top.CritResIdx == Bot.CritResIdx) {
1812     // If the scheduled critical resource in both zones is no longer the
1813     // critical remaining resource, attempt to reduce resource height both ways.
1814     if (Top.CritResIdx != Rem.CritResIdx) {
1815       TopCand.Policy.ReduceResIdx = Top.CritResIdx;
1816       BotCand.Policy.ReduceResIdx = Bot.CritResIdx;
1817       DEBUG(dbgs() << "  Reduce scheduled "
1818             << SchedModel->getProcResource(Top.CritResIdx)->Name << '\n');
1819     }
1820     return;
1821   }
1822   // Handle latency-limited regions.
1823   if (!Top.IsResourceLimited && !Bot.IsResourceLimited) {
1824     // If the total scheduled expected latency exceeds the region's critical
1825     // path then reduce latency both ways.
1826     //
1827     // Just because a zone is not resource limited does not mean it is latency
1828   // limited. Unbuffered resources, such as the max micro-op count, may cause
1829   // CurrCycle to exceed expected latency.
1830     if ((Top.ExpectedLatency + Bot.ExpectedLatency >= Rem.CriticalPath)
1831         && (Rem.CriticalPath > Top.CurrCycle + Bot.CurrCycle)) {
1832       TopCand.Policy.ReduceLatency = true;
1833       BotCand.Policy.ReduceLatency = true;
1834       DEBUG(dbgs() << "  Reduce scheduled latency " << Top.ExpectedLatency
1835             << " + " << Bot.ExpectedLatency << '\n');
1836     }
1837     return;
1838   }
1839   // The critical resource is different in each zone, so request balancing.
1840 
1841   // Compute the cost of each zone.
1842   Top.ExpectedCount = std::max(Top.ExpectedLatency, Top.CurrCycle);
1843   Top.ExpectedCount = std::max(
1844     Top.getCriticalCount(),
1845     Top.ExpectedCount * SchedModel->getLatencyFactor());
1846   Bot.ExpectedCount = std::max(Bot.ExpectedLatency, Bot.CurrCycle);
1847   Bot.ExpectedCount = std::max(
1848     Bot.getCriticalCount(),
1849     Bot.ExpectedCount * SchedModel->getLatencyFactor());
1850 
1851   balanceZones(Top, TopCand, Bot, BotCand);
1852   balanceZones(Bot, BotCand, Top, TopCand);
1853 }
1854 
1855 void ConvergingScheduler::SchedCandidate::
1856 initResourceDelta(const ScheduleDAGMI *DAG,
1857                   const TargetSchedModel *SchedModel) {
1858   if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
1859     return;
1860 
1861   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1862   for (TargetSchedModel::ProcResIter
1863          PI = SchedModel->getWriteProcResBegin(SC),
1864          PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1865     if (PI->ProcResourceIdx == Policy.ReduceResIdx)
1866       ResDelta.CritResources += PI->Cycles;
1867     if (PI->ProcResourceIdx == Policy.DemandResIdx)
1868       ResDelta.DemandedResources += PI->Cycles;
1869   }
1870 }
1871 
1872 /// Return true if this heuristic determines order.
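     /// On a win, TryCand.Reason is set to Reason. On a loss, the incumbent
     /// Cand's Reason is tightened to Reason when that ranks higher (lower
     /// CandReason values take priority), recording why Cand survived.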
1873 static bool tryLess(int TryVal, int CandVal,
1874                     ConvergingScheduler::SchedCandidate &TryCand,
1875                     ConvergingScheduler::SchedCandidate &Cand,
1876                     ConvergingScheduler::CandReason Reason) {
1877   if (TryVal < CandVal) {
1878     TryCand.Reason = Reason;
1879     return true;
1880   }
1881   if (TryVal > CandVal) {
1882     if (Cand.Reason > Reason)
1883       Cand.Reason = Reason;
1884     return true;
1885   }
1886   return false;
1887 }
1888 
1889 static bool tryGreater(int TryVal, int CandVal,
1890                        ConvergingScheduler::SchedCandidate &TryCand,
1891                        ConvergingScheduler::SchedCandidate &Cand,
1892                        ConvergingScheduler::CandReason Reason) {
1893   if (TryVal > CandVal) {
1894     TryCand.Reason = Reason;
1895     return true;
1896   }
1897   if (TryVal < CandVal) {
1898     if (Cand.Reason > Reason)
1899       Cand.Reason = Reason;
1900     return true;
1901   }
1902   return false;
1903 }
1904 
1905 static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
1906   return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
1907 }
1908 
1909 /// Minimize physical register live ranges. Regalloc wants them adjacent to
1910 /// their physreg def/use.
1911 ///
1912 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
1913 /// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
1914 /// with the operation that produces or consumes the physreg. We'll do this when
1915 /// regalloc has support for parallel copies.
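     /// Returns 1 to schedule the copy immediately, -1 to defer it past the
     /// zone boundary, and 0 when this heuristic does not apply.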
1916 static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
1917   const MachineInstr *MI = SU->getInstr();
1918   if (!MI->isCopy())
1919     return 0;
1920 
1921   unsigned ScheduledOper = isTop ? 1 : 0;
1922   unsigned UnscheduledOper = isTop ? 0 : 1;
1923   // If we have already scheduled the physreg producer/consumer, immediately
1924   // schedule the copy.
1925   if (TargetRegisterInfo::isPhysicalRegister(
1926         MI->getOperand(ScheduledOper).getReg()))
1927     return 1;
1928   // If the physreg is at the boundary, defer it. Otherwise schedule it
1929   // immediately to free the dependent. We can hoist the copy later.
1930   bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
1931   if (TargetRegisterInfo::isPhysicalRegister(
1932         MI->getOperand(UnscheduledOper).getReg()))
1933     return AtBoundary ? -1 : 1;
1934   return 0;
1935 }
1936 
1937 /// Apply a set of heuristics to a new candidate. Heuristics are currently
1938 /// hierarchical. This may be more efficient than a graduated cost model because
1939 /// we don't need to evaluate all aspects of the model for each node in the
1940 /// queue. But it's really done to make the heuristics easier to debug and
1941 /// statistically analyze.
1942 ///
1943 /// \param Cand provides the policy and current best candidate.
1944 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
1945 /// \param Zone describes the scheduled zone that we are extending.
1946 /// \param RPTracker describes reg pressure within the scheduled zone.
1947 /// \param TempTracker is a scratch pressure tracker to reuse in queries.
1948 void ConvergingScheduler::tryCandidate(SchedCandidate &Cand,
1949                                        SchedCandidate &TryCand,
1950                                        SchedBoundary &Zone,
1951                                        const RegPressureTracker &RPTracker,
1952                                        RegPressureTracker &TempTracker) {
1953 
1954   // Always initialize TryCand's RPDelta.
1955   TempTracker.getMaxPressureDelta(TryCand.SU->getInstr(), TryCand.RPDelta,
1956                                   DAG->getRegionCriticalPSets(),
1957                                   DAG->getRegPressure().MaxSetPressure);
1958 
1959   // Initialize the candidate if needed.
1960   if (!Cand.isValid()) {
1961     TryCand.Reason = NodeOrder;
1962     return;
1963   }
1964 
1965   if (tryGreater(biasPhysRegCopy(TryCand.SU, Zone.isTop()),
1966                  biasPhysRegCopy(Cand.SU, Zone.isTop()),
1967                  TryCand, Cand, PhysRegCopy))
1968     return;
1969 
1970   // Avoid exceeding the target's limit.
1971   if (tryLess(TryCand.RPDelta.Excess.UnitIncrease,
1972               Cand.RPDelta.Excess.UnitIncrease, TryCand, Cand, SingleExcess))
1973     return;
1974   if (Cand.Reason == SingleExcess)
1975     Cand.Reason = MultiPressure;
1976 
1977   // Avoid increasing the max critical pressure in the scheduled region.
1978   if (tryLess(TryCand.RPDelta.CriticalMax.UnitIncrease,
1979               Cand.RPDelta.CriticalMax.UnitIncrease,
1980               TryCand, Cand, SingleCritical))
1981     return;
1982   if (Cand.Reason == SingleCritical)
1983     Cand.Reason = MultiPressure;
1984 
1985   // Keep clustered nodes together to encourage downstream peephole
1986   // optimizations which may reduce resource requirements.
1987   //
1988   // This is a best effort to set things up for a post-RA pass. Optimizations
1989   // like generating loads of multiple registers should ideally be done within
1990   // the scheduler pass by combining the loads during DAG postprocessing.
1991   const SUnit *NextClusterSU =
1992     Zone.isTop() ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
1993   if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU,
1994                  TryCand, Cand, Cluster))
1995     return;
1996 
1997   // Weak edges are for clustering and other constraints.
1998   //
1999   // Deferring TryCand here does not change Cand's reason. This is good in the
2000   // sense that a bad candidate shouldn't affect a previous candidate's
2001   // goodness, but bad in that it is asymmetric and depends on queue order.
2002   CandReason OrigReason = Cand.Reason;
2003   if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()),
2004               getWeakLeft(Cand.SU, Zone.isTop()),
2005               TryCand, Cand, Weak)) {
2006     Cand.Reason = OrigReason;
2007     return;
2008   }
2009   // Avoid critical resource consumption and balance the schedule.
2010   TryCand.initResourceDelta(DAG, SchedModel);
2011   if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
2012               TryCand, Cand, ResourceReduce))
2013     return;
2014   if (tryGreater(TryCand.ResDelta.DemandedResources,
2015                  Cand.ResDelta.DemandedResources,
2016                  TryCand, Cand, ResourceDemand))
2017     return;
2018 
2019   // Avoid serializing long latency dependence chains.
2020   if (Cand.Policy.ReduceLatency) {
2021     if (Zone.isTop()) {
2022       if (Cand.SU->getDepth() * SchedModel->getLatencyFactor()
2023           > Zone.ExpectedCount) {
2024         if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2025                     TryCand, Cand, TopDepthReduce))
2026           return;
2027       }
2028       if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2029                      TryCand, Cand, TopPathReduce))
2030         return;
2031     }
2032     else {
2033       if (Cand.SU->getHeight() * SchedModel->getLatencyFactor()
2034           > Zone.ExpectedCount) {
2035         if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2036                     TryCand, Cand, BotHeightReduce))
2037           return;
2038       }
2039       if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2040                      TryCand, Cand, BotPathReduce))
2041         return;
2042     }
2043   }
2044 
2045   // Avoid increasing the max pressure of the entire region.
2046   if (tryLess(TryCand.RPDelta.CurrentMax.UnitIncrease,
2047               Cand.RPDelta.CurrentMax.UnitIncrease, TryCand, Cand, SingleMax))
2048     return;
2049   if (Cand.Reason == SingleMax)
2050     Cand.Reason = MultiPressure;
2051 
2052   // Prefer immediate defs/users of the last scheduled instruction. This is a
2053   // nice pressure avoidance strategy that also conserves the processor's
2054   // register renaming resources and keeps the machine code readable.
2055   if (tryGreater(Zone.NextSUs.count(TryCand.SU), Zone.NextSUs.count(Cand.SU),
2056                  TryCand, Cand, NextDefUse))
2057     return;
2058 
2059   // Fall through to original instruction order.
2060   if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
2061       || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
2062     TryCand.Reason = NodeOrder;
2063   }
2064 }
2065 
2066 /// pickNodeFromQueue helper that returns true if the LHS reg pressure effect is
2067 /// more desirable than RHS from a scheduling standpoint.
2068 static bool compareRPDelta(const RegPressureDelta &LHS,
2069                            const RegPressureDelta &RHS) {
2070   // Compare each component of pressure in decreasing order of importance
2071   // without checking if any are valid. Invalid PressureElements are assumed to
2072   // have UnitIncrease==0, so are neutral.
2073 
2074   // Avoid exceeding the target's pressure limit.
2075   if (LHS.Excess.UnitIncrease != RHS.Excess.UnitIncrease) {
2076     DEBUG(dbgs() << "  RP excess top - bot: "
2077           << (LHS.Excess.UnitIncrease - RHS.Excess.UnitIncrease) << '\n');
2078     return LHS.Excess.UnitIncrease < RHS.Excess.UnitIncrease;
2079   }
2080   // Avoid increasing the max critical pressure in the scheduled region.
2081   if (LHS.CriticalMax.UnitIncrease != RHS.CriticalMax.UnitIncrease) {
2082     DEBUG(dbgs() << "  RP critical top - bot: "
2083           << (LHS.CriticalMax.UnitIncrease - RHS.CriticalMax.UnitIncrease)
2084           << '\n');
2085     return LHS.CriticalMax.UnitIncrease < RHS.CriticalMax.UnitIncrease;
2086   }
2087   // Avoid increasing the max pressure of the entire region.
2088   if (LHS.CurrentMax.UnitIncrease != RHS.CurrentMax.UnitIncrease) {
2089     DEBUG(dbgs() << "  RP current top - bot: "
2090           << (LHS.CurrentMax.UnitIncrease - RHS.CurrentMax.UnitIncrease)
2091           << '\n');
2092     return LHS.CurrentMax.UnitIncrease < RHS.CurrentMax.UnitIncrease;
2093   }
2094   return false;
2095 }
2096 
2097 #ifndef NDEBUG
2098 const char *ConvergingScheduler::getReasonStr(
2099   ConvergingScheduler::CandReason Reason) {
2100   switch (Reason) {
2101   case NoCand:         return "NOCAND    ";
2102   case PhysRegCopy:    return "PREG-COPY ";
2103   case SingleExcess:   return "REG-EXCESS";
2104   case SingleCritical: return "REG-CRIT  ";
2105   case Cluster:        return "CLUSTER   ";
2106   case Weak:           return "WEAK      ";
2107   case SingleMax:      return "REG-MAX   ";
2108   case MultiPressure:  return "REG-MULTI ";
2109   case ResourceReduce: return "RES-REDUCE";
2110   case ResourceDemand: return "RES-DEMAND";
2111   case TopDepthReduce: return "TOP-DEPTH ";
2112   case TopPathReduce:  return "TOP-PATH  ";
2113   case BotHeightReduce:return "BOT-HEIGHT";
2114   case BotPathReduce:  return "BOT-PATH  ";
2115   case NextDefUse:     return "DEF-USE   ";
2116   case NodeOrder:      return "ORDER     ";
2117   }
2118   llvm_unreachable("Unknown reason!");
2119 }
2120 
2121 void ConvergingScheduler::traceCandidate(const SchedCandidate &Cand) {
2122   PressureElement P;
2123   unsigned ResIdx = 0;
2124   unsigned Latency = 0;
2125   switch (Cand.Reason) {
2126   default:
2127     break;
2128   case SingleExcess:
2129     P = Cand.RPDelta.Excess;
2130     break;
2131   case SingleCritical:
2132     P = Cand.RPDelta.CriticalMax;
2133     break;
2134   case SingleMax:
2135     P = Cand.RPDelta.CurrentMax;
2136     break;
2137   case ResourceReduce:
2138     ResIdx = Cand.Policy.ReduceResIdx;
2139     break;
2140   case ResourceDemand:
2141     ResIdx = Cand.Policy.DemandResIdx;
2142     break;
2143   case TopDepthReduce:
2144     Latency = Cand.SU->getDepth();
2145     break;
2146   case TopPathReduce:
2147     Latency = Cand.SU->getHeight();
2148     break;
2149   case BotHeightReduce:
2150     Latency = Cand.SU->getHeight();
2151     break;
2152   case BotPathReduce:
2153     Latency = Cand.SU->getDepth();
2154     break;
2155   }
2156   dbgs() << "  SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
2157   if (P.isValid())
2158     dbgs() << " " << TRI->getRegPressureSetName(P.PSetID)
2159            << ":" << P.UnitIncrease << " ";
2160   else
2161     dbgs() << "      ";
2162   if (ResIdx)
2163     dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
2164   else
2165     dbgs() << "         ";
2166   if (Latency)
2167     dbgs() << " " << Latency << " cycles ";
2168   else
2169     dbgs() << "          ";
2170   dbgs() << '\n';
2171 }
2172 #endif
2173 
2174 /// Pick the best candidate from the given ready queue.
2175 ///
2176 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
2177 /// DAG building. To adjust for the current scheduling location we need to
2178 /// maintain the number of vreg uses remaining to be top-scheduled.
2179 void ConvergingScheduler::pickNodeFromQueue(SchedBoundary &Zone,
2180                                             const RegPressureTracker &RPTracker,
2181                                             SchedCandidate &Cand) {
2182   ReadyQueue &Q = Zone.Available;
2183 
2184   DEBUG(Q.dump());
2185 
2186   // getMaxPressureDelta temporarily modifies the tracker.
2187   RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
2188 
2189   for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
2190 
2191     SchedCandidate TryCand(Cand.Policy);
2192     TryCand.SU = *I;
2193     tryCandidate(Cand, TryCand, Zone, RPTracker, TempTracker);
2194     if (TryCand.Reason != NoCand) {
2195       // Initialize resource delta if needed in case future heuristics query it.
2196       if (TryCand.ResDelta == SchedResourceDelta())
2197         TryCand.initResourceDelta(DAG, SchedModel);
2198       Cand.setBest(TryCand);
2199       DEBUG(traceCandidate(Cand));
2200     }
2201   }
2202 }
2203 
2204 static void tracePick(const ConvergingScheduler::SchedCandidate &Cand,
2205                       bool IsTop) {
2206   DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
2207         << ConvergingScheduler::getReasonStr(Cand.Reason) << '\n');
2208 }
2209 
2210 /// Pick the best candidate node from either the top or bottom queue.
2211 SUnit *ConvergingScheduler::pickNodeBidirectional(bool &IsTopNode) {
2212   // Schedule as far as possible in the direction of no choice. This is most
2213   // efficient, but also provides the best heuristics for CriticalPSets.
2214   if (SUnit *SU = Bot.pickOnlyChoice()) {
2215     IsTopNode = false;
2216     DEBUG(dbgs() << "Pick Bot NOCAND\n");
2217     return SU;
2218   }
2219   if (SUnit *SU = Top.pickOnlyChoice()) {
2220     IsTopNode = true;
2221     DEBUG(dbgs() << "Pick Top NOCAND\n");
2222     return SU;
2223   }
2224   CandPolicy NoPolicy;
2225   SchedCandidate BotCand(NoPolicy);
2226   SchedCandidate TopCand(NoPolicy);
2227   checkResourceLimits(TopCand, BotCand);
2228 
2229   // Prefer bottom scheduling when heuristics are silent.
2230   pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
2231   assert(BotCand.Reason != NoCand && "failed to find the first candidate");
2232 
2233   // If either Q has a single candidate that provides the least increase in
2234   // Excess pressure, we can immediately schedule from that Q.
2235   //
2236   // RegionCriticalPSets summarizes the pressure within the scheduled region and
2237   // affects picking from either Q. If scheduling in one direction must
2238   // increase pressure for one of the excess PSets, then schedule in that
2239   // direction first to provide more freedom in the other direction.
2240   if (BotCand.Reason == SingleExcess || BotCand.Reason == SingleCritical) {
2241     IsTopNode = false;
2242     tracePick(BotCand, IsTopNode);
2243     return BotCand.SU;
2244   }
2245   // Check if the top Q has a better candidate.
2246   pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2247   assert(TopCand.Reason != NoCand && "failed to find the first candidate");
2248 
2249   // If either Q has a single candidate that minimizes pressure above the
2250   // original region's pressure pick it.
2251   if (TopCand.Reason <= SingleMax || BotCand.Reason <= SingleMax) {
2252     if (TopCand.Reason < BotCand.Reason) {
2253       IsTopNode = true;
2254       tracePick(TopCand, IsTopNode);
2255       return TopCand.SU;
2256     }
2257     IsTopNode = false;
2258     tracePick(BotCand, IsTopNode);
2259     return BotCand.SU;
2260   }
2261   // Check for a salient pressure difference and pick the best from either side.
2262   if (compareRPDelta(TopCand.RPDelta, BotCand.RPDelta)) {
2263     IsTopNode = true;
2264     tracePick(TopCand, IsTopNode);
2265     return TopCand.SU;
2266   }
2267   // Otherwise prefer the bottom candidate, in node order if all else failed.
2268   if (TopCand.Reason < BotCand.Reason) {
2269     IsTopNode = true;
2270     tracePick(TopCand, IsTopNode);
2271     return TopCand.SU;
2272   }
2273   IsTopNode = false;
2274   tracePick(BotCand, IsTopNode);
2275   return BotCand.SU;
2276 }
2277 
2278 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
2279 SUnit *ConvergingScheduler::pickNode(bool &IsTopNode) {
2280   if (DAG->top() == DAG->bottom()) {
2281     assert(Top.Available.empty() && Top.Pending.empty() &&
2282            Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
2283     return NULL;
2284   }
2285   SUnit *SU;
2286   do {
2287     if (ForceTopDown) {
2288       SU = Top.pickOnlyChoice();
2289       if (!SU) {
2290         CandPolicy NoPolicy;
2291         SchedCandidate TopCand(NoPolicy);
2292         pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2293         assert(TopCand.Reason != NoCand && "failed to find the first candidate");
2294         SU = TopCand.SU;
2295       }
2296       IsTopNode = true;
2297     }
2298     else if (ForceBottomUp) {
2299       SU = Bot.pickOnlyChoice();
2300       if (!SU) {
2301         CandPolicy NoPolicy;
2302         SchedCandidate BotCand(NoPolicy);
2303         pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
2304         assert(BotCand.Reason != NoCand && "failed to find the first candidate");
2305         SU = BotCand.SU;
2306       }
2307       IsTopNode = false;
2308     }
2309     else {
2310       SU = pickNodeBidirectional(IsTopNode);
2311     }
2312   } while (SU->isScheduled);
2313 
2314   if (SU->isTopReady())
2315     Top.removeReady(SU);
2316   if (SU->isBottomReady())
2317     Bot.removeReady(SU);
2318 
2319   DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
2320   return SU;
2321 }
2322 
2323 void ConvergingScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
2324 
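       // Rescheduled copies are placed immediately above SU when scheduling
       // top-down, and immediately below it when scheduling bottom-up.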
2325   MachineBasicBlock::iterator InsertPos = SU->getInstr();
2326   if (!isTop)
2327     ++InsertPos;
2328   SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;
2329 
2330   // Find already scheduled copies with a single physreg dependence and move
2331   // them just above the scheduled instruction.
2332   for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end();
2333        I != E; ++I) {
2334     if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg()))
2335       continue;
2336     SUnit *DepSU = I->getSUnit();
2337     if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
2338       continue;
2339     MachineInstr *Copy = DepSU->getInstr();
2340     if (!Copy->isCopy())
2341       continue;
2342     DEBUG(dbgs() << "  Rescheduling physreg copy ";
2343           I->getSUnit()->dump(DAG));
2344     DAG->moveInstruction(Copy, InsertPos);
2345   }
2346 }
2347 
2348 /// Update the scheduler's state after scheduling a node. This is the same node
2349 /// that was just returned by pickNode(). However, ScheduleDAGMI needs to update
2350 /// it's state based on the current cycle before MachineSchedStrategy does.
2351 ///
2352 /// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
2353 /// them here. See comments in biasPhysRegCopy.
2354 void ConvergingScheduler::schedNode(SUnit *SU, bool IsTopNode) {
2355   if (IsTopNode) {
2356     SU->TopReadyCycle = Top.CurrCycle;
2357     Top.bumpNode(SU);
2358     if (SU->hasPhysRegUses)
2359       reschedulePhysRegCopies(SU, true);
2360   }
2361   else {
2362     SU->BotReadyCycle = Bot.CurrCycle;
2363     Bot.bumpNode(SU);
2364     if (SU->hasPhysRegDefs)
2365       reschedulePhysRegCopies(SU, false);
2366   }
2367 }
2368 
2369 /// Create the standard converging machine scheduler. This will be used as the
2370 /// default scheduler if the target does not set a default.
2371 static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
2372   assert((!ForceTopDown || !ForceBottomUp) &&
2373          "-misched-topdown incompatible with -misched-bottomup");
2374   ScheduleDAGMI *DAG = new ScheduleDAGMI(C, new ConvergingScheduler());
2375   // Register DAG post-processors.
2376   //
2377   // FIXME: extend the mutation API to allow earlier mutations to instantiate
2378   // data and pass it to later mutations. Have a single mutation that gathers
2379   // the interesting nodes in one pass.
2380   if (EnableCopyConstrain)
2381     DAG->addMutation(new CopyConstrain(DAG->TII, DAG->TRI));
2382   if (EnableLoadCluster)
2383     DAG->addMutation(new LoadClusterMutation(DAG->TII, DAG->TRI));
2384   if (EnableMacroFusion)
2385     DAG->addMutation(new MacroFusion(DAG->TII));
2386   return DAG;
2387 }
2388 static MachineSchedRegistry
2389 ConvergingSchedRegistry("converge", "Standard converging scheduler.",
2390                         createConvergingSched);
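     // Registered strategies can be selected from the command line via the
     // machine scheduler registry, e.g. (illustrative invocation):
     //   llc -enable-misched -misched=converge ...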
2391 
2392 //===----------------------------------------------------------------------===//
2393 // ILP Scheduler. Currently for experimental analysis of heuristics.
2394 //===----------------------------------------------------------------------===//
2395 
2396 namespace {
2397 /// \brief Order nodes by the ILP metric.
2398 struct ILPOrder {
2399   const SchedDFSResult *DFSResult;
2400   const BitVector *ScheduledTrees;
2401   bool MaximizeILP;
2402 
2403   ILPOrder(bool MaxILP): DFSResult(0), ScheduledTrees(0), MaximizeILP(MaxILP) {}
2404 
2405   /// \brief Apply a less-than relation on node priority.
2406   ///
2407   /// (Return true if A comes after B in the Q.)
2408   bool operator()(const SUnit *A, const SUnit *B) const {
2409     unsigned SchedTreeA = DFSResult->getSubtreeID(A);
2410     unsigned SchedTreeB = DFSResult->getSubtreeID(B);
2411     if (SchedTreeA != SchedTreeB) {
2412       // Unscheduled trees have lower priority.
2413       if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
2414         return ScheduledTrees->test(SchedTreeB);
2415 
2416       // Trees with shallower connections have lower priority.
2417       if (DFSResult->getSubtreeLevel(SchedTreeA)
2418           != DFSResult->getSubtreeLevel(SchedTreeB)) {
2419         return DFSResult->getSubtreeLevel(SchedTreeA)
2420           < DFSResult->getSubtreeLevel(SchedTreeB);
2421       }
2422     }
2423     if (MaximizeILP)
2424       return DFSResult->getILP(A) < DFSResult->getILP(B);
2425     else
2426       return DFSResult->getILP(A) > DFSResult->getILP(B);
2427   }
2428 };
2429 
2430 /// \brief Schedule based on the ILP metric.
2431 class ILPScheduler : public MachineSchedStrategy {
2432   /// In case all subtrees are eventually connected to a common root through
2433   /// data dependence (e.g. reduction), place an upper limit on their size.
2434   ///
2435   /// FIXME: A subtree limit is generally good, but in the situation commented
2436   /// above, where multiple similar subtrees feed a common root, we should
2437   /// only split at a point where the resulting subtrees will be balanced.
2438   /// (a motivating test case must be found).
2439   static const unsigned SubtreeLimit = 16;
2440 
2441   ScheduleDAGMI *DAG;
2442   ILPOrder Cmp;
2443 
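     // ReadyQ is kept heap-ordered by Cmp via the std:: heap algorithms below;
     // registerRoots() and scheduleTree() rebuild it when priorities change.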
2444   std::vector<SUnit*> ReadyQ;
2445 public:
2446   ILPScheduler(bool MaximizeILP): DAG(0), Cmp(MaximizeILP) {}
2447 
2448   virtual void initialize(ScheduleDAGMI *dag) {
2449     DAG = dag;
2450     DAG->computeDFSResult();
2451     Cmp.DFSResult = DAG->getDFSResult();
2452     Cmp.ScheduledTrees = &DAG->getScheduledTrees();
2453     ReadyQ.clear();
2454   }
2455 
2456   virtual void registerRoots() {
2457     // Restore the heap in ReadyQ with the updated DFS results.
2458     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2459   }
2460 
2461   /// Implement MachineSchedStrategy interface.
2462   /// -----------------------------------------
2463 
2464   /// Callback to select the highest priority node from the ready Q.
2465   virtual SUnit *pickNode(bool &IsTopNode) {
2466     if (ReadyQ.empty()) return NULL;
2467     std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2468     SUnit *SU = ReadyQ.back();
2469     ReadyQ.pop_back();
2470     IsTopNode = false;
2471     DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") "
2472           << " ILP: " << DAG->getDFSResult()->getILP(SU)
2473           << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @"
2474           << DAG->getDFSResult()->getSubtreeLevel(
2475             DAG->getDFSResult()->getSubtreeID(SU)) << '\n'
2476           << "Scheduling " << *SU->getInstr());
2477     return SU;
2478   }
2479 
2480   /// \brief Scheduler callback to notify that a new subtree is scheduled.
2481   virtual void scheduleTree(unsigned SubtreeID) {
2482     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2483   }
2484 
2485   /// Callback after a node is scheduled. The subtree bookkeeping and queue
2486   /// resorting happen in scheduleTree(); here we only check the direction.
2487   virtual void schedNode(SUnit *SU, bool IsTopNode) {
2488     assert(!IsTopNode && "SchedDFSResult needs bottom-up");
2489   }
2490 
2491   virtual void releaseTopNode(SUnit *) { /*only called for top roots*/ }
2492 
2493   virtual void releaseBottomNode(SUnit *SU) {
2494     ReadyQ.push_back(SU);
2495     std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2496   }
2497 };
2498 } // namespace
2499 
2500 static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
2501   return new ScheduleDAGMI(C, new ILPScheduler(true));
2502 }
2503 static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
2504   return new ScheduleDAGMI(C, new ILPScheduler(false));
2505 }
2506 static MachineSchedRegistry ILPMaxRegistry(
2507   "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
2508 static MachineSchedRegistry ILPMinRegistry(
2509   "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);
2510 
2511 //===----------------------------------------------------------------------===//
2512 // Machine Instruction Shuffler for Correctness Testing
2513 //===----------------------------------------------------------------------===//
2514 
2515 #ifndef NDEBUG
2516 namespace {
2517 /// Apply a less-than relation on the node order, which corresponds to the
2518 /// instruction order prior to scheduling. IsReverse implements greater-than.
2519 template<bool IsReverse>
2520 struct SUnitOrder {
2521   bool operator()(SUnit *A, SUnit *B) const {
2522     if (IsReverse)
2523       return A->NodeNum > B->NodeNum;
2524     else
2525       return A->NodeNum < B->NodeNum;
2526   }
2527 };
2528 
2529 /// Reorder instructions as much as possible.
2530 class InstructionShuffler : public MachineSchedStrategy {
2531   bool IsAlternating;
2532   bool IsTopDown;
2533 
2534   // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
2535   // gives nodes with a higher number higher priority, causing the latest
2536   // instructions to be scheduled first.
2537   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
2538     TopQ;
2539   // When scheduling bottom-up, use greater-than as the queue priority.
2540   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
2541     BottomQ;
2542 public:
2543   InstructionShuffler(bool alternate, bool topdown)
2544     : IsAlternating(alternate), IsTopDown(topdown) {}
2545 
2546   virtual void initialize(ScheduleDAGMI *) {
2547     TopQ.clear();
2548     BottomQ.clear();
2549   }
2550 
2551   /// Implement MachineSchedStrategy interface.
2552   /// -----------------------------------------
2553 
2554   virtual SUnit *pickNode(bool &IsTopNode) {
2555     SUnit *SU;
2556     if (IsTopDown) {
2557       do {
2558         if (TopQ.empty()) return NULL;
2559         SU = TopQ.top();
2560         TopQ.pop();
2561       } while (SU->isScheduled);
2562       IsTopNode = true;
2563     }
2564     else {
2565       do {
2566         if (BottomQ.empty()) return NULL;
2567         SU = BottomQ.top();
2568         BottomQ.pop();
2569       } while (SU->isScheduled);
2570       IsTopNode = false;
2571     }
2572     if (IsAlternating)
2573       IsTopDown = !IsTopDown;
2574     return SU;
2575   }
2576 
2577   virtual void schedNode(SUnit *SU, bool IsTopNode) {}
2578 
2579   virtual void releaseTopNode(SUnit *SU) {
2580     TopQ.push(SU);
2581   }
2582   virtual void releaseBottomNode(SUnit *SU) {
2583     BottomQ.push(SU);
2584   }
2585 };
2586 } // namespace
2587 
2588 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
2589   bool Alternate = !ForceTopDown && !ForceBottomUp;
2590   bool TopDown = !ForceBottomUp;
2591   assert((TopDown || !ForceTopDown) &&
2592          "-misched-topdown incompatible with -misched-bottomup");
2593   return new ScheduleDAGMI(C, new InstructionShuffler(Alternate, TopDown));
2594 }
2595 static MachineSchedRegistry ShufflerRegistry(
2596   "shuffle", "Shuffle machine instructions alternating directions",
2597   createInstructionShuffler);
2598 #endif // !NDEBUG
2599 
2600 //===----------------------------------------------------------------------===//
2601 // GraphWriter support for ScheduleDAGMI.
2602 //===----------------------------------------------------------------------===//
2603 
2604 #ifndef NDEBUG
2605 namespace llvm {
2606 
2607 template<> struct GraphTraits<
2608   ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};
2609 
2610 template<>
2611 struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
2612 
2613   DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
2614 
2615   static std::string getGraphName(const ScheduleDAG *G) {
2616     return G->MF.getName();
2617   }
2618 
2619   static bool renderGraphFromBottomUp() {
2620     return true;
2621   }
2622 
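     /// Hide very high-degree nodes to keep the rendered graph readable.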
2623   static bool isNodeHidden(const SUnit *Node) {
2624     return (Node->NumPreds > 10 || Node->NumSuccs > 10);
2625   }
2626 
2627   static bool hasNodeAddressLabel(const SUnit *Node,
2628                                   const ScheduleDAG *Graph) {
2629     return false;
2630   }
2631 
2632   /// If you want to override the dot attributes printed for a particular
2633   /// edge, override this method.
2634   static std::string getEdgeAttributes(const SUnit *Node,
2635                                        SUnitIterator EI,
2636                                        const ScheduleDAG *Graph) {
2637     if (EI.isArtificialDep())
2638       return "color=cyan,style=dashed";
2639     if (EI.isCtrlDep())
2640       return "color=blue,style=dashed";
2641     return "";
2642   }
2643 
2644   static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
2645     std::string Str;
2646     raw_string_ostream SS(Str);
2647     SS << "SU(" << SU->NodeNum << ')';
2648     return SS.str();
2649   }
2650   static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
2651     return G->getGraphNodeLabel(SU);
2652   }
2653 
2654   static std::string getNodeAttributes(const SUnit *N,
2655                                        const ScheduleDAG *Graph) {
2656     std::string Str("shape=Mrecord");
2657     const SchedDFSResult *DFS =
2658       static_cast<const ScheduleDAGMI*>(Graph)->getDFSResult();
2659     if (DFS) {
2660       Str += ",style=filled,fillcolor=\"#";
2661       Str += DOT::getColorString(DFS->getSubtreeID(N));
2662       Str += '"';
2663     }
2664     return Str;
2665   }
2666 };
2667 } // namespace llvm
2668 #endif // NDEBUG
2669 
2670 /// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
2671 /// rendered using 'dot'.
2672 ///
2673 void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
2674 #ifndef NDEBUG
2675   ViewGraph(this, Name, false, Title);
2676 #else
2677   errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
2678          << "systems with Graphviz or gv!\n";
2679 #endif  // NDEBUG
2680 }
2681 
2682 /// Out-of-line implementation with no arguments is handy for gdb.
2683 void ScheduleDAGMI::viewGraph() {
2684   viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
2685 }
2686