//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "misched"

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include <queue>

using namespace llvm;

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
}

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

static cl::opt<bool> EnableLoadCluster("misched-cluster", cl::Hidden,
  cl::desc("Enable load clustering."), cl::init(true));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
    MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedContext,
                         public MachineFunctionPass {
public:
  MachineScheduler();

  virtual void getAnalysisUsage(AnalysisUsage &AU) const;

  virtual void releaseMemory() {}

  virtual bool runOnMachineFunction(MachineFunction&);

  virtual void print(raw_ostream &O, const Module* = 0) const;

  static char ID; // Class identification, replacement for typeinfo
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "misched",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "misched",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineFunctionPass(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return 0;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);
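// Any scheduler registered this way becomes selectable on the command line
// via the "misched" option above; for example, -misched=default routes
// through useDefaultMachineSched and falls back to the target's choice.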

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C);


/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}
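// For example, given the hypothetical stream [DBG_VALUE, %add, DBG_VALUE],
// nextIfDebug(begin, end) returns the iterator at %add, and
// priorNonDebug(end, begin) also lands on %add, skipping the trailing
// DBG_VALUE.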

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AliasAnalysis>();

  LIS = &getAnalysis<LiveIntervals>();
  const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();

  if (VerifyScheduling) {
    DEBUG(LIS->print(dbgs()));
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor == useDefaultMachineSched) {
    // Get the default scheduler set by the target.
    Ctor = MachineSchedRegistry::getDefault();
    if (!Ctor) {
      Ctor = createConvergingSched;
      MachineSchedRegistry::setDefault(Ctor);
    }
  }
  // Instantiate the selected scheduler.
  OwningPtr<ScheduleDAGInstrs> Scheduler(Ctor(this));

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler->startBlock(MBB);

    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
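    //
    // For example, a hypothetical block containing:
    //   %a = ...
    //   CALL @f       ; scheduling boundary
    //   %b = ...
    //   JMP %exit     ; terminator, also a boundary
    // is processed as two regions, bottom-up: first [%b, JMP), then
    // [%a, CALL).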
    unsigned RemainingInstrs = MBB->size();
    for (MachineBasicBlock::iterator RegionEnd = MBB->end();
         RegionEnd != MBB->begin(); RegionEnd = Scheduler->begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end()
          || TII->isSchedulingBoundary(llvm::prior(RegionEnd), MBB, *MF)) {
        --RegionEnd;
        // Count the boundary instruction.
        --RemainingInstrs;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      MachineBasicBlock::iterator I = RegionEnd;
      for (; I != MBB->begin(); --I, --RemainingInstrs) {
        if (TII->isSchedulingBoundary(llvm::prior(I), MBB, *MF))
          break;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler->enterRegion(MBB, I, RegionEnd, RemainingInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == llvm::prior(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler->exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n  From: " << *I << "    To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " Remaining: " << RemainingInstrs << "\n");

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler->schedule();

      // Close the current region.
      Scheduler->exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler->begin();
    }
    assert(RemainingInstrs == 0 && "Instruction count mismatch!");
    Scheduler->finishBlock();
  }
  Scheduler->finalizeSchedule();
  DEBUG(LIS->print(dbgs()));
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

void MachineScheduler::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ReadyQueue::dump() {
  dbgs() << "  " << Name << ": ";
  for (unsigned i = 0, e = Queue.size(); i < e; ++i)
    dbgs() << Queue[i]->NodeNum << " ";
  dbgs() << "\n";
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMI::~ScheduleDAGMI() {
  delete DFSResult;
  DeleteContainerPointers(Mutations);
  delete SchedImpl;
}

bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(MachineInstr *MI,
                                    MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals.
  LIS->handleMove(MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned endcount)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, endcount);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd =
    (RegionEnd == bb->end()) ? RegionEnd : llvm::next(RegionEnd);
}

// Set up the register pressure trackers for the top and bottom scheduled
// regions.
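//
// Conceptually, TopRPTracker starts at RegionBegin and advances downward as
// instructions are scheduled from the top, while BotRPTracker starts at
// LiveRegionEnd and recedes upward; the instructions between them form the
// unscheduled zone.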
void ScheduleDAGMI::initRegPressure() {
  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.getPressure().dump(TRI));

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    BotRPTracker.recede();

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = TRI->getRegPressureSetLimit(i);
    DEBUG(dbgs() << TRI->getRegPressureSetName(i)
          << " Limit " << Limit
          << " Actual " << RegionPressure[i] << "\n");
    if (RegionPressure[i] > Limit)
      RegionCriticalPSets.push_back(PressureElement(i, 0));
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].PSetID) << " ";
        dbgs() << "\n");
}

// FIXME: When the pressure tracker deals in pressure differences then we won't
// iterate over all RegionCriticalPSets[i].
void ScheduleDAGMI::
updateScheduledPressure(const std::vector<unsigned> &NewMaxPressure) {
  for (unsigned i = 0, e = RegionCriticalPSets.size(); i < e; ++i) {
    unsigned ID = RegionCriticalPSets[i].PSetID;
    int &MaxUnits = RegionCriticalPSets[i].UnitIncrease;
    if ((int)NewMaxPressure[ID] > MaxUnits)
      MaxUnits = NewMaxPressure[ID];
  }
  DEBUG(
    for (unsigned i = 0, e = NewMaxPressure.size(); i < e; ++i) {
      unsigned Limit = TRI->getRegPressureSetLimit(i);
      if (NewMaxPressure[i] > Limit) {
        dbgs() << "  " << TRI->getRegPressureSetName(i) << ": "
               << NewMaxPressure[i] << " > " << Limit << "\n";
      }
    });
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMI then it will want to override this virtual method in order to
/// update any specialized state.
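///
/// As a minimal sketch (with hypothetical MyStrategy/createMySched names), a
/// strategy-only scheduler can be hooked up without touching this driver:
///
///   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
///     // MyStrategy implements MachineSchedStrategy.
///     return new ScheduleDAGMI(C, new MyStrategy());
///   }
///   static MachineSchedRegistry
///   MySchedRegistry("my-sched", "Description.", createMySched);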
void ScheduleDAGMI::schedule() {
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Build the DAG and set up three register pressure trackers.
void ScheduleDAGMI::buildDAGWithRegPressure() {
  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
    Mutations[i]->apply(this);
  }
}

void ScheduleDAGMI::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomUp=*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

void ScheduleDAGMI::findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                                          SmallVectorImpl<SUnit*> &BotRoots) {
  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    SUnit *SU = &(*I);
    assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU->biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!I->NumPredsLeft)
      TopRoots.push_back(SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!I->NumSuccsLeft)
      BotRoots.push_back(SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and set up scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = NULL;
  NextClusterPred = NULL;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
    SchedImpl->releaseTopNode(*I);
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  TopRPTracker.setPos(CurrentTop);

  CurrentBottom = RegionEnd;
}

/// Move an instruction and update register pressure.
void ScheduleDAGMI::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    // Update top scheduled pressure.
    TopRPTracker.advance();
    assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
    updateScheduledPressure(TopRPTracker.getPressure().MaxSetPressure);
  }
  else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
    }
    // Update bottom scheduled pressure.
    BotRPTracker.recede();
    assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
    updateScheduledPressure(BotRPTracker.getPressure().MaxSetPressure);
  }
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;

  if (DFSResult) {
    unsigned SubtreeID = DFSResult->getSubtreeID(SU);
    if (!ScheduledTrees.test(SubtreeID)) {
      ScheduledTrees.set(SubtreeID);
      DFSResult->scheduleTree(SubtreeID);
      SchedImpl->scheduleTree(SubtreeID);
    }
  }

  // Notify the scheduling strategy after updating the DAG.
  SchedImpl->schedNode(SU, IsTopNode);
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == llvm::prior(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = NULL;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// LoadClusterMutation - DAG post-processing to cluster loads.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between neighboring
/// loads.
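///
/// For example, two loads from the same base register at offsets 4 and 8 that
/// share a chain predecessor receive a weak cluster edge, biasing the
/// scheduler to issue them back to back.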
class LoadClusterMutation : public ScheduleDAGMutation {
  struct LoadInfo {
    SUnit *SU;
    unsigned BaseReg;
    unsigned Offset;
    LoadInfo(SUnit *su, unsigned reg, unsigned ofs)
      : SU(su), BaseReg(reg), Offset(ofs) {}
  };
  static bool LoadInfoLess(const LoadClusterMutation::LoadInfo &LHS,
                           const LoadClusterMutation::LoadInfo &RHS);

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
public:
  LoadClusterMutation(const TargetInstrInfo *tii,
                      const TargetRegisterInfo *tri)
    : TII(tii), TRI(tri) {}

  virtual void apply(ScheduleDAGMI *DAG);
protected:
  void clusterNeighboringLoads(ArrayRef<SUnit*> Loads, ScheduleDAGMI *DAG);
};
} // anonymous

bool LoadClusterMutation::LoadInfoLess(
  const LoadClusterMutation::LoadInfo &LHS,
  const LoadClusterMutation::LoadInfo &RHS) {
  if (LHS.BaseReg != RHS.BaseReg)
    return LHS.BaseReg < RHS.BaseReg;
  return LHS.Offset < RHS.Offset;
}

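// Sorting with LoadInfoLess groups loads by base register and orders each
// group by increasing offset, e.g. (r0,+0), (r0,+8), (r1,+4); the scan below
// then only needs to compare adjacent records.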
void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
                                                  ScheduleDAGMI *DAG) {
  SmallVector<LoadClusterMutation::LoadInfo,32> LoadRecords;
  for (unsigned Idx = 0, End = Loads.size(); Idx != End; ++Idx) {
    SUnit *SU = Loads[Idx];
    unsigned BaseReg;
    unsigned Offset;
    if (TII->getLdStBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
      LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset));
  }
  if (LoadRecords.size() < 2)
    return;
  std::sort(LoadRecords.begin(), LoadRecords.end(), LoadInfoLess);
  unsigned ClusterLength = 1;
  for (unsigned Idx = 0, End = LoadRecords.size(); Idx < (End - 1); ++Idx) {
    if (LoadRecords[Idx].BaseReg != LoadRecords[Idx+1].BaseReg) {
      ClusterLength = 1;
      continue;
    }

    SUnit *SUa = LoadRecords[Idx].SU;
    SUnit *SUb = LoadRecords[Idx+1].SU;
    if (TII->shouldClusterLoads(SUa->getInstr(), SUb->getInstr(), ClusterLength)
        && DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {

      DEBUG(dbgs() << "Cluster loads SU(" << SUa->NodeNum << ") - SU("
            << SUb->NodeNum << ")\n");
      // Copy successor edges from SUa to SUb. Interleaving computation
      // dependent on SUa can prevent load combining due to register reuse.
      // Predecessor edges do not need to be copied from SUb to SUa since nearby
      // loads should have effectively the same inputs.
      for (SUnit::const_succ_iterator
             SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
        if (SI->getSUnit() == SUb)
          continue;
        DEBUG(dbgs() << "  Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
        DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
      }
      ++ClusterLength;
    }
    else
      ClusterLength = 1;
  }
}

/// \brief Callback from DAG postProcessing to create cluster edges for loads.
void LoadClusterMutation::apply(ScheduleDAGMI *DAG) {
  // Map DAG NodeNum to store chain ID.
  DenseMap<unsigned, unsigned> StoreChainIDs;
  // Map each store chain to a set of dependent loads.
  SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->mayLoad())
      continue;
    unsigned ChainPredID = DAG->SUnits.size();
    for (SUnit::const_pred_iterator
           PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
      if (PI->isCtrl()) {
        ChainPredID = PI->getSUnit()->NodeNum;
        break;
      }
    }
    // Check if this chain-like pred has been seen
    // before. ChainPredID==MaxNodeID for loads at the top of the schedule.
    unsigned NumChains = StoreChainDependents.size();
    std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
      StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
    if (Result.second)
      StoreChainDependents.resize(NumChains + 1);
    StoreChainDependents[Result.first->second].push_back(SU);
  }
  // Iterate over the store chains.
  for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
    clusterNeighboringLoads(StoreChainDependents[Idx], DAG);
}

//===----------------------------------------------------------------------===//
// MacroFusion - DAG post-processing to encourage fusion of macro ops.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between instructions
/// that may be fused by the processor into a single operation.
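///
/// For example, many x86 cores can fuse a compare with an immediately
/// following conditional branch into one macro-op; the cluster edge added
/// here keeps such pairs adjacent in the schedule.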
class MacroFusion : public ScheduleDAGMutation {
  const TargetInstrInfo *TII;
public:
  MacroFusion(const TargetInstrInfo *tii): TII(tii) {}

  virtual void apply(ScheduleDAGMI *DAG);
};
} // anonymous

/// \brief Callback from DAG postProcessing to create cluster edges to encourage
/// fused operations.
void MacroFusion::apply(ScheduleDAGMI *DAG) {
  // For now, assume targets can only fuse with the branch.
  MachineInstr *Branch = DAG->ExitSU.getInstr();
  if (!Branch)
    return;

  for (unsigned Idx = DAG->SUnits.size(); Idx > 0;) {
    SUnit *SU = &DAG->SUnits[--Idx];
    if (!TII->shouldScheduleAdjacent(SU->getInstr(), Branch))
      continue;

    // Create a single weak edge from SU to ExitSU. The only effect is to cause
    // bottom-up scheduling to heavily prioritize the clustered SU. There is no
    // need to copy predecessor edges from ExitSU to SU, since top-down
    // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
    // of SU, we could create an artificial edge from the deepest root, but it
    // hasn't been needed yet.
    bool Success = DAG->addEdge(&DAG->ExitSU, SDep(SU, SDep::Cluster));
    (void)Success;
    assert(Success && "No DAG nodes should be reachable from ExitSU");

    DEBUG(dbgs() << "Macro Fuse SU(" << SU->NodeNum << ")\n");
    break;
  }
}

//===----------------------------------------------------------------------===//
// CopyConstrain - DAG post-processing to encourage copy elimination.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create weak edges from all uses of a copy to
/// the one use that defines the copy's source vreg, most likely an induction
/// variable increment.
class CopyConstrain : public ScheduleDAGMutation {
  // Transient state.
  SlotIndex RegionBeginIdx;
  // RegionEndIdx is the slot index of the last non-debug instruction in the
  // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
  SlotIndex RegionEndIdx;
public:
  CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}

  virtual void apply(ScheduleDAGMI *DAG);

protected:
  void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMI *DAG);
};
} // anonymous

/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
/// I0:     = dst
/// I1: src = ...
/// I2:     = dst
/// I3: dst = src (copy)
/// (create pred->succ edges I0->I1, I2->I1)
///
/// 2) Local copy:
/// I0: dst = src (copy)
/// I1:     = dst
/// I2: src = ...
/// I3:     = dst
/// (create pred->succ edges I1->I2, I3->I2)
///
/// Although the MachineScheduler is currently constrained to single blocks,
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMI *DAG) {
  LiveIntervals *LIS = DAG->getLIS();
  MachineInstr *Copy = CopySU->getInstr();

  // Check for pure vreg copies.
  unsigned SrcReg = Copy->getOperand(1).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
    return;

  unsigned DstReg = Copy->getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(DstReg))
    return;

  // Check if either the dest or source is local. If it's live across a back
  // edge, it's not local. Note that if both vregs are live across the back
  // edge, we cannot successfully constrain the copy without cyclic scheduling.
  unsigned LocalReg = DstReg;
  unsigned GlobalReg = SrcReg;
  LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
  if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
    LocalReg = SrcReg;
    GlobalReg = DstReg;
    LocalLI = &LIS->getInterval(LocalReg);
    if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
      return;
  }
  LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);

  // Find the global segment after the start of the local LI.
  LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
  // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
  // local live range. We could create edges from other global uses to the local
  // start, but the coalescer should have already eliminated these cases, so
  // don't bother dealing with it.
  if (GlobalSegment == GlobalLI->end())
    return;

  // If GlobalSegment is killed at the LocalLI->start, the call to find()
  // returned the next global segment. But if GlobalSegment overlaps with
  // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
  // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
  if (GlobalSegment->contains(LocalLI->beginIndex()))
    ++GlobalSegment;

  if (GlobalSegment == GlobalLI->end())
    return;

  // Check if GlobalLI contains a hole in the vicinity of LocalLI.
  if (GlobalSegment != GlobalLI->begin()) {
    // Two address defs have no hole.
    if (SlotIndex::isSameInstr(llvm::prior(GlobalSegment)->end,
                               GlobalSegment->start)) {
      return;
    }
    // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
    // it would be a disconnected component in the live range.
    assert(llvm::prior(GlobalSegment)->start < LocalLI->beginIndex() &&
           "Disconnected LRG within the scheduling region.");
  }
  MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
  if (!GlobalDef)
    return;

  SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
  if (!GlobalSU)
    return;

  // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
  // constraining the uses of the last local def to precede GlobalDef.
  SmallVector<SUnit*,8> LocalUses;
  const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
  MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
  SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
  for (SUnit::const_succ_iterator
         I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
       I != E; ++I) {
    if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
      continue;
    if (I->getSUnit() == GlobalSU)
      continue;
    if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
      return;
    LocalUses.push_back(I->getSUnit());
  }
  // Open the top of the GlobalLI hole by constraining any earlier global uses
  // to precede the start of LocalLI.
  SmallVector<SUnit*,8> GlobalUses;
  MachineInstr *FirstLocalDef =
    LIS->getInstructionFromIndex(LocalLI->beginIndex());
  SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
  for (SUnit::const_pred_iterator
         I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
    if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
      continue;
    if (I->getSUnit() == FirstLocalSU)
      continue;
    if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
      return;
    GlobalUses.push_back(I->getSUnit());
  }
  DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
  // Add the weak edges.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Local use SU(" << (*I)->NodeNum << ") -> SU("
          << GlobalSU->NodeNum << ")\n");
    DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
  }
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Global use SU(" << (*I)->NodeNum << ") -> SU("
          << FirstLocalSU->NodeNum << ")\n");
    DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
  }
}

/// \brief Callback from DAG postProcessing to create weak edges to encourage
/// copy elimination.
void CopyConstrain::apply(ScheduleDAGMI *DAG) {
  MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
  if (FirstPos == DAG->end())
    return;
  RegionBeginIdx = DAG->getLIS()->getInstructionIndex(&*FirstPos);
  RegionEndIdx = DAG->getLIS()->getInstructionIndex(
    &*priorNonDebug(DAG->end(), DAG->begin()));

  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->isCopy())
      continue;

    constrainLocalCopy(SU, DAG);
  }
}

//===----------------------------------------------------------------------===//
// ConvergingScheduler - Implementation of the generic MachineSchedStrategy.
//===----------------------------------------------------------------------===//

namespace {
/// ConvergingScheduler shrinks the unscheduled zone using heuristics to balance
/// the schedule.
class ConvergingScheduler : public MachineSchedStrategy {
public:
  /// Represent the type of SchedCandidate found within a single queue.
  /// pickNodeBidirectional depends on these, listed by decreasing priority.
  enum CandReason {
    NoCand, PhysRegCopy, SingleExcess, SingleCritical, Cluster, Weak,
    ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce,
    TopDepthReduce, TopPathReduce, SingleMax, MultiPressure, NextDefUse,
    NodeOrder};

#ifndef NDEBUG
  static const char *getReasonStr(ConvergingScheduler::CandReason Reason);
#endif

  /// Policy for scheduling the next instruction in the candidate's zone.
  struct CandPolicy {
    bool ReduceLatency;
    unsigned ReduceResIdx;
    unsigned DemandResIdx;

    CandPolicy(): ReduceLatency(false), ReduceResIdx(0), DemandResIdx(0) {}
  };

  /// Status of an instruction's critical resource consumption.
  struct SchedResourceDelta {
    // Count critical resources in the scheduled region required by SU.
    unsigned CritResources;

    // Count critical resources from another region consumed by SU.
    unsigned DemandedResources;

    SchedResourceDelta(): CritResources(0), DemandedResources(0) {}

    bool operator==(const SchedResourceDelta &RHS) const {
      return CritResources == RHS.CritResources
        && DemandedResources == RHS.DemandedResources;
    }
    bool operator!=(const SchedResourceDelta &RHS) const {
      return !operator==(RHS);
    }
  };

  /// Store the state used by ConvergingScheduler heuristics, required for the
  /// lifetime of one invocation of pickNode().
  struct SchedCandidate {
    CandPolicy Policy;

    // The best SUnit candidate.
    SUnit *SU;

    // The reason for this candidate.
    CandReason Reason;

    // Register pressure values for the best candidate.
    RegPressureDelta RPDelta;

    // Critical resource consumption of the best candidate.
    SchedResourceDelta ResDelta;

    SchedCandidate(const CandPolicy &policy)
    : Policy(policy), SU(NULL), Reason(NoCand) {}

    bool isValid() const { return SU; }

    // Copy the status of another candidate without changing policy.
    void setBest(SchedCandidate &Best) {
      assert(Best.Reason != NoCand && "uninitialized Sched candidate");
      SU = Best.SU;
      Reason = Best.Reason;
      RPDelta = Best.RPDelta;
      ResDelta = Best.ResDelta;
    }

    void initResourceDelta(const ScheduleDAGMI *DAG,
                           const TargetSchedModel *SchedModel);
  };

  /// Summarize the unscheduled region.
  struct SchedRemainder {
    // Critical path through the DAG in expected latency.
    unsigned CriticalPath;

    // Scaled count of micro-ops left to schedule.
    unsigned RemIssueCount;

    // Unscheduled resources.
    SmallVector<unsigned, 16> RemainingCounts;

    void reset() {
      CriticalPath = 0;
      RemIssueCount = 0;
      RemainingCounts.clear();
    }

    SchedRemainder() { reset(); }

    void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);
  };

  /// Each Scheduling boundary is associated with ready queues. It tracks the
  /// current cycle in the direction of movement, and maintains the state
  /// of "hazards" and other interlocks at the current cycle.
  struct SchedBoundary {
    ScheduleDAGMI *DAG;
    const TargetSchedModel *SchedModel;
    SchedRemainder *Rem;

    ReadyQueue Available;
    ReadyQueue Pending;
    bool CheckPending;

    // For heuristics, keep a list of the nodes that immediately depend on the
    // most recently scheduled node.
    SmallPtrSet<const SUnit*, 8> NextSUs;

    ScheduleHazardRecognizer *HazardRec;

    /// Number of cycles it takes to issue the instructions scheduled in this
    /// zone. It is defined as: scheduled-micro-ops / issue-width + stalls.
    /// See getStalls().
    unsigned CurrCycle;

    /// Micro-ops issued in the current cycle.
    unsigned CurrMOps;

    /// MinReadyCycle - Cycle of the soonest available instruction.
    unsigned MinReadyCycle;

    // The expected latency of the critical path in this scheduled zone.
    unsigned ExpectedLatency;

    // The latency of dependence chains leading into this zone.
    // For each node scheduled top-down: DLat = max DLat, N.Depth.
    // For each cycle scheduled: DLat -= 1.
    unsigned DependentLatency;

    /// Count the scheduled (issued) micro-ops that can be retired by
    /// time=CurrCycle assuming the first scheduled instr is retired at time=0.
    unsigned RetiredMOps;

    // Count scheduled resources that have been executed. Resources are
    // considered executed if they become ready in the time that it takes to
    // saturate any resource including the one in question. Counts are scaled
    // for direct comparison with other resources. Counts can be compared with
    // MOps * getMicroOpFactor and Latency * getLatencyFactor.
    SmallVector<unsigned, 16> ExecutedResCounts;

    /// Cache the max count for a single resource.
    unsigned MaxExecutedResCount;

    // Cache the critical resources ID in this scheduled zone.
    unsigned ZoneCritResIdx;

    // Whether the scheduled region is resource limited vs. latency limited.
    bool IsResourceLimited;

#ifndef NDEBUG
    // Remember the greatest operand latency as an upper bound on the number of
    // times we should retry the pending queue because of a hazard.
    unsigned MaxObservedLatency;
#endif

    void reset() {
      // A new HazardRec is created for each DAG and owned by SchedBoundary.
      delete HazardRec;

      Available.clear();
      Pending.clear();
      CheckPending = false;
      NextSUs.clear();
      HazardRec = 0;
      CurrCycle = 0;
      CurrMOps = 0;
      MinReadyCycle = UINT_MAX;
      ExpectedLatency = 0;
      DependentLatency = 0;
      RetiredMOps = 0;
      MaxExecutedResCount = 0;
      ZoneCritResIdx = 0;
      IsResourceLimited = false;
#ifndef NDEBUG
      MaxObservedLatency = 0;
#endif
      // Reserve a zero-count for invalid CritResIdx.
      ExecutedResCounts.resize(1);
      assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
    }

    /// Pending queues extend the ready queues with the same ID and the
    /// PendingFlag set.
    SchedBoundary(unsigned ID, const Twine &Name):
      DAG(0), SchedModel(0), Rem(0), Available(ID, Name+".A"),
      Pending(ID << ConvergingScheduler::LogMaxQID, Name+".P"),
      HazardRec(0) {
      reset();
    }

    ~SchedBoundary() { delete HazardRec; }

    void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel,
              SchedRemainder *rem);

    bool isTop() const {
      return Available.getID() == ConvergingScheduler::TopQID;
    }

    const char *getResourceName(unsigned PIdx) {
      if (!PIdx)
        return "MOps";
      return SchedModel->getProcResource(PIdx)->Name;
    }

    /// Get the number of latency cycles "covered" by the scheduled
    /// instructions. This is the larger of the critical path within the zone
    /// and the number of cycles required to issue the instructions.
    unsigned getScheduledLatency() const {
      return std::max(ExpectedLatency, CurrCycle);
    }

    unsigned getUnscheduledLatency(SUnit *SU) const {
      return isTop() ? SU->getHeight() : SU->getDepth();
    }

    unsigned getResourceCount(unsigned ResIdx) const {
      return ExecutedResCounts[ResIdx];
    }

    /// Get the scaled count of scheduled micro-ops and resources, including
    /// executed resources.
    unsigned getCriticalCount() const {
      if (!ZoneCritResIdx)
        return RetiredMOps * SchedModel->getMicroOpFactor();
      return getResourceCount(ZoneCritResIdx);
    }

    /// Get a scaled count for the minimum execution time of the scheduled
    /// micro-ops that are ready to execute by getExecutedCount. Notice the
    /// feedback loop.
    unsigned getExecutedCount() const {
      return std::max(CurrCycle * SchedModel->getLatencyFactor(),
                      MaxExecutedResCount);
    }

    bool checkHazard(SUnit *SU);

    unsigned findMaxLatency(ArrayRef<SUnit*> ReadySUs);

    unsigned getOtherResourceCount(unsigned &OtherCritIdx);

    void setPolicy(CandPolicy &Policy, SchedBoundary &OtherZone);

    void releaseNode(SUnit *SU, unsigned ReadyCycle);

    void bumpCycle(unsigned NextCycle);

    void incExecutedResources(unsigned PIdx, unsigned Count);

    unsigned countResource(unsigned PIdx, unsigned Cycles, unsigned ReadyCycle);

    void bumpNode(SUnit *SU);

    void releasePending();

    void removeReady(SUnit *SU);

    SUnit *pickOnlyChoice();

    void dumpScheduledState();
  };

private:
  ScheduleDAGMI *DAG;
  const TargetSchedModel *SchedModel;
  const TargetRegisterInfo *TRI;

  // State of the top and bottom scheduled instruction boundaries.
  SchedRemainder Rem;
  SchedBoundary Top;
  SchedBoundary Bot;

public:
  /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
  enum {
    TopQID = 1,
    BotQID = 2,
    LogMaxQID = 2
  };
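
  // Note: the queue IDs above double as flags in SUnit::NodeQueueId, and the
  // pending queues created in SchedBoundary use ID << LogMaxQID, i.e.
  // TopQ.P == 4 and BotQ.P == 8, keeping them disjoint from TopQID and BotQID.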
1393 
1394   ConvergingScheduler():
1395     DAG(0), SchedModel(0), TRI(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}
1396 
1397   virtual void initialize(ScheduleDAGMI *dag);
1398 
1399   virtual SUnit *pickNode(bool &IsTopNode);
1400 
1401   virtual void schedNode(SUnit *SU, bool IsTopNode);
1402 
1403   virtual void releaseTopNode(SUnit *SU);
1404 
1405   virtual void releaseBottomNode(SUnit *SU);
1406 
1407   virtual void registerRoots();
1408 
1409 protected:
1410   void tryCandidate(SchedCandidate &Cand,
1411                     SchedCandidate &TryCand,
1412                     SchedBoundary &Zone,
1413                     const RegPressureTracker &RPTracker,
1414                     RegPressureTracker &TempTracker);
1415 
1416   SUnit *pickNodeBidirectional(bool &IsTopNode);
1417 
1418   void pickNodeFromQueue(SchedBoundary &Zone,
1419                          const RegPressureTracker &RPTracker,
1420                          SchedCandidate &Candidate);
1421 
1422   void reschedulePhysRegCopies(SUnit *SU, bool isTop);
1423 
1424 #ifndef NDEBUG
1425   void traceCandidate(const SchedCandidate &Cand);
1426 #endif
1427 };
1428 } // namespace
1429 
1430 void ConvergingScheduler::SchedRemainder::
1431 init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
1432   reset();
1433   if (!SchedModel->hasInstrSchedModel())
1434     return;
1435   RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
1436   for (std::vector<SUnit>::iterator
1437          I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
1438     const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
1439     RemIssueCount += SchedModel->getNumMicroOps(I->getInstr(), SC)
1440       * SchedModel->getMicroOpFactor();
1441     for (TargetSchedModel::ProcResIter
1442            PI = SchedModel->getWriteProcResBegin(SC),
1443            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1444       unsigned PIdx = PI->ProcResourceIdx;
1445       unsigned Factor = SchedModel->getResourceFactor(PIdx);
1446       RemainingCounts[PIdx] += (Factor * PI->Cycles);
1447     }
1448   }
1449 }
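
// Illustrative numbers (hypothetical model values): an instruction that
// decodes to 2 micro-ops with a micro-op factor of 3 adds 2 * 3 = 6 scaled
// units to RemIssueCount; each write resource entry adds
// getResourceFactor(PIdx) * Cycles scaled units to that resource's
// RemainingCounts slot.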
1450 
1451 void ConvergingScheduler::SchedBoundary::
1452 init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
1453   reset();
1454   DAG = dag;
1455   SchedModel = smodel;
1456   Rem = rem;
1457   if (SchedModel->hasInstrSchedModel())
1458     ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds());
1459 }
1460 
1461 void ConvergingScheduler::initialize(ScheduleDAGMI *dag) {
1462   DAG = dag;
1463   SchedModel = DAG->getSchedModel();
1464   TRI = DAG->TRI;
1465 
1466   Rem.init(DAG, SchedModel);
1467   Top.init(DAG, SchedModel, &Rem);
1468   Bot.init(DAG, SchedModel, &Rem);
1469 
1470   // Initialize resource counts.
1471 
1472   // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
1473   // are disabled, then these HazardRecs will be disabled.
1474   const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
1475   const TargetMachine &TM = DAG->MF.getTarget();
1476   Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
1477   Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
1478 
1479   assert((!ForceTopDown || !ForceBottomUp) &&
1480          "-misched-topdown incompatible with -misched-bottomup");
1481 }
1482 
1483 void ConvergingScheduler::releaseTopNode(SUnit *SU) {
1484   if (SU->isScheduled)
1485     return;
1486 
1487   for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1488        I != E; ++I) {
1489     if (I->isWeak())
1490       continue;
1491     unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle;
1492     unsigned Latency = I->getLatency();
1493 #ifndef NDEBUG
1494     Top.MaxObservedLatency = std::max(Latency, Top.MaxObservedLatency);
1495 #endif
1496     if (SU->TopReadyCycle < PredReadyCycle + Latency)
1497       SU->TopReadyCycle = PredReadyCycle + Latency;
1498   }
1499   Top.releaseNode(SU, SU->TopReadyCycle);
1500 }
1501 
1502 void ConvergingScheduler::releaseBottomNode(SUnit *SU) {
1503   if (SU->isScheduled)
1504     return;
1505 
1506   assert(SU->getInstr() && "Scheduled SUnit must have instr");
1507 
1508   for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1509        I != E; ++I) {
1510     if (I->isWeak())
1511       continue;
1512     unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
1513     unsigned Latency = I->getLatency();
1514 #ifndef NDEBUG
1515     Bot.MaxObservedLatency = std::max(Latency, Bot.MaxObservedLatency);
1516 #endif
1517     if (SU->BotReadyCycle < SuccReadyCycle + Latency)
1518       SU->BotReadyCycle = SuccReadyCycle + Latency;
1519   }
1520   Bot.releaseNode(SU, SU->BotReadyCycle);
1521 }
1522 
1523 void ConvergingScheduler::registerRoots() {
1524   Rem.CriticalPath = DAG->ExitSU.getDepth();
1525   // Some roots may not feed into ExitSU. Check all of them just in case.
1526   for (std::vector<SUnit*>::const_iterator
1527          I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
1528     if ((*I)->getDepth() > Rem.CriticalPath)
1529       Rem.CriticalPath = (*I)->getDepth();
1530   }
1531   DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n');
1532 }
1533 
1534 /// Does this SU have a hazard within the current instruction group.
1535 ///
1536 /// The scheduler supports two modes of hazard recognition. The first is the
1537 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
1538 /// supports highly complicated in-order reservation tables
1539 /// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
1540 ///
1541 /// The second is a streamlined mechanism that checks for hazards based on
1542 /// simple counters that the scheduler itself maintains. It explicitly checks
1543 /// for instruction dispatch limitations, including the number of micro-ops that
1544 /// can dispatch per cycle.
1545 ///
1546 /// TODO: Also check whether the SU must start a new group.
1547 bool ConvergingScheduler::SchedBoundary::checkHazard(SUnit *SU) {
1548   if (HazardRec->isEnabled())
1549     return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard;
1550 
1551   unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
1552   if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
1553     DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
1554           << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
1555     return true;
1556   }
1557   return false;
1558 }
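
// Illustrative sketch (hypothetical numbers): with an issue width of 4, if 3
// micro-ops have already issued this cycle (CurrMOps == 3) and the candidate
// decodes to 2 micro-ops, then 3 + 2 > 4 and checkHazard reports a hazard, so
// the node waits in the Pending queue until the cycle is bumped.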
1559 
1560 // Find the unscheduled node in ReadySUs with the highest latency.
1561 unsigned ConvergingScheduler::SchedBoundary::
1562 findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
1563   SUnit *LateSU = 0;
1564   unsigned RemLatency = 0;
1565   for (ArrayRef<SUnit*>::iterator I = ReadySUs.begin(), E = ReadySUs.end();
1566        I != E; ++I) {
1567     unsigned L = getUnscheduledLatency(*I);
1568     if (L > RemLatency) {
1569       RemLatency = L;
1570       LateSU = *I;
1571     }
1572   }
1573   if (LateSU) {
1574     DEBUG(dbgs() << Available.getName() << " RemLatency SU("
1575           << LateSU->NodeNum << ") " << RemLatency << "c\n");
1576   }
1577   return RemLatency;
1578 }
1579 
1580 // Count resources in this zone and the remaining unscheduled
1581 // instructions. Return the max count, scaled. Set OtherCritIdx to the critical
1582 // resource index, or zero if the zone is issue limited.
1583 unsigned ConvergingScheduler::SchedBoundary::
1584 getOtherResourceCount(unsigned &OtherCritIdx) {
1585   if (!SchedModel->hasInstrSchedModel())
1586     return 0;
1587 
1588   unsigned OtherCritCount = Rem->RemIssueCount
1589     + (RetiredMOps * SchedModel->getMicroOpFactor());
1590   DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
1591         << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
1592   OtherCritIdx = 0;
1593   for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
1594        PIdx != PEnd; ++PIdx) {
1595     unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
1596     if (OtherCount > OtherCritCount) {
1597       OtherCritCount = OtherCount;
1598       OtherCritIdx = PIdx;
1599     }
1600   }
1601   if (OtherCritIdx) {
1602     DEBUG(dbgs() << "  " << Available.getName() << " + Remain CritRes: "
1603           << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
1604           << " " << getResourceName(OtherCritIdx) << "\n");
1605   }
1606   return OtherCritCount;
1607 }
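
// Illustrative numbers (hypothetical counts): if remaining issue plus retired
// micro-ops scale to 40 units while some resource's scheduled-plus-remaining
// count is 48, that resource's index is written to OtherCritIdx and 48 is
// returned; OtherCritIdx of zero means micro-op issue itself is the limit.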
1608 
1609 /// Set the CandPolicy for this zone given the current resources and latencies
1610 /// inside and outside the zone.
1611 void ConvergingScheduler::SchedBoundary::setPolicy(CandPolicy &Policy,
1612                                                    SchedBoundary &OtherZone) {
1613   // Now that potential stalls have been considered, apply preemptive heuristics
1614 // based on the total latency and resources inside and outside this
1615   // zone.
1616 
1617   // Compute remaining latency. We need this both to determine whether the
1618   // overall schedule has become latency-limited and whether the instructions
1619   // outside this zone are resource or latency limited.
1620   //
1621   // The "dependent" latency is updated incrementally during scheduling as the
1622   // max height/depth of scheduled nodes minus the cycles since it was
1623   // scheduled:
1624 //   DLat = max(N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
1625   //
1626   // The "independent" latency is the max ready queue depth:
1627   //   ILat = max N.depth for N in Available|Pending
1628   //
1629   // RemainingLatency is the greater of independent and dependent latency.
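  //
  // Illustrative numbers: a node of depth 10 that became ready at cycle 2,
  // with CurrCycle == 5, contributes 10 - (5 - 2) = 7 cycles of dependent
  // latency; a pending node of depth 9 contributes 9 cycles of independent
  // latency, so RemainingLatency below would be 9.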
1630   unsigned RemLatency = DependentLatency;
1631   RemLatency = std::max(RemLatency, findMaxLatency(Available.elements()));
1632   RemLatency = std::max(RemLatency, findMaxLatency(Pending.elements()));
1633 
1634   // Compute the critical resource outside the zone.
1635   unsigned OtherCritIdx;
1636   unsigned OtherCount = OtherZone.getOtherResourceCount(OtherCritIdx);
1637 
1638   bool OtherResLimited = false;
1639   if (SchedModel->hasInstrSchedModel()) {
1640     unsigned LFactor = SchedModel->getLatencyFactor();
1641     OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
1642   }
1643   if (!OtherResLimited && (RemLatency + CurrCycle > Rem->CriticalPath)) {
1644     Policy.ReduceLatency |= true;
1645     DEBUG(dbgs() << "  " << Available.getName() << " RemainingLatency "
1646           << RemLatency << " + " << CurrCycle << "c > CritPath "
1647           << Rem->CriticalPath << "\n");
1648   }
1649   // If the same resource is limiting inside and outside the zone, do nothing.
1650   if (IsResourceLimited && OtherResLimited && (ZoneCritResIdx == OtherCritIdx))
1651     return;
1652 
1653   DEBUG(
1654     if (IsResourceLimited) {
1655       dbgs() << "  " << Available.getName() << " ResourceLimited: "
1656              << getResourceName(ZoneCritResIdx) << "\n";
1657     }
1658     if (OtherResLimited)
1659       dbgs() << "  RemainingLimit: " << getResourceName(OtherCritIdx);
1660     if (!IsResourceLimited && !OtherResLimited)
1661       dbgs() << "  Latency limited both directions.\n");
1662 
1663   if (IsResourceLimited && !Policy.ReduceResIdx)
1664     Policy.ReduceResIdx = ZoneCritResIdx;
1665 
1666   if (OtherResLimited)
1667     Policy.DemandResIdx = OtherCritIdx;
1668 }
1669 
1670 void ConvergingScheduler::SchedBoundary::releaseNode(SUnit *SU,
1671                                                      unsigned ReadyCycle) {
1672   if (ReadyCycle < MinReadyCycle)
1673     MinReadyCycle = ReadyCycle;
1674 
1675   // Check for interlocks first. For the purpose of other heuristics, an
1676   // instruction that cannot issue appears as if it's not in the ReadyQueue.
1677   bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
1678   if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU))
1679     Pending.push(SU);
1680   else
1681     Available.push(SU);
1682 
1683   // Record this node as an immediate dependent of the scheduled node.
1684   NextSUs.insert(SU);
1685 }
1686 
1687 /// Move the boundary of scheduled code by one cycle.
1688 void ConvergingScheduler::SchedBoundary::bumpCycle(unsigned NextCycle) {
1689   if (SchedModel->getMicroOpBufferSize() == 0) {
1690     assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
1691     if (MinReadyCycle > NextCycle)
1692       NextCycle = MinReadyCycle;
1693   }
1694   // Update the current micro-ops, which will issue in the next cycle.
1695   unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
1696   CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;
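  // Illustrative numbers: with IssueWidth == 4, advancing from cycle 3 to
  // cycle 5 gives DecMOps == 4 * (5 - 3) == 8, and CurrMOps saturates at zero
  // rather than wrapping.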
1697 
1698   // Decrement DependentLatency based on the next cycle.
1699   if ((NextCycle - CurrCycle) > DependentLatency)
1700     DependentLatency = 0;
1701   else
1702     DependentLatency -= (NextCycle - CurrCycle);
1703 
1704   if (!HazardRec->isEnabled()) {
1705     // Bypass HazardRec virtual calls.
1706     CurrCycle = NextCycle;
1707   }
1708   else {
1709     // Bypass getHazardType calls in case of long latency.
1710     for (; CurrCycle != NextCycle; ++CurrCycle) {
1711       if (isTop())
1712         HazardRec->AdvanceCycle();
1713       else
1714         HazardRec->RecedeCycle();
1715     }
1716   }
1717   CheckPending = true;
1718   unsigned LFactor = SchedModel->getLatencyFactor();
1719   IsResourceLimited =
1720     (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
1721     > (int)LFactor;
1722 
1723   DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n');
1724 }
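
// Illustrative numbers (hypothetical model values): with a latency factor of
// 2, a critical count of 14 and a scheduled latency of 5 give
// 14 - (5 * 2) = 4 > 2, so the zone counts as resource limited (the critical
// resource is more than a full cycle ahead of latency).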
1725 
1726 void ConvergingScheduler::SchedBoundary::incExecutedResources(unsigned PIdx,
1727                                                               unsigned Count) {
1728   ExecutedResCounts[PIdx] += Count;
1729   if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
1730     MaxExecutedResCount = ExecutedResCounts[PIdx];
1731 }
1732 
1733 /// Add the given processor resource to this scheduled zone.
1734 ///
1735 /// \param Cycles indicates the number of consecutive (non-pipelined) cycles
1736 /// during which this resource is consumed.
1737 ///
1738 /// \return the next cycle at which the instruction may execute without
1739 /// oversubscribing resources.
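///
/// Illustrative numbers: if getResourceFactor(PIdx) is 2 and Cycles is 3, the
/// zone's count for PIdx grows by 2 * 3 = 6 scaled units, and the same amount
/// is deducted from the remainder's RemainingCounts[PIdx].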
1740 unsigned ConvergingScheduler::SchedBoundary::
1741 countResource(unsigned PIdx, unsigned Cycles, unsigned ReadyCycle) {
1742   unsigned Factor = SchedModel->getResourceFactor(PIdx);
1743   unsigned Count = Factor * Cycles;
1744   DEBUG(dbgs() << "  " << getResourceName(PIdx)
1745         << " +" << Cycles << "x" << Factor << "u\n");
1746 
1747   // Update the executed resource counts.
1748   incExecutedResources(PIdx, Count);
1749   assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
1750   Rem->RemainingCounts[PIdx] -= Count;
1751 
1752   // Check if this resource exceeds the current critical resource by a full
1753   // cycle. If so, it becomes the critical resource.
1754   if (ZoneCritResIdx != PIdx
1755       && ((int)(getResourceCount(PIdx) - getCriticalCount())
1756           >= (int)SchedModel->getLatencyFactor())) {
1757     ZoneCritResIdx = PIdx;
1758     DEBUG(dbgs() << "  *** Critical resource "
1759           << getResourceName(PIdx) << ": "
1760           << getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
1761   }
1762   // TODO: We don't yet model reserved resources. It's not hard though.
1763   return CurrCycle;
1764 }
1765 
1766 /// Move the boundary of scheduled code by one SUnit.
1767 void ConvergingScheduler::SchedBoundary::bumpNode(SUnit *SU) {
1768   // Update the reservation table.
1769   if (HazardRec->isEnabled()) {
1770     if (!isTop() && SU->isCall) {
1771       // Calls are scheduled with their preceding instructions. For bottom-up
1772       // scheduling, clear the pipeline state before emitting.
1773       HazardRec->Reset();
1774     }
1775     HazardRec->EmitInstruction(SU);
1776   }
1777   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1778   unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
1779   CurrMOps += IncMOps;
1780   // checkHazard prevents scheduling multiple instructions per cycle that exceed
1781 // the issue width. However, we commonly reach the maximum. In this case
1782   // opportunistically bump the cycle to avoid uselessly checking everything in
1783   // the readyQ. Furthermore, a single instruction may produce more than one
1784   // cycle's worth of micro-ops.
1785   //
1786   // TODO: Also check if this SU must end a dispatch group.
1787   unsigned NextCycle = CurrCycle;
1788   if (CurrMOps >= SchedModel->getIssueWidth()) {
1789     ++NextCycle;
1790     DEBUG(dbgs() << "  *** Max MOps " << CurrMOps
1791           << " at cycle " << CurrCycle << '\n');
1792   }
1793   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
1794   DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");
1795 
1796   switch (SchedModel->getMicroOpBufferSize()) {
1797   case 0:
1798     assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
1799     break;
1800   case 1:
1801     if (ReadyCycle > NextCycle) {
1802       NextCycle = ReadyCycle;
1803       DEBUG(dbgs() << "  *** Stall until: " << ReadyCycle << "\n");
1804     }
1805     break;
1806   default:
1807     // We don't currently model the OOO reorder buffer, so consider all
1808     // scheduled MOps to be "retired".
1809     break;
1810   }
1811   RetiredMOps += IncMOps;
1812 
1813   // Update resource counts and critical resource.
1814   if (SchedModel->hasInstrSchedModel()) {
1815     unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
1816     assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
1817     Rem->RemIssueCount -= DecRemIssue;
1818     if (ZoneCritResIdx) {
1819       // Scale scheduled micro-ops for comparing with the critical resource.
1820       unsigned ScaledMOps =
1821         RetiredMOps * SchedModel->getMicroOpFactor();
1822 
1823       // If scaled micro-ops are now more than the previous critical resource by
1824       // a full cycle, then micro-ops issue becomes critical.
1825       if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
1826           >= (int)SchedModel->getLatencyFactor()) {
1827         ZoneCritResIdx = 0;
1828         DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
1829               << ScaledMOps / SchedModel->getLatencyFactor() << "c\n");
1830       }
1831     }
1832     for (TargetSchedModel::ProcResIter
1833            PI = SchedModel->getWriteProcResBegin(SC),
1834            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1835       unsigned RCycle =
1836         countResource(PI->ProcResourceIdx, PI->Cycles, ReadyCycle);
1837       if (RCycle > NextCycle)
1838         NextCycle = RCycle;
1839     }
1840   }
1841   // Update ExpectedLatency and DependentLatency.
1842   unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
1843   unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
1844   if (SU->getDepth() > TopLatency) {
1845     TopLatency = SU->getDepth();
1846     DEBUG(dbgs() << "  " << Available.getName()
1847           << " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n");
1848   }
1849   if (SU->getHeight() > BotLatency) {
1850     BotLatency = SU->getHeight();
1851     DEBUG(dbgs() << "  " << Available.getName()
1852           << " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n");
1853   }
1854   // If we stall for any reason, bump the cycle.
1855   if (NextCycle > CurrCycle) {
1856     bumpCycle(NextCycle);
1857   }
1858   else {
1859     // After updating ZoneCritResIdx and ExpectedLatency, check if we're
1860     // resource limited. If a stall occurred, bumpCycle does this.
1861     unsigned LFactor = SchedModel->getLatencyFactor();
1862     IsResourceLimited =
1863       (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
1864       > (int)LFactor;
1865   }
1866   DEBUG(dumpScheduledState());
1867 }
1868 
1869 /// Release pending ready nodes into the available queue. This makes them
1870 /// visible to heuristics.
1871 void ConvergingScheduler::SchedBoundary::releasePending() {
1872   // If the available queue is empty, it is safe to reset MinReadyCycle.
1873   if (Available.empty())
1874     MinReadyCycle = UINT_MAX;
1875 
1876   // Check to see if any of the pending instructions are ready to issue.  If
1877   // so, add them to the available queue.
1878   bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
1879   for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
1880     SUnit *SU = *(Pending.begin()+i);
1881     unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
1882 
1883     if (ReadyCycle < MinReadyCycle)
1884       MinReadyCycle = ReadyCycle;
1885 
1886     if (!IsBuffered && ReadyCycle > CurrCycle)
1887       continue;
1888 
1889     if (checkHazard(SU))
1890       continue;
1891 
1892     Available.push(SU);
1893     Pending.remove(Pending.begin()+i);
1894     --i; --e;
1895   }
1896   DEBUG(if (!Pending.empty()) Pending.dump());
1897   CheckPending = false;
1898 }
1899 
1900 /// Remove SU from the ready set for this boundary.
1901 void ConvergingScheduler::SchedBoundary::removeReady(SUnit *SU) {
1902   if (Available.isInQueue(SU))
1903     Available.remove(Available.find(SU));
1904   else {
1905     assert(Pending.isInQueue(SU) && "bad ready count");
1906     Pending.remove(Pending.find(SU));
1907   }
1908 }
1909 
1910 /// If this queue only has one ready candidate, return it. As a side effect,
1911 /// defer any nodes that now hit a hazard, and advance the cycle until at least
1912 /// one node is ready. If multiple instructions are ready, return NULL.
1913 SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() {
1914   if (CheckPending)
1915     releasePending();
1916 
1917   if (CurrMOps > 0) {
1918     // Defer any ready instrs that now have a hazard.
1919     for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
1920       if (checkHazard(*I)) {
1921         Pending.push(*I);
1922         I = Available.remove(I);
1923         continue;
1924       }
1925       ++I;
1926     }
1927   }
1928   for (unsigned i = 0; Available.empty(); ++i) {
1929     assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedLatency) &&
1930            "permanent hazard"); (void)i;
1931     bumpCycle(CurrCycle + 1);
1932     releasePending();
1933   }
1934   if (Available.size() == 1)
1935     return *Available.begin();
1936   return NULL;
1937 }
1938 
1939 // This is useful information to dump after bumpNode.
1940 // Note that the Queue contents are more useful before pickNodeFromQueue.
1941 void ConvergingScheduler::SchedBoundary::dumpScheduledState() {
1942   unsigned ResFactor;
1943   unsigned ResCount;
1944   if (ZoneCritResIdx) {
1945     ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
1946     ResCount = getResourceCount(ZoneCritResIdx);
1947   }
1948   else {
1949     ResFactor = SchedModel->getMicroOpFactor();
1950     ResCount = RetiredMOps * SchedModel->getMicroOpFactor();
1951   }
1952   unsigned LFactor = SchedModel->getLatencyFactor();
1953   dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
1954          << "  Retired: " << RetiredMOps;
1955   dbgs() << "\n  Executed: " << getExecutedCount() / LFactor << "c";
1956   dbgs() << "\n  Critical: " << ResCount / LFactor << "c, "
1957          << ResCount / ResFactor << " " << getResourceName(ZoneCritResIdx)
1958          << "\n  ExpectedLatency: " << ExpectedLatency << "c\n"
1959          << (IsResourceLimited ? "  - Resource" : "  - Latency")
1960          << " limited.\n";
1961 }
1962 
1963 void ConvergingScheduler::SchedCandidate::
1964 initResourceDelta(const ScheduleDAGMI *DAG,
1965                   const TargetSchedModel *SchedModel) {
1966   if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
1967     return;
1968 
1969   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1970   for (TargetSchedModel::ProcResIter
1971          PI = SchedModel->getWriteProcResBegin(SC),
1972          PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1973     if (PI->ProcResourceIdx == Policy.ReduceResIdx)
1974       ResDelta.CritResources += PI->Cycles;
1975     if (PI->ProcResourceIdx == Policy.DemandResIdx)
1976       ResDelta.DemandedResources += PI->Cycles;
1977   }
1978 }
1979 
1980 /// Return true if this heuristic determines order.
1981 static bool tryLess(int TryVal, int CandVal,
1982                     ConvergingScheduler::SchedCandidate &TryCand,
1983                     ConvergingScheduler::SchedCandidate &Cand,
1984                     ConvergingScheduler::CandReason Reason) {
1985   if (TryVal < CandVal) {
1986     TryCand.Reason = Reason;
1987     return true;
1988   }
1989   if (TryVal > CandVal) {
1990     if (Cand.Reason > Reason)
1991       Cand.Reason = Reason;
1992     return true;
1993   }
1994   return false;
1995 }
1996 
1997 static bool tryGreater(int TryVal, int CandVal,
1998                        ConvergingScheduler::SchedCandidate &TryCand,
1999                        ConvergingScheduler::SchedCandidate &Cand,
2000                        ConvergingScheduler::CandReason Reason) {
2001   if (TryVal > CandVal) {
2002     TryCand.Reason = Reason;
2003     return true;
2004   }
2005   if (TryVal < CandVal) {
2006     if (Cand.Reason > Reason)
2007       Cand.Reason = Reason;
2008     return true;
2009   }
2010   return false;
2011 }
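
// Illustrative sketch (hypothetical values): tryLess(1, 2, TryCand, Cand, R)
// marks TryCand as winning for reason R and returns true; tryLess(2, 1, ...)
// keeps Cand but strengthens Cand.Reason to R if R ranks higher (numerically
// lower), also returning true; equal values return false so tryCandidate can
// fall through to the next heuristic.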
2012 
2013 static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
2014   return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
2015 }
2016 
2017 /// Minimize physical register live ranges. Regalloc wants them adjacent to
2018 /// their physreg def/use.
2019 ///
2020 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
2021 /// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
2022 /// with the operation that produces or consumes the physreg. We'll do this when
2023 /// regalloc has support for parallel copies.
2024 static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
2025   const MachineInstr *MI = SU->getInstr();
2026   if (!MI->isCopy())
2027     return 0;
2028 
2029   unsigned ScheduledOper = isTop ? 1 : 0;
2030   unsigned UnscheduledOper = isTop ? 0 : 1;
2031   // If we have already scheduled the physreg producer/consumer, immediately
2032   // schedule the copy.
2033   if (TargetRegisterInfo::isPhysicalRegister(
2034         MI->getOperand(ScheduledOper).getReg()))
2035     return 1;
2036   // If the physreg is at the boundary, defer it. Otherwise schedule it
2037   // immediately to free the dependent. We can hoist the copy later.
2038   bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
2039   if (TargetRegisterInfo::isPhysicalRegister(
2040         MI->getOperand(UnscheduledOper).getReg()))
2041     return AtBoundary ? -1 : 1;
2042   return 0;
2043 }
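
// Illustrative sketch (hypothetical operands): when scheduling top-down, a
// copy like "%vreg = COPY %EAX" whose physreg source (operand 1) has an
// already scheduled producer returns 1 and is picked immediately; a copy
// "%EAX = COPY %vreg" with an unscheduled physreg def (operand 0) returns -1
// at the region boundary (no successors left) and 1 otherwise.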
2044 
2045 /// Apply a set of heuristics to a new candidate. Heuristics are currently
2046 /// hierarchical. This may be more efficient than a graduated cost model because
2047 /// we don't need to evaluate all aspects of the model for each node in the
2048 /// queue. But it's really done to make the heuristics easier to debug and
2049 /// statistically analyze.
2050 ///
2051 /// \param Cand provides the policy and current best candidate.
2052 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
2053 /// \param Zone describes the scheduled zone that we are extending.
2054 /// \param RPTracker describes reg pressure within the scheduled zone.
2055 /// \param TempTracker is a scratch pressure tracker to reuse in queries.
2056 void ConvergingScheduler::tryCandidate(SchedCandidate &Cand,
2057                                        SchedCandidate &TryCand,
2058                                        SchedBoundary &Zone,
2059                                        const RegPressureTracker &RPTracker,
2060                                        RegPressureTracker &TempTracker) {
2061 
2062   // Always initialize TryCand's RPDelta.
2063   TempTracker.getMaxPressureDelta(TryCand.SU->getInstr(), TryCand.RPDelta,
2064                                   DAG->getRegionCriticalPSets(),
2065                                   DAG->getRegPressure().MaxSetPressure);
2066 
2067   // Initialize the candidate if needed.
2068   if (!Cand.isValid()) {
2069     TryCand.Reason = NodeOrder;
2070     return;
2071   }
2072 
2073   if (tryGreater(biasPhysRegCopy(TryCand.SU, Zone.isTop()),
2074                  biasPhysRegCopy(Cand.SU, Zone.isTop()),
2075                  TryCand, Cand, PhysRegCopy))
2076     return;
2077 
2078   // Avoid exceeding the target's limit.
2079   if (tryLess(TryCand.RPDelta.Excess.UnitIncrease,
2080               Cand.RPDelta.Excess.UnitIncrease, TryCand, Cand, SingleExcess))
2081     return;
2082   if (Cand.Reason == SingleExcess)
2083     Cand.Reason = MultiPressure;
2084 
2085   // Avoid increasing the max critical pressure in the scheduled region.
2086   if (tryLess(TryCand.RPDelta.CriticalMax.UnitIncrease,
2087               Cand.RPDelta.CriticalMax.UnitIncrease,
2088               TryCand, Cand, SingleCritical))
2089     return;
2090   if (Cand.Reason == SingleCritical)
2091     Cand.Reason = MultiPressure;
2092 
2093   // Keep clustered nodes together to encourage downstream peephole
2094   // optimizations which may reduce resource requirements.
2095   //
2096   // This is a best effort to set things up for a post-RA pass. Optimizations
2097   // like generating loads of multiple registers should ideally be done within
2098   // the scheduler pass by combining the loads during DAG postprocessing.
2099   const SUnit *NextClusterSU =
2100     Zone.isTop() ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
2101   if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU,
2102                  TryCand, Cand, Cluster))
2103     return;
2104 
2105   // Weak edges are for clustering and other constraints.
2106   //
2107   // Deferring TryCand here does not change Cand's reason. This is good in the
2108   // sense that a bad candidate shouldn't affect a previous candidate's
2109 // goodness, but bad in that it is asymmetric and depends on queue order.
2110   CandReason OrigReason = Cand.Reason;
2111   if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()),
2112               getWeakLeft(Cand.SU, Zone.isTop()),
2113               TryCand, Cand, Weak)) {
2114     Cand.Reason = OrigReason;
2115     return;
2116   }
2117   // Avoid critical resource consumption and balance the schedule.
2118   TryCand.initResourceDelta(DAG, SchedModel);
2119   if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
2120               TryCand, Cand, ResourceReduce))
2121     return;
2122   if (tryGreater(TryCand.ResDelta.DemandedResources,
2123                  Cand.ResDelta.DemandedResources,
2124                  TryCand, Cand, ResourceDemand))
2125     return;
2126 
2127   // Avoid serializing long latency dependence chains.
2128   if (Cand.Policy.ReduceLatency) {
2129     if (Zone.isTop()) {
2130       if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
2131         if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2132                     TryCand, Cand, TopDepthReduce))
2133           return;
2134       }
2135       if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2136                      TryCand, Cand, TopPathReduce))
2137         return;
2138     }
2139     else {
2140       if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
2141         if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2142                     TryCand, Cand, BotHeightReduce))
2143           return;
2144       }
2145       if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2146                      TryCand, Cand, BotPathReduce))
2147         return;
2148     }
2149   }
2150 
2151   // Avoid increasing the max pressure of the entire region.
2152   if (tryLess(TryCand.RPDelta.CurrentMax.UnitIncrease,
2153               Cand.RPDelta.CurrentMax.UnitIncrease, TryCand, Cand, SingleMax))
2154     return;
2155   if (Cand.Reason == SingleMax)
2156     Cand.Reason = MultiPressure;
2157 
2158   // Prefer immediate defs/users of the last scheduled instruction. This is a
2159   // local pressure avoidance strategy that also makes the machine code
2160   // readable.
2161   if (tryGreater(Zone.NextSUs.count(TryCand.SU), Zone.NextSUs.count(Cand.SU),
2162                  TryCand, Cand, NextDefUse))
2163     return;
2164 
2165   // Fall through to original instruction order.
2166   if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
2167       || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
2168     TryCand.Reason = NodeOrder;
2169   }
2170 }
2171 
2172 /// pickNodeFromQueue helper that returns true if the LHS reg pressure effect is
2173 /// more desirable than RHS from scheduling standpoint.
2174 static bool compareRPDelta(const RegPressureDelta &LHS,
2175                            const RegPressureDelta &RHS) {
2176   // Compare each component of pressure in decreasing order of importance
2177   // without checking if any are valid. Invalid PressureElements are assumed to
2178   // have UnitIncrease==0, so are neutral.
2179 
2180   // Avoid exceeding the target's register pressure limit.
2181   if (LHS.Excess.UnitIncrease != RHS.Excess.UnitIncrease) {
2182     DEBUG(dbgs() << "  RP excess top - bot: "
2183           << (LHS.Excess.UnitIncrease - RHS.Excess.UnitIncrease) << '\n');
2184     return LHS.Excess.UnitIncrease < RHS.Excess.UnitIncrease;
2185   }
2186   // Avoid increasing the max critical pressure in the scheduled region.
2187   if (LHS.CriticalMax.UnitIncrease != RHS.CriticalMax.UnitIncrease) {
2188     DEBUG(dbgs() << "  RP critical top - bot: "
2189           << (LHS.CriticalMax.UnitIncrease - RHS.CriticalMax.UnitIncrease)
2190           << '\n');
2191     return LHS.CriticalMax.UnitIncrease < RHS.CriticalMax.UnitIncrease;
2192   }
2193   // Avoid increasing the max pressure of the entire region.
2194   if (LHS.CurrentMax.UnitIncrease != RHS.CurrentMax.UnitIncrease) {
2195     DEBUG(dbgs() << "  RP current top - bot: "
2196           << (LHS.CurrentMax.UnitIncrease - RHS.CurrentMax.UnitIncrease)
2197           << '\n');
2198     return LHS.CurrentMax.UnitIncrease < RHS.CurrentMax.UnitIncrease;
2199   }
2200   return false;
2201 }
2202 
2203 #ifndef NDEBUG
2204 const char *ConvergingScheduler::getReasonStr(
2205   ConvergingScheduler::CandReason Reason) {
2206   switch (Reason) {
2207   case NoCand:         return "NOCAND    ";
2208   case PhysRegCopy:    return "PREG-COPY ";
2209   case SingleExcess:   return "REG-EXCESS";
2210   case SingleCritical: return "REG-CRIT  ";
2211   case Cluster:        return "CLUSTER   ";
2212   case Weak:           return "WEAK      ";
2213   case SingleMax:      return "REG-MAX   ";
2214   case MultiPressure:  return "REG-MULTI ";
2215   case ResourceReduce: return "RES-REDUCE";
2216   case ResourceDemand: return "RES-DEMAND";
2217   case TopDepthReduce: return "TOP-DEPTH ";
2218   case TopPathReduce:  return "TOP-PATH  ";
2219   case BotHeightReduce:return "BOT-HEIGHT";
2220   case BotPathReduce:  return "BOT-PATH  ";
2221   case NextDefUse:     return "DEF-USE   ";
2222   case NodeOrder:      return "ORDER     ";
2223   }
2224   llvm_unreachable("Unknown reason!");
2225 }
2226 
2227 void ConvergingScheduler::traceCandidate(const SchedCandidate &Cand) {
2228   PressureElement P;
2229   unsigned ResIdx = 0;
2230   unsigned Latency = 0;
2231   switch (Cand.Reason) {
2232   default:
2233     break;
2234   case SingleExcess:
2235     P = Cand.RPDelta.Excess;
2236     break;
2237   case SingleCritical:
2238     P = Cand.RPDelta.CriticalMax;
2239     break;
2240   case SingleMax:
2241     P = Cand.RPDelta.CurrentMax;
2242     break;
2243   case ResourceReduce:
2244     ResIdx = Cand.Policy.ReduceResIdx;
2245     break;
2246   case ResourceDemand:
2247     ResIdx = Cand.Policy.DemandResIdx;
2248     break;
2249   case TopDepthReduce:
2250     Latency = Cand.SU->getDepth();
2251     break;
2252   case TopPathReduce:
2253     Latency = Cand.SU->getHeight();
2254     break;
2255   case BotHeightReduce:
2256     Latency = Cand.SU->getHeight();
2257     break;
2258   case BotPathReduce:
2259     Latency = Cand.SU->getDepth();
2260     break;
2261   }
2262   dbgs() << "  SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
2263   if (P.isValid())
2264     dbgs() << " " << TRI->getRegPressureSetName(P.PSetID)
2265            << ":" << P.UnitIncrease << " ";
2266   else
2267     dbgs() << "      ";
2268   if (ResIdx)
2269     dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
2270   else
2271     dbgs() << "         ";
2272   if (Latency)
2273     dbgs() << " " << Latency << " cycles ";
2274   else
2275     dbgs() << "          ";
2276   dbgs() << '\n';
2277 }
2278 #endif
2279 
2280 /// Pick the best candidate from the top queue.
2281 ///
2282 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
2283 /// DAG building. To adjust for the current scheduling location we need to
2284 /// maintain the number of vreg uses remaining to be top-scheduled.
2285 void ConvergingScheduler::pickNodeFromQueue(SchedBoundary &Zone,
2286                                             const RegPressureTracker &RPTracker,
2287                                             SchedCandidate &Cand) {
2288   ReadyQueue &Q = Zone.Available;
2289 
2290   DEBUG(Q.dump());
2291 
2292   // getMaxPressureDelta temporarily modifies the tracker.
2293   RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
2294 
2295   for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
2296 
2297     SchedCandidate TryCand(Cand.Policy);
2298     TryCand.SU = *I;
2299     tryCandidate(Cand, TryCand, Zone, RPTracker, TempTracker);
2300     if (TryCand.Reason != NoCand) {
2301       // Initialize resource delta if needed in case future heuristics query it.
2302       if (TryCand.ResDelta == SchedResourceDelta())
2303         TryCand.initResourceDelta(DAG, SchedModel);
2304       Cand.setBest(TryCand);
2305       DEBUG(traceCandidate(Cand));
2306     }
2307   }
2308 }
2309 
2310 static void tracePick(const ConvergingScheduler::SchedCandidate &Cand,
2311                       bool IsTop) {
2312   DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
2313         << ConvergingScheduler::getReasonStr(Cand.Reason) << '\n');
2314 }
2315 
2316 /// Pick the best candidate node from either the top or bottom queue.
2317 SUnit *ConvergingScheduler::pickNodeBidirectional(bool &IsTopNode) {
2318   // Schedule as far as possible in the direction of no choice. This is most
2319   // efficient, but also provides the best heuristics for CriticalPSets.
2320   if (SUnit *SU = Bot.pickOnlyChoice()) {
2321     IsTopNode = false;
2322     DEBUG(dbgs() << "Pick Bot NOCAND\n");
2323     return SU;
2324   }
2325   if (SUnit *SU = Top.pickOnlyChoice()) {
2326     IsTopNode = true;
2327     DEBUG(dbgs() << "Pick Top NOCAND\n");
2328     return SU;
2329   }
2330   CandPolicy NoPolicy;
2331   SchedCandidate BotCand(NoPolicy);
2332   SchedCandidate TopCand(NoPolicy);
2333   Bot.setPolicy(BotCand.Policy, Top);
2334   Top.setPolicy(TopCand.Policy, Bot);
2335 
2336   // Prefer bottom scheduling when heuristics are silent.
2337   pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
2338   assert(BotCand.Reason != NoCand && "failed to find the first candidate");
2339 
2340   // If either Q has a single candidate that provides the least increase in
2341   // Excess pressure, we can immediately schedule from that Q.
2342   //
2343   // RegionCriticalPSets summarizes the pressure within the scheduled region and
2344   // affects picking from either Q. If scheduling in one direction must
2345   // increase pressure for one of the excess PSets, then schedule in that
2346   // direction first to provide more freedom in the other direction.
2347   if (BotCand.Reason == SingleExcess || BotCand.Reason == SingleCritical) {
2348     IsTopNode = false;
2349     tracePick(BotCand, IsTopNode);
2350     return BotCand.SU;
2351   }
2352   // Check if the top Q has a better candidate.
2353   pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2354   assert(TopCand.Reason != NoCand && "failed to find the first candidate");
2355 
2356   // If either Q has a single candidate that minimizes pressure above the
2357   // original region's pressure, pick it.
2358   if (TopCand.Reason <= SingleMax || BotCand.Reason <= SingleMax) {
2359     if (TopCand.Reason < BotCand.Reason) {
2360       IsTopNode = true;
2361       tracePick(TopCand, IsTopNode);
2362       return TopCand.SU;
2363     }
2364     IsTopNode = false;
2365     tracePick(BotCand, IsTopNode);
2366     return BotCand.SU;
2367   }
2368   // Check for a salient pressure difference and pick the best from either side.
2369   if (compareRPDelta(TopCand.RPDelta, BotCand.RPDelta)) {
2370     IsTopNode = true;
2371     tracePick(TopCand, IsTopNode);
2372     return TopCand.SU;
2373   }
2374   // Otherwise prefer the bottom candidate, in node order if all else failed.
2375   if (TopCand.Reason < BotCand.Reason) {
2376     IsTopNode = true;
2377     tracePick(TopCand, IsTopNode);
2378     return TopCand.SU;
2379   }
2380   IsTopNode = false;
2381   tracePick(BotCand, IsTopNode);
2382   return BotCand.SU;
2383 }
2384 
2385 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
2386 SUnit *ConvergingScheduler::pickNode(bool &IsTopNode) {
2387   if (DAG->top() == DAG->bottom()) {
2388     assert(Top.Available.empty() && Top.Pending.empty() &&
2389            Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
2390     return NULL;
2391   }
2392   SUnit *SU;
2393   do {
2394     if (ForceTopDown) {
2395       SU = Top.pickOnlyChoice();
2396       if (!SU) {
2397         CandPolicy NoPolicy;
2398         SchedCandidate TopCand(NoPolicy);
2399         pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2400         assert(TopCand.Reason != NoCand && "failed to find the first candidate");
2401         SU = TopCand.SU;
2402       }
2403       IsTopNode = true;
2404     }
2405     else if (ForceBottomUp) {
2406       SU = Bot.pickOnlyChoice();
2407       if (!SU) {
2408         CandPolicy NoPolicy;
2409         SchedCandidate BotCand(NoPolicy);
2410         pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
2411         assert(BotCand.Reason != NoCand && "failed to find the first candidate");
2412         SU = BotCand.SU;
2413       }
2414       IsTopNode = false;
2415     }
2416     else {
2417       SU = pickNodeBidirectional(IsTopNode);
2418     }
2419   } while (SU->isScheduled);
2420 
2421   if (SU->isTopReady())
2422     Top.removeReady(SU);
2423   if (SU->isBottomReady())
2424     Bot.removeReady(SU);
2425 
2426   DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
2427   return SU;
2428 }
2429 
2430 void ConvergingScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
2431 
2432   MachineBasicBlock::iterator InsertPos = SU->getInstr();
2433   if (!isTop)
2434     ++InsertPos;
2435   SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;
2436 
2437   // Find already scheduled copies with a single physreg dependence and move
2438   // them just above the scheduled instruction.
2439   for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end();
2440        I != E; ++I) {
2441     if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg()))
2442       continue;
2443     SUnit *DepSU = I->getSUnit();
2444     if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
2445       continue;
2446     MachineInstr *Copy = DepSU->getInstr();
2447     if (!Copy->isCopy())
2448       continue;
2449     DEBUG(dbgs() << "  Rescheduling physreg copy ";
2450           I->getSUnit()->dump(DAG));
2451     DAG->moveInstruction(Copy, InsertPos);
2452   }
2453 }
2454 
2455 /// Update the scheduler's state after scheduling a node. This is the same node
2456 /// that was just returned by pickNode(). However, ScheduleDAGMI needs to update
2457 /// its state based on the current cycle before MachineSchedStrategy does.
2458 ///
2459 /// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
2460 /// them here. See comments in biasPhysRegCopy.
2461 void ConvergingScheduler::schedNode(SUnit *SU, bool IsTopNode) {
2462   if (IsTopNode) {
2463     SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.CurrCycle);
2464     Top.bumpNode(SU);
2465     if (SU->hasPhysRegUses)
2466       reschedulePhysRegCopies(SU, true);
2467   }
2468   else {
2469     SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.CurrCycle);
2470     Bot.bumpNode(SU);
2471     if (SU->hasPhysRegDefs)
2472       reschedulePhysRegCopies(SU, false);
2473   }
2474 }
2475 
2476 /// Create the standard converging machine scheduler. This will be used as the
2477 /// default scheduler if the target does not set a default.
2478 static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
2479   assert((!ForceTopDown || !ForceBottomUp) &&
2480          "-misched-topdown incompatible with -misched-bottomup");
2481   ScheduleDAGMI *DAG = new ScheduleDAGMI(C, new ConvergingScheduler());
2482   // Register DAG post-processors.
2483   //
2484   // FIXME: extend the mutation API to allow earlier mutations to instantiate
2485   // data and pass it to later mutations. Have a single mutation that gathers
2486   // the interesting nodes in one pass.
2487   DAG->addMutation(new CopyConstrain(DAG->TII, DAG->TRI));
2488   if (EnableLoadCluster)
2489     DAG->addMutation(new LoadClusterMutation(DAG->TII, DAG->TRI));
2490   if (EnableMacroFusion)
2491     DAG->addMutation(new MacroFusion(DAG->TII));
2492   return DAG;
2493 }
2494 static MachineSchedRegistry
2495 ConvergingSchedRegistry("converge", "Standard converging scheduler.",
2496                         createConvergingSched);
2497 
2498 //===----------------------------------------------------------------------===//
2499 // ILP Scheduler. Currently for experimental analysis of heuristics.
2500 //===----------------------------------------------------------------------===//
2501 
2502 namespace {
2503 /// \brief Order nodes by the ILP metric.
2504 struct ILPOrder {
2505   const SchedDFSResult *DFSResult;
2506   const BitVector *ScheduledTrees;
2507   bool MaximizeILP;
2508 
2509   ILPOrder(bool MaxILP): DFSResult(0), ScheduledTrees(0), MaximizeILP(MaxILP) {}
2510 
2511   /// \brief Apply a less-than relation on node priority.
2512   ///
2513   /// (Return true if A comes after B in the Q.)
2514   bool operator()(const SUnit *A, const SUnit *B) const {
2515     unsigned SchedTreeA = DFSResult->getSubtreeID(A);
2516     unsigned SchedTreeB = DFSResult->getSubtreeID(B);
2517     if (SchedTreeA != SchedTreeB) {
2518       // Unscheduled trees have lower priority.
2519       if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
2520         return ScheduledTrees->test(SchedTreeB);
2521 
2522       // Trees with shallower connections have lower priority.
2523       if (DFSResult->getSubtreeLevel(SchedTreeA)
2524           != DFSResult->getSubtreeLevel(SchedTreeB)) {
2525         return DFSResult->getSubtreeLevel(SchedTreeA)
2526           < DFSResult->getSubtreeLevel(SchedTreeB);
2527       }
2528     }
2529     if (MaximizeILP)
2530       return DFSResult->getILP(A) < DFSResult->getILP(B);
2531     else
2532       return DFSResult->getILP(A) > DFSResult->getILP(B);
2533   }
2534 };
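
// Illustrative sketch: nodes from already scheduled subtrees sort ahead of
// nodes from unscheduled ones; among ties, the deeper subtree connection
// level wins, and MaximizeILP then pops the node with the higher ILP value
// first (or the lower one when minimizing).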
2535 
2536 /// \brief Schedule based on the ILP metric.
2537 class ILPScheduler : public MachineSchedStrategy {
2538   /// In case all subtrees are eventually connected to a common root through
2539   /// data dependence (e.g. reduction), place an upper limit on their size.
2540   ///
2541   /// FIXME: A subtree limit is generally good, but in the situation commented
2542   /// above, where multiple similar subtrees feed a common root, we should
2543   /// only split at a point where the resulting subtrees will be balanced.
2544   /// (a motivating test case must be found).
2545   static const unsigned SubtreeLimit = 16;
2546 
2547   ScheduleDAGMI *DAG;
2548   ILPOrder Cmp;
2549 
2550   std::vector<SUnit*> ReadyQ;
2551 public:
2552   ILPScheduler(bool MaximizeILP): DAG(0), Cmp(MaximizeILP) {}
2553 
2554   virtual void initialize(ScheduleDAGMI *dag) {
2555     DAG = dag;
2556     DAG->computeDFSResult();
2557     Cmp.DFSResult = DAG->getDFSResult();
2558     Cmp.ScheduledTrees = &DAG->getScheduledTrees();
2559     ReadyQ.clear();
2560   }
2561 
2562   virtual void registerRoots() {
2563     // Restore the heap in ReadyQ with the updated DFS results.
2564     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2565   }
2566 
2567   /// Implement MachineSchedStrategy interface.
2568   /// -----------------------------------------
2569 
2570   /// Callback to select the highest priority node from the ready Q.
2571   virtual SUnit *pickNode(bool &IsTopNode) {
2572     if (ReadyQ.empty()) return NULL;
2573     std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2574     SUnit *SU = ReadyQ.back();
2575     ReadyQ.pop_back();
2576     IsTopNode = false;
2577     DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") "
2578           << " ILP: " << DAG->getDFSResult()->getILP(SU)
2579           << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @"
2580           << DAG->getDFSResult()->getSubtreeLevel(
2581             DAG->getDFSResult()->getSubtreeID(SU)) << '\n'
2582           << "Scheduling " << *SU->getInstr());
2583     return SU;
2584   }
2585 
2586   /// \brief Scheduler callback to notify that a new subtree is scheduled.
2587   virtual void scheduleTree(unsigned SubtreeID) {
2588     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2589   }
2590 
2591   /// Callback after a node is scheduled. Marking newly scheduled trees and
2592   /// resorting the priority Q are handled by the scheduleTree callback above.
2593   virtual void schedNode(SUnit *SU, bool IsTopNode) {
2594     assert(!IsTopNode && "SchedDFSResult needs bottom-up");
2595   }
2596 
2597   virtual void releaseTopNode(SUnit *) { /*only called for top roots*/ }
2598 
2599   virtual void releaseBottomNode(SUnit *SU) {
2600     ReadyQ.push_back(SU);
2601     std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2602   }
2603 };
2604 } // namespace
2605 
2606 static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
2607   return new ScheduleDAGMI(C, new ILPScheduler(true));
2608 }
2609 static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
2610   return new ScheduleDAGMI(C, new ILPScheduler(false));
2611 }
2612 static MachineSchedRegistry ILPMaxRegistry(
2613   "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
2614 static MachineSchedRegistry ILPMinRegistry(
2615   "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);
2616 
2617 //===----------------------------------------------------------------------===//
2618 // Machine Instruction Shuffler for Correctness Testing
2619 //===----------------------------------------------------------------------===//
2620 
2621 #ifndef NDEBUG
2622 namespace {
2623 /// Apply a less-than relation on the node order, which corresponds to the
2624 /// instruction order prior to scheduling. IsReverse implements greater-than.
2625 template<bool IsReverse>
2626 struct SUnitOrder {
2627   bool operator()(SUnit *A, SUnit *B) const {
2628     if (IsReverse)
2629       return A->NodeNum > B->NodeNum;
2630     else
2631       return A->NodeNum < B->NodeNum;
2632   }
2633 };
2634 
2635 /// Reorder instructions as much as possible.
2636 class InstructionShuffler : public MachineSchedStrategy {
2637   bool IsAlternating;
2638   bool IsTopDown;
2639 
2640   // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
2641   // gives nodes with a higher number higher priority causing the latest
2642   // instructions to be scheduled first.
2643   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
2644     TopQ;
2645   // When scheduling bottom-up, use greater-than as the queue priority.
2646   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
2647     BottomQ;
2648 public:
2649   InstructionShuffler(bool alternate, bool topdown)
2650     : IsAlternating(alternate), IsTopDown(topdown) {}
2651 
2652   virtual void initialize(ScheduleDAGMI *) {
2653     TopQ.clear();
2654     BottomQ.clear();
2655   }
2656 
2657   /// Implement MachineSchedStrategy interface.
2658   /// -----------------------------------------
2659 
2660   virtual SUnit *pickNode(bool &IsTopNode) {
2661     SUnit *SU;
2662     if (IsTopDown) {
2663       do {
2664         if (TopQ.empty()) return NULL;
2665         SU = TopQ.top();
2666         TopQ.pop();
2667       } while (SU->isScheduled);
2668       IsTopNode = true;
2669     }
2670     else {
2671       do {
2672         if (BottomQ.empty()) return NULL;
2673         SU = BottomQ.top();
2674         BottomQ.pop();
2675       } while (SU->isScheduled);
2676       IsTopNode = false;
2677     }
2678     if (IsAlternating)
2679       IsTopDown = !IsTopDown;
2680     return SU;
2681   }
2682 
2683   virtual void schedNode(SUnit *SU, bool IsTopNode) {}
2684 
2685   virtual void releaseTopNode(SUnit *SU) {
2686     TopQ.push(SU);
2687   }
2688   virtual void releaseBottomNode(SUnit *SU) {
2689     BottomQ.push(SU);
2690   }
2691 };
2692 } // namespace
2693 
2694 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
2695   bool Alternate = !ForceTopDown && !ForceBottomUp;
2696   bool TopDown = !ForceBottomUp;
2697   assert((TopDown || !ForceTopDown) &&
2698          "-misched-topdown incompatible with -misched-bottomup");
2699   return new ScheduleDAGMI(C, new InstructionShuffler(Alternate, TopDown));
2700 }
2701 static MachineSchedRegistry ShufflerRegistry(
2702   "shuffle", "Shuffle machine instructions alternating directions",
2703   createInstructionShuffler);
2704 #endif // !NDEBUG
2705 
2706 //===----------------------------------------------------------------------===//
2707 // GraphWriter support for ScheduleDAGMI.
2708 //===----------------------------------------------------------------------===//
2709 
2710 #ifndef NDEBUG
2711 namespace llvm {
2712 
2713 template<> struct GraphTraits<
2714   ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};
2715 
2716 template<>
2717 struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
2718 
2719   DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
2720 
2721   static std::string getGraphName(const ScheduleDAG *G) {
2722     return G->MF.getName();
2723   }
2724 
2725   static bool renderGraphFromBottomUp() {
2726     return true;
2727   }
2728 
2729   static bool isNodeHidden(const SUnit *Node) {
2730     return (Node->NumPreds > 10 || Node->NumSuccs > 10);
2731   }
2732 
2733   static bool hasNodeAddressLabel(const SUnit *Node,
2734                                   const ScheduleDAG *Graph) {
2735     return false;
2736   }
2737 
2738   /// If you want to override the dot attributes printed for a particular
2739   /// edge, override this method.
2740   static std::string getEdgeAttributes(const SUnit *Node,
2741                                        SUnitIterator EI,
2742                                        const ScheduleDAG *Graph) {
2743     if (EI.isArtificialDep())
2744       return "color=cyan,style=dashed";
2745     if (EI.isCtrlDep())
2746       return "color=blue,style=dashed";
2747     return "";
2748   }
2749 
2750   static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
2751     std::string Str;
2752     raw_string_ostream SS(Str);
2753     SS << "SU(" << SU->NodeNum << ')';
2754     return SS.str();
2755   }
2756   static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
2757     return G->getGraphNodeLabel(SU);
2758   }
2759 
2760   static std::string getNodeAttributes(const SUnit *N,
2761                                        const ScheduleDAG *Graph) {
2762     std::string Str("shape=Mrecord");
2763     const SchedDFSResult *DFS =
2764       static_cast<const ScheduleDAGMI*>(Graph)->getDFSResult();
2765     if (DFS) {
2766       Str += ",style=filled,fillcolor=\"#";
2767       Str += DOT::getColorString(DFS->getSubtreeID(N));
2768       Str += '"';
2769     }
2770     return Str;
2771   }
2772 };
2773 } // namespace llvm
2774 #endif // NDEBUG
2775 
2776 /// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
2777 /// rendered using 'dot'.
2778 ///
2779 void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
2780 #ifndef NDEBUG
2781   ViewGraph(this, Name, false, Title);
2782 #else
2783   errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
2784          << "systems with Graphviz or gv!\n";
2785 #endif  // NDEBUG
2786 }
2787 
2788 /// Out-of-line implementation with no arguments is handy for gdb.
2789 void ScheduleDAGMI::viewGraph() {
2790   viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
2791 }
2792