//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "misched"

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include <queue>

using namespace llvm;

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
}

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

static cl::opt<bool> EnableLoadCluster("misched-cluster", cl::Hidden,
  cl::desc("Enable load clustering."), cl::init(true));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
    MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedContext,
                         public MachineFunctionPass {
public:
  MachineScheduler();

  virtual void getAnalysisUsage(AnalysisUsage &AU) const;

  virtual void releaseMemory() {}

  virtual bool runOnMachineFunction(MachineFunction&);

  virtual void print(raw_ostream &O, const Module* = 0) const;

  static char ID; // Class identification, replacement for typeinfo
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "misched",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "misched",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineFunctionPass(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return 0;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);
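
// A target or downstream tool can plug its own scheduler into the registry
// above. A minimal sketch (the factory and strategy names are hypothetical,
// not part of this file):
//
//   static ScheduleDAGInstrs *createMyTargetSched(MachineSchedContext *C) {
//     return new ScheduleDAGMI(C, new MyTargetSchedStrategy());
//   }
//   static MachineSchedRegistry
//   MySchedRegistry("mytarget", "Run MyTarget's custom scheduler.",
//                   createMyTargetSched);
//
// The new entry then becomes selectable on the command line via
// -misched=mytarget.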

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C);


/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
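///
/// As an illustrative sketch (the exact boundaries are whatever the target's
/// TargetInstrInfo::isSchedulingBoundary reports), a block containing a call
/// is divided into a region above the call and a region below it, and the
/// boundary instruction itself is never fed to the DAG builder:
///
///   %a = ...        \
///   %b = ...        / second region visited: [MBB->begin(), call)
///   call @f         > scheduling boundary
///   %c = ...        \
///   %d = ...        / first region visited (regions are visited bottom-up)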
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AliasAnalysis>();

  LIS = &getAnalysis<LiveIntervals>();
  const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();

  if (VerifyScheduling) {
    DEBUG(LIS->print(dbgs()));
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor == useDefaultMachineSched) {
    // Get the default scheduler set by the target.
    Ctor = MachineSchedRegistry::getDefault();
    if (!Ctor) {
      Ctor = createConvergingSched;
      MachineSchedRegistry::setDefault(Ctor);
    }
  }
  // Instantiate the selected scheduler.
  OwningPtr<ScheduleDAGInstrs> Scheduler(Ctor(this));

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler->startBlock(MBB);

    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    unsigned RemainingInstrs = MBB->size();
    for (MachineBasicBlock::iterator RegionEnd = MBB->end();
         RegionEnd != MBB->begin(); RegionEnd = Scheduler->begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end()
          || TII->isSchedulingBoundary(llvm::prior(RegionEnd), MBB, *MF)) {
        --RegionEnd;
        // Count the boundary instruction.
        --RemainingInstrs;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      MachineBasicBlock::iterator I = RegionEnd;
      for (; I != MBB->begin(); --I, --RemainingInstrs) {
        if (TII->isSchedulingBoundary(llvm::prior(I), MBB, *MF))
          break;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler->enterRegion(MBB, I, RegionEnd, RemainingInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == llvm::prior(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler->exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n  From: " << *I << "    To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " Remaining: " << RemainingInstrs << "\n");

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler->schedule();

      // Close the current region.
      Scheduler->exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler->begin();
    }
    assert(RemainingInstrs == 0 && "Instruction count mismatch!");
    Scheduler->finishBlock();
  }
  Scheduler->finalizeSchedule();
  DEBUG(LIS->print(dbgs()));
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

void MachineScheduler::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ReadyQueue::dump() {
  dbgs() << "  " << Name << ": ";
  for (unsigned i = 0, e = Queue.size(); i < e; ++i)
    dbgs() << Queue[i]->NodeNum << " ";
  dbgs() << "\n";
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMI::~ScheduleDAGMI() {
  delete DFSResult;
  DeleteContainerPointers(Mutations);
  delete SchedImpl;
}

bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}
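
// A typical caller is a DAG mutation that wants an ordering edge only when it
// cannot create a cycle, e.g. (a sketch using the calls above, as the
// mutations later in this file do):
//
//   if (DAG->canAddEdge(SuccSU, PredSU))
//     DAG->addEdge(SuccSU, SDep(PredSU, SDep::Artificial));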

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(MachineInstr *MI,
                                    MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals
  LIS->handleMove(MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}
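
// In +Asserts builds, -misched-cutoff gives a bisection knob for isolating
// the scheduling decision that breaks a test case. A hypothetical session:
//
//   llc -enable-misched -misched-cutoff=20 reduced.ll
//
// schedules only the first 20 instructions and leaves the rest in their
// original order.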

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned endcount)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, endcount);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd =
    (RegionEnd == bb->end()) ? RegionEnd : llvm::next(RegionEnd);
}

// Set up the register pressure trackers for the top and bottom scheduled
// regions.
void ScheduleDAGMI::initRegPressure() {
  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.getPressure().dump(TRI));

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    BotRPTracker.recede();

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
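  // For example (hypothetical numbers): if the target reports a limit of 32
  // units for an integer pressure set and this region's max pressure for that
  // set is 35, the set is recorded below and its max scheduled pressure is
  // tracked for the rest of this region.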
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = TRI->getRegPressureSetLimit(i);
    DEBUG(dbgs() << TRI->getRegPressureSetName(i)
          << " Limit " << Limit
          << " Actual " << RegionPressure[i] << "\n");
    if (RegionPressure[i] > Limit)
      RegionCriticalPSets.push_back(PressureElement(i, 0));
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].PSetID) << " ";
        dbgs() << "\n");
}

// FIXME: When the pressure tracker deals in pressure differences then we won't
// iterate over all RegionCriticalPSets[i].
void ScheduleDAGMI::
updateScheduledPressure(const std::vector<unsigned> &NewMaxPressure) {
  for (unsigned i = 0, e = RegionCriticalPSets.size(); i < e; ++i) {
    unsigned ID = RegionCriticalPSets[i].PSetID;
    int &MaxUnits = RegionCriticalPSets[i].UnitIncrease;
    if ((int)NewMaxPressure[ID] > MaxUnits)
      MaxUnits = NewMaxPressure[ID];
  }
  DEBUG(
    for (unsigned i = 0, e = NewMaxPressure.size(); i < e; ++i) {
      unsigned Limit = TRI->getRegPressureSetLimit(i);
      if (NewMaxPressure[i] > Limit) {
        dbgs() << "  " << TRI->getRegPressureSetName(i) << ": "
               << NewMaxPressure[i] << " > " << Limit << "\n";
      }
    });
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMI then it will want to override this virtual method in order to
/// update any specialized state.
void ScheduleDAGMI::schedule() {
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}
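
// A minimal strategy only needs the MachineSchedStrategy callbacks exercised
// above. An illustrative sketch (this FIFO class is hypothetical, not part of
// this file):
//
//   struct FIFOSchedStrategy : public MachineSchedStrategy {
//     std::deque<SUnit*> ReadyQ;
//     virtual void initialize(ScheduleDAGMI *DAG) { ReadyQ.clear(); }
//     virtual SUnit *pickNode(bool &IsTopNode) {
//       if (ReadyQ.empty()) return NULL;  // NULL terminates the loop above.
//       SUnit *SU = ReadyQ.front();
//       ReadyQ.pop_front();
//       IsTopNode = true;                 // schedule strictly top-down
//       return SU;
//     }
//     virtual void schedNode(SUnit *SU, bool IsTopNode) {}
//     virtual void releaseTopNode(SUnit *SU) { ReadyQ.push_back(SU); }
//     virtual void releaseBottomNode(SUnit *SU) {}
//   };
//
// schedule() drives any such strategy without modification.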

/// Build the DAG and set up three register pressure trackers.
void ScheduleDAGMI::buildDAGWithRegPressure() {
  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
    Mutations[i]->apply(this);
  }
}
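
// Mutations are registered on the DAG before scheduling. For instance, the
// standard scheduler factory (createConvergingSched) installs the mutations
// defined later in this file roughly as follows:
//
//   if (EnableLoadCluster)
//     DAG->addMutation(new LoadClusterMutation(DAG->TII, DAG->TRI));
//   if (EnableMacroFusion)
//     DAG->addMutation(new MacroFusion(DAG->TII));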

void ScheduleDAGMI::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

void ScheduleDAGMI::findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                                          SmallVectorImpl<SUnit*> &BotRoots) {
  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    SUnit *SU = &(*I);
    assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU->biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!I->NumPredsLeft)
      TopRoots.push_back(SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!I->NumSuccsLeft)
      BotRoots.push_back(SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and set up scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = NULL;
  NextClusterPred = NULL;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
    SchedImpl->releaseTopNode(*I);
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  TopRPTracker.setPos(CurrentTop);

  CurrentBottom = RegionEnd;
}

/// Move an instruction and update register pressure.
void ScheduleDAGMI::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    // Update top scheduled pressure.
    TopRPTracker.advance();
    assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
    updateScheduledPressure(TopRPTracker.getPressure().MaxSetPressure);
  }
  else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
    }
    // Update bottom scheduled pressure.
    BotRPTracker.recede();
    assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
    updateScheduledPressure(BotRPTracker.getPressure().MaxSetPressure);
  }
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;

  if (DFSResult) {
    unsigned SubtreeID = DFSResult->getSubtreeID(SU);
    if (!ScheduledTrees.test(SubtreeID)) {
      ScheduledTrees.set(SubtreeID);
      DFSResult->scheduleTree(SubtreeID);
      SchedImpl->scheduleTree(SubtreeID);
    }
  }

  // Notify the scheduling strategy after updating the DAG.
  SchedImpl->schedNode(SU, IsTopNode);
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == llvm::prior(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = NULL;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// LoadClusterMutation - DAG post-processing to cluster loads.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between neighboring
/// loads.
class LoadClusterMutation : public ScheduleDAGMutation {
  struct LoadInfo {
    SUnit *SU;
    unsigned BaseReg;
    unsigned Offset;
    LoadInfo(SUnit *su, unsigned reg, unsigned ofs)
      : SU(su), BaseReg(reg), Offset(ofs) {}
  };
  static bool LoadInfoLess(const LoadClusterMutation::LoadInfo &LHS,
                           const LoadClusterMutation::LoadInfo &RHS);

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
public:
  LoadClusterMutation(const TargetInstrInfo *tii,
                      const TargetRegisterInfo *tri)
    : TII(tii), TRI(tri) {}

  virtual void apply(ScheduleDAGMI *DAG);
protected:
  void clusterNeighboringLoads(ArrayRef<SUnit*> Loads, ScheduleDAGMI *DAG);
};
} // anonymous

bool LoadClusterMutation::LoadInfoLess(
  const LoadClusterMutation::LoadInfo &LHS,
  const LoadClusterMutation::LoadInfo &RHS) {
  if (LHS.BaseReg != RHS.BaseReg)
    return LHS.BaseReg < RHS.BaseReg;
  return LHS.Offset < RHS.Offset;
}

void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
                                                  ScheduleDAGMI *DAG) {
  SmallVector<LoadClusterMutation::LoadInfo,32> LoadRecords;
  for (unsigned Idx = 0, End = Loads.size(); Idx != End; ++Idx) {
    SUnit *SU = Loads[Idx];
    unsigned BaseReg;
    unsigned Offset;
    if (TII->getLdStBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
      LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset));
  }
  if (LoadRecords.size() < 2)
    return;
  std::sort(LoadRecords.begin(), LoadRecords.end(), LoadInfoLess);
  unsigned ClusterLength = 1;
  for (unsigned Idx = 0, End = LoadRecords.size(); Idx < (End - 1); ++Idx) {
    if (LoadRecords[Idx].BaseReg != LoadRecords[Idx+1].BaseReg) {
      ClusterLength = 1;
      continue;
    }

    SUnit *SUa = LoadRecords[Idx].SU;
    SUnit *SUb = LoadRecords[Idx+1].SU;
    if (TII->shouldClusterLoads(SUa->getInstr(), SUb->getInstr(), ClusterLength)
        && DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {

      DEBUG(dbgs() << "Cluster loads SU(" << SUa->NodeNum << ") - SU("
            << SUb->NodeNum << ")\n");
      // Copy successor edges from SUa to SUb. Interleaving computation
      // dependent on SUa can prevent load combining due to register reuse.
      // Predecessor edges do not need to be copied from SUb to SUa since nearby
      // loads should have effectively the same inputs.
      for (SUnit::const_succ_iterator
             SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
        if (SI->getSUnit() == SUb)
          continue;
        DEBUG(dbgs() << "  Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
        DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
      }
      ++ClusterLength;
    }
    else
      ClusterLength = 1;
  }
}

/// \brief Callback from DAG postProcessing to create cluster edges for loads.
void LoadClusterMutation::apply(ScheduleDAGMI *DAG) {
  // Map DAG NodeNum to store chain ID.
  DenseMap<unsigned, unsigned> StoreChainIDs;
  // Map each store chain to a set of dependent loads.
  SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->mayLoad())
      continue;
    unsigned ChainPredID = DAG->SUnits.size();
    for (SUnit::const_pred_iterator
           PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
      if (PI->isCtrl()) {
        ChainPredID = PI->getSUnit()->NodeNum;
        break;
      }
    }
    // Check if this chain-like pred has been seen
    // before. ChainPredID==MaxNodeID for loads at the top of the schedule.
    unsigned NumChains = StoreChainDependents.size();
    std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
      StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
    if (Result.second)
      StoreChainDependents.resize(NumChains + 1);
    StoreChainDependents[Result.first->second].push_back(SU);
  }
  // Iterate over the store chains.
  for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
    clusterNeighboringLoads(StoreChainDependents[Idx], DAG);
}

//===----------------------------------------------------------------------===//
// MacroFusion - DAG post-processing to encourage fusion of macro ops.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between instructions
/// that may be fused by the processor into a single operation.
class MacroFusion : public ScheduleDAGMutation {
  const TargetInstrInfo *TII;
public:
  MacroFusion(const TargetInstrInfo *tii): TII(tii) {}

  virtual void apply(ScheduleDAGMI *DAG);
};
} // anonymous

/// \brief Callback from DAG postProcessing to create cluster edges to encourage
/// fused operations.
void MacroFusion::apply(ScheduleDAGMI *DAG) {
  // For now, assume targets can only fuse with the branch.
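  // On x86, for example, shouldScheduleAdjacent typically reports that a
  // flag-setting compare (CMP/TEST) may fuse with the conditional branch
  // consuming the flags.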
  MachineInstr *Branch = DAG->ExitSU.getInstr();
  if (!Branch)
    return;

  for (unsigned Idx = DAG->SUnits.size(); Idx > 0;) {
    SUnit *SU = &DAG->SUnits[--Idx];
    if (!TII->shouldScheduleAdjacent(SU->getInstr(), Branch))
      continue;

    // Create a single weak edge from SU to ExitSU. The only effect is to cause
    // bottom-up scheduling to heavily prioritize the clustered SU.  There is no
    // need to copy predecessor edges from ExitSU to SU, since top-down
    // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
    // of SU, we could create an artificial edge from the deepest root, but it
    // hasn't been needed yet.
    bool Success = DAG->addEdge(&DAG->ExitSU, SDep(SU, SDep::Cluster));
    (void)Success;
    assert(Success && "No DAG nodes should be reachable from ExitSU");

    DEBUG(dbgs() << "Macro Fuse SU(" << SU->NodeNum << ")\n");
    break;
  }
}

//===----------------------------------------------------------------------===//
// CopyConstrain - DAG post-processing to encourage copy elimination.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create weak edges from all uses of a copy to
/// the one use that defines the copy's source vreg, most likely an induction
/// variable increment.
class CopyConstrain : public ScheduleDAGMutation {
  // Transient state.
  SlotIndex RegionBeginIdx;
  // RegionEndIdx is the slot index of the last non-debug instruction in the
  // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
  SlotIndex RegionEndIdx;
public:
  CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}

  virtual void apply(ScheduleDAGMI *DAG);

protected:
  void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMI *DAG);
};
} // anonymous

/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
/// I0:     = dst
/// I1: src = ...
/// I2:     = dst
/// I3: dst = src (copy)
/// (create pred->succ edges I0->I1, I2->I1)
///
/// 2) Local copy:
/// I0: dst = src (copy)
/// I1:     = dst
/// I2: src = ...
/// I3:     = dst
/// (create pred->succ edges I1->I2, I3->I2)
///
/// Although the MachineScheduler is currently constrained to single blocks,
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMI *DAG) {
  LiveIntervals *LIS = DAG->getLIS();
  MachineInstr *Copy = CopySU->getInstr();

  // Check for pure vreg copies.
  unsigned SrcReg = Copy->getOperand(1).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
    return;

  unsigned DstReg = Copy->getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(DstReg))
    return;

  // Check if either the dest or source is local. If it's live across a back
  // edge, it's not local. Note that if both vregs are live across the back
  // edge, we cannot successfully constrain the copy without cyclic scheduling.
  unsigned LocalReg = DstReg;
  unsigned GlobalReg = SrcReg;
  LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
  if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
    LocalReg = SrcReg;
    GlobalReg = DstReg;
    LocalLI = &LIS->getInterval(LocalReg);
    if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
      return;
  }
  LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);

  // Find the global segment after the start of the local LI.
  LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
  // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
  // local live range. We could create edges from other global uses to the local
  // start, but the coalescer should have already eliminated these cases, so
  // don't bother dealing with it.
  if (GlobalSegment == GlobalLI->end())
    return;

  // If GlobalSegment is killed at LocalLI->start, the call to find()
  // returned the next global segment. But if GlobalSegment overlaps with
  // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
  // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
  if (GlobalSegment->contains(LocalLI->beginIndex()))
    ++GlobalSegment;

  if (GlobalSegment == GlobalLI->end())
    return;

  // Check if GlobalLI contains a hole in the vicinity of LocalLI.
  if (GlobalSegment != GlobalLI->begin()) {
    // Two address defs have no hole.
    if (SlotIndex::isSameInstr(llvm::prior(GlobalSegment)->end,
                               GlobalSegment->start)) {
      return;
    }
    // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
    // it would be a disconnected component in the live range.
    assert(llvm::prior(GlobalSegment)->start < LocalLI->beginIndex() &&
           "Disconnected LRG within the scheduling region.");
  }
  MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
  if (!GlobalDef)
    return;

  SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
  if (!GlobalSU)
    return;

  // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
  // constraining the uses of the last local def to precede GlobalDef.
  SmallVector<SUnit*,8> LocalUses;
  const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
  MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
  SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
  for (SUnit::const_succ_iterator
         I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
       I != E; ++I) {
    if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
      continue;
    if (I->getSUnit() == GlobalSU)
      continue;
    if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
      return;
    LocalUses.push_back(I->getSUnit());
  }
  // Open the top of the GlobalLI hole by constraining any earlier global uses
  // to precede the start of LocalLI.
  SmallVector<SUnit*,8> GlobalUses;
  MachineInstr *FirstLocalDef =
    LIS->getInstructionFromIndex(LocalLI->beginIndex());
  SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
  for (SUnit::const_pred_iterator
         I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
    if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
      continue;
    if (I->getSUnit() == FirstLocalSU)
      continue;
    if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
      return;
    GlobalUses.push_back(I->getSUnit());
  }
  DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
  // Add the weak edges.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Local use SU(" << (*I)->NodeNum << ") -> SU("
          << GlobalSU->NodeNum << ")\n");
    DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
  }
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Global use SU(" << (*I)->NodeNum << ") -> SU("
          << FirstLocalSU->NodeNum << ")\n");
    DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
  }
}

/// \brief Callback from DAG postProcessing to create weak edges to encourage
/// copy elimination.
void CopyConstrain::apply(ScheduleDAGMI *DAG) {
  MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
  if (FirstPos == DAG->end())
    return;
  RegionBeginIdx = DAG->getLIS()->getInstructionIndex(&*FirstPos);
  RegionEndIdx = DAG->getLIS()->getInstructionIndex(
    &*priorNonDebug(DAG->end(), DAG->begin()));

  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->isCopy())
      continue;

    constrainLocalCopy(SU, DAG);
  }
}

//===----------------------------------------------------------------------===//
// ConvergingScheduler - Implementation of the standard MachineSchedStrategy.
//===----------------------------------------------------------------------===//

namespace {
/// ConvergingScheduler shrinks the unscheduled zone using heuristics to balance
/// the schedule.
class ConvergingScheduler : public MachineSchedStrategy {
public:
  /// Represent the type of SchedCandidate found within a single queue.
  /// pickNodeBidirectional depends on these listed by decreasing priority.
  enum CandReason {
    NoCand, PhysRegCopy, SingleExcess, SingleCritical, Cluster, Weak,
    ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce,
    TopDepthReduce, TopPathReduce, SingleMax, MultiPressure, NextDefUse,
    NodeOrder};

#ifndef NDEBUG
  static const char *getReasonStr(ConvergingScheduler::CandReason Reason);
#endif

  /// Policy for scheduling the next instruction in the candidate's zone.
  struct CandPolicy {
    bool ReduceLatency;
    unsigned ReduceResIdx;
    unsigned DemandResIdx;

    CandPolicy(): ReduceLatency(false), ReduceResIdx(0), DemandResIdx(0) {}
  };

  /// Status of an instruction's critical resource consumption.
  struct SchedResourceDelta {
    // Count critical resources in the scheduled region required by SU.
    unsigned CritResources;

    // Count critical resources from another region consumed by SU.
    unsigned DemandedResources;

    SchedResourceDelta(): CritResources(0), DemandedResources(0) {}

    bool operator==(const SchedResourceDelta &RHS) const {
      return CritResources == RHS.CritResources
        && DemandedResources == RHS.DemandedResources;
    }
    bool operator!=(const SchedResourceDelta &RHS) const {
      return !operator==(RHS);
    }
  };

  /// Store the state used by ConvergingScheduler heuristics, required for the
  /// lifetime of one invocation of pickNode().
  struct SchedCandidate {
    CandPolicy Policy;

    // The best SUnit candidate.
    SUnit *SU;

    // The reason for this candidate.
    CandReason Reason;

    // Register pressure values for the best candidate.
    RegPressureDelta RPDelta;

    // Critical resource consumption of the best candidate.
    SchedResourceDelta ResDelta;

    SchedCandidate(const CandPolicy &policy)
    : Policy(policy), SU(NULL), Reason(NoCand) {}

    bool isValid() const { return SU; }

    // Copy the status of another candidate without changing policy.
    void setBest(SchedCandidate &Best) {
      assert(Best.Reason != NoCand && "uninitialized Sched candidate");
      SU = Best.SU;
      Reason = Best.Reason;
      RPDelta = Best.RPDelta;
      ResDelta = Best.ResDelta;
    }

    void initResourceDelta(const ScheduleDAGMI *DAG,
                           const TargetSchedModel *SchedModel);
  };

  /// Summarize the unscheduled region.
  struct SchedRemainder {
    // Critical path through the DAG in expected latency.
    unsigned CriticalPath;

    // Unscheduled resources
    SmallVector<unsigned, 16> RemainingCounts;
    // Critical resource for the unscheduled zone.
    unsigned CritResIdx;
    // Number of micro-ops left to schedule.
    unsigned RemainingMicroOps;

    void reset() {
      CriticalPath = 0;
      RemainingCounts.clear();
      CritResIdx = 0;
      RemainingMicroOps = 0;
    }

    SchedRemainder() { reset(); }

    void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);

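    // Counts are pre-scaled by per-resource factors so that micro-op totals
    // and resource totals are directly comparable. A hypothetical example: on
    // a 2-issue machine with one divide unit, 10 remaining micro-ops could
    // count as 10 * 1 = 10 units while 8 remaining divides count as
    // 8 * 2 = 16, making the divider the limiting resource.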
    unsigned getMaxRemainingCount(const TargetSchedModel *SchedModel) const {
      if (!SchedModel->hasInstrSchedModel())
        return 0;

      return std::max(
        RemainingMicroOps * SchedModel->getMicroOpFactor(),
        RemainingCounts[CritResIdx]);
    }
  };

  /// Each scheduling boundary is associated with ready queues. It tracks the
  /// current cycle in the direction of movement, and maintains the state
  /// of "hazards" and other interlocks at the current cycle.
  struct SchedBoundary {
    ScheduleDAGMI *DAG;
    const TargetSchedModel *SchedModel;
    SchedRemainder *Rem;

    ReadyQueue Available;
    ReadyQueue Pending;
    bool CheckPending;

    // For heuristics, keep a list of the nodes that immediately depend on the
    // most recently scheduled node.
    SmallPtrSet<const SUnit*, 8> NextSUs;

    ScheduleHazardRecognizer *HazardRec;

    unsigned CurrCycle;
    unsigned CurrMOps;

    /// MinReadyCycle - Cycle of the soonest available instruction.
    unsigned MinReadyCycle;

    // The expected latency of the critical path in this scheduled zone.
    unsigned ExpectedLatency;

    // The latency of dependence chains leading into this zone.
    // For each node scheduled: DLat = max(DLat, N.Depth).
    // For each cycle scheduled: DLat -= 1.
    unsigned DependentLatency;

    // Resources used in the scheduled zone beyond this boundary.
    SmallVector<unsigned, 16> ResourceCounts;

    // Cache the critical resources ID in this scheduled zone.
    unsigned CritResIdx;

    // Is the scheduled region resource limited vs. latency limited.
    bool IsResourceLimited;

    unsigned ExpectedCount;

#ifndef NDEBUG
    // Remember the greatest operand latency as an upper bound on the number of
    // times we should retry the pending queue because of a hazard.
    unsigned MaxObservedLatency;
#endif

    void reset() {
      // A new HazardRec is created for each DAG and owned by SchedBoundary.
      delete HazardRec;

      Available.clear();
      Pending.clear();
      CheckPending = false;
      NextSUs.clear();
      HazardRec = 0;
      CurrCycle = 0;
      CurrMOps = 0;
      MinReadyCycle = UINT_MAX;
      ExpectedLatency = 0;
      DependentLatency = 0;
      // Reserve a zero-count for invalid CritResIdx.
      ResourceCounts.resize(1);
      assert(!ResourceCounts[0] && "nonzero count for bad resource");
      CritResIdx = 0;
      IsResourceLimited = false;
      ExpectedCount = 0;
#ifndef NDEBUG
      MaxObservedLatency = 0;
#endif
    }

    /// Pending queues extend the ready queues with the same ID and the
    /// PendingFlag set.
    SchedBoundary(unsigned ID, const Twine &Name):
      DAG(0), SchedModel(0), Rem(0), Available(ID, Name+".A"),
      Pending(ID << ConvergingScheduler::LogMaxQID, Name+".P"),
      HazardRec(0) {
      reset();
    }

    ~SchedBoundary() { delete HazardRec; }

    void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel,
              SchedRemainder *rem);

    bool isTop() const {
      return Available.getID() == ConvergingScheduler::TopQID;
    }

    unsigned getUnscheduledLatency(SUnit *SU) const {
      if (isTop())
        return SU->getHeight();
      return SU->getDepth() + SU->Latency;
    }

    unsigned getCriticalCount() const {
      return ResourceCounts[CritResIdx];
    }

    bool checkHazard(SUnit *SU);

    void setLatencyPolicy(CandPolicy &Policy);

    void releaseNode(SUnit *SU, unsigned ReadyCycle);

    void bumpCycle();

    void countResource(unsigned PIdx, unsigned Cycles);

    void bumpNode(SUnit *SU);

    void releasePending();

    void removeReady(SUnit *SU);

    SUnit *pickOnlyChoice();
  };

private:
  ScheduleDAGMI *DAG;
  const TargetSchedModel *SchedModel;
  const TargetRegisterInfo *TRI;

  // State of the top and bottom scheduled instruction boundaries.
  SchedRemainder Rem;
  SchedBoundary Top;
  SchedBoundary Bot;

public:
  /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
  enum {
    TopQID = 1,
    BotQID = 2,
    LogMaxQID = 2
  };

  ConvergingScheduler():
    DAG(0), SchedModel(0), TRI(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}

  virtual void initialize(ScheduleDAGMI *dag);

  virtual SUnit *pickNode(bool &IsTopNode);

  virtual void schedNode(SUnit *SU, bool IsTopNode);

  virtual void releaseTopNode(SUnit *SU);

  virtual void releaseBottomNode(SUnit *SU);

  virtual void registerRoots();

protected:
  void balanceZones(
    ConvergingScheduler::SchedBoundary &CriticalZone,
    ConvergingScheduler::SchedCandidate &CriticalCand,
    ConvergingScheduler::SchedBoundary &OppositeZone,
    ConvergingScheduler::SchedCandidate &OppositeCand);

  void checkResourceLimits(ConvergingScheduler::SchedCandidate &TopCand,
                           ConvergingScheduler::SchedCandidate &BotCand);

  void tryCandidate(SchedCandidate &Cand,
                    SchedCandidate &TryCand,
                    SchedBoundary &Zone,
                    const RegPressureTracker &RPTracker,
                    RegPressureTracker &TempTracker);

  SUnit *pickNodeBidirectional(bool &IsTopNode);

  void pickNodeFromQueue(SchedBoundary &Zone,
                         const RegPressureTracker &RPTracker,
                         SchedCandidate &Candidate);

  void reschedulePhysRegCopies(SUnit *SU, bool isTop);

#ifndef NDEBUG
  void traceCandidate(const SchedCandidate &Cand);
#endif
};
} // namespace

void ConvergingScheduler::SchedRemainder::
init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
  reset();
  if (!SchedModel->hasInstrSchedModel())
    return;
  RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
  for (std::vector<SUnit>::iterator
         I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
    RemainingMicroOps += SchedModel->getNumMicroOps(I->getInstr(), SC);
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned PIdx = PI->ProcResourceIdx;
      unsigned Factor = SchedModel->getResourceFactor(PIdx);
      RemainingCounts[PIdx] += (Factor * PI->Cycles);
    }
  }
  for (unsigned PIdx = 0, PEnd = SchedModel->getNumProcResourceKinds();
       PIdx != PEnd; ++PIdx) {
    if ((int)(RemainingCounts[PIdx] - RemainingCounts[CritResIdx])
        >= (int)SchedModel->getLatencyFactor()) {
      CritResIdx = PIdx;
    }
  }
  DEBUG(dbgs() << "Critical Resource: "
        << SchedModel->getProcResource(CritResIdx)->Name
        << ": " << RemainingCounts[CritResIdx]
        << " / " << SchedModel->getLatencyFactor() << '\n');
}

void ConvergingScheduler::SchedBoundary::
init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
  reset();
  DAG = dag;
  SchedModel = smodel;
  Rem = rem;
  if (SchedModel->hasInstrSchedModel())
    ResourceCounts.resize(SchedModel->getNumProcResourceKinds());
}

void ConvergingScheduler::initialize(ScheduleDAGMI *dag) {
  DAG = dag;
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  Bot.init(DAG, SchedModel, &Rem);

  // Initialize resource counts.

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
  // are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  const TargetMachine &TM = DAG->MF.getTarget();
  Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
  Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);

  assert((!ForceTopDown || !ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
}
1463 
1464 void ConvergingScheduler::releaseTopNode(SUnit *SU) {
1465   if (SU->isScheduled)
1466     return;
1467 
1468   for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1469        I != E; ++I) {
1470     if (I->isWeak())
1471       continue;
1472     unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle;
1473     unsigned Latency = I->getLatency();
1474 #ifndef NDEBUG
1475     Top.MaxObservedLatency = std::max(Latency, Top.MaxObservedLatency);
1476 #endif
1477     if (SU->TopReadyCycle < PredReadyCycle + Latency)
1478       SU->TopReadyCycle = PredReadyCycle + Latency;
1479   }
1480   Top.releaseNode(SU, SU->TopReadyCycle);
1481 }
1482 
1483 void ConvergingScheduler::releaseBottomNode(SUnit *SU) {
1484   if (SU->isScheduled)
1485     return;
1486 
1487   assert(SU->getInstr() && "Scheduled SUnit must have instr");
1488 
1489   for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1490        I != E; ++I) {
1491     if (I->isWeak())
1492       continue;
1493     unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
1494     unsigned Latency = I->getLatency();
1495 #ifndef NDEBUG
1496     Bot.MaxObservedLatency = std::max(Latency, Bot.MaxObservedLatency);
1497 #endif
1498     if (SU->BotReadyCycle < SuccReadyCycle + Latency)
1499       SU->BotReadyCycle = SuccReadyCycle + Latency;
1500   }
1501   Bot.releaseNode(SU, SU->BotReadyCycle);
1502 }
1503 
1504 void ConvergingScheduler::registerRoots() {
1505   Rem.CriticalPath = DAG->ExitSU.getDepth();
1506   // Some roots may not feed into ExitSU. Check all of them just in case.
1507   for (std::vector<SUnit*>::const_iterator
1508          I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
1509     if ((*I)->getDepth() > Rem.CriticalPath)
1510       Rem.CriticalPath = (*I)->getDepth();
1511   }
1512   DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n');
1513 }
1514 
1515 /// Does this SU have a hazard within the current instruction group?
1516 ///
1517 /// The scheduler supports two modes of hazard recognition. The first is the
1518 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
1519 /// supports highly complicated in-order reservation tables
1520 /// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
1521 ///
1522 /// The second is a streamlined mechanism that checks for hazards based on
1523 /// simple counters that the scheduler itself maintains. It explicitly checks
1524 /// for instruction dispatch limitations, including the number of micro-ops that
1525 /// can dispatch per cycle.
1526 ///
1527 /// TODO: Also check whether the SU must start a new group.
1528 bool ConvergingScheduler::SchedBoundary::checkHazard(SUnit *SU) {
1529   if (HazardRec->isEnabled())
1530     return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard;
1531 
1532   unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
1533   if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
1534     DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
1535           << uops << '\n');
1536     return true;
1537   }
1538   return false;
1539 }
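
// Illustrative numbers: with an issue width of 4 and CurrMOps == 3, a 2-uop
// instruction would overflow the current group (3 + 2 > 4) and is reported as
// a hazard. At CurrMOps == 0 the same instruction issues freely, even if it
// alone exceeds the width; the overflow is charged when the node is bumped.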
1540 
1541 /// Compute the remaining latency to determine whether ILP should be increased.
1542 void ConvergingScheduler::SchedBoundary::setLatencyPolicy(CandPolicy &Policy) {
1543   DEBUG(dbgs() << "  " << Available.getName()
1544         << " DependentLatency " << DependentLatency << '\n');
1545 
1546   // FIXME: compile time. In all, we visit four queues here; we should only
1547   // need to visit the one that was last popped if we cache the result.
1548   unsigned RemLatency = DependentLatency;
1549   for (ReadyQueue::iterator I = Available.begin(), E = Available.end();
1550        I != E; ++I) {
1551     unsigned L = getUnscheduledLatency(*I);
1552     if (L > RemLatency) {
1553       DEBUG(dbgs() << "  " << Available.getName()
1554             << " RemLatency SU(" << (*I)->NodeNum << ") " << L << '\n');
1555       RemLatency = L;
1556     }
1557   }
1558   for (ReadyQueue::iterator I = Pending.begin(), E = Pending.end();
1559        I != E; ++I) {
1560     unsigned L = getUnscheduledLatency(*I);
1561     if (L > RemLatency)
1562       RemLatency = L;
1563   }
1564   unsigned CriticalPathLimit = Rem->CriticalPath;
1565   DEBUG(dbgs() << "  " << Available.getName()
1566         << " ExpectedLatency " << ExpectedLatency
1567         << " CP Limit " << CriticalPathLimit << '\n');
1568 
1569   if (RemLatency + std::max(ExpectedLatency, CurrCycle) >= CriticalPathLimit
1570       && RemLatency > Rem->getMaxRemainingCount(SchedModel)) {
1571     Policy.ReduceLatency = true;
1572     DEBUG(dbgs() << "  Increase ILP: " << Available.getName() << '\n');
1573   }
1574 }
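
// Illustrative numbers: with CriticalPath == 20, CurrCycle == 6,
// ExpectedLatency == 8, and a worst unscheduled chain of RemLatency == 14, the
// test is 14 + max(8, 6) == 22 >= 20, so ReduceLatency is set, provided the
// remaining latency also exceeds the scaled remaining resource count (the
// second condition above).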
1575 
1576 void ConvergingScheduler::SchedBoundary::releaseNode(SUnit *SU,
1577                                                      unsigned ReadyCycle) {
1578 
1579   if (ReadyCycle < MinReadyCycle)
1580     MinReadyCycle = ReadyCycle;
1581 
1582   // Check for interlocks first. For the purpose of other heuristics, an
1583   // instruction that cannot issue appears as if it's not in the ReadyQueue.
1584   if (ReadyCycle > CurrCycle || checkHazard(SU))
1585     Pending.push(SU);
1586   else
1587     Available.push(SU);
1588 
1589   // Record this node as an immediate dependent of the scheduled node.
1590   NextSUs.insert(SU);
1591 }
1592 
1593 /// Move the boundary of scheduled code by one cycle.
1594 void ConvergingScheduler::SchedBoundary::bumpCycle() {
1595   unsigned Width = SchedModel->getIssueWidth();
1596   CurrMOps = (CurrMOps <= Width) ? 0 : CurrMOps - Width;
1597 
1598   unsigned NextCycle = CurrCycle + 1;
1599   assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
1600   if (MinReadyCycle > NextCycle) {
1601     CurrMOps = 0;
1602     NextCycle = MinReadyCycle;
1603   }
1604   if ((NextCycle - CurrCycle) > DependentLatency)
1605     DependentLatency = 0;
1606   else
1607     DependentLatency -= (NextCycle - CurrCycle);
1608 
1609   if (!HazardRec->isEnabled()) {
1610     // Bypass HazardRec virtual calls.
1611     CurrCycle = NextCycle;
1612   }
1613   else {
1614     // Bypass getHazardType calls in case of long latency.
1615     for (; CurrCycle != NextCycle; ++CurrCycle) {
1616       if (isTop())
1617         HazardRec->AdvanceCycle();
1618       else
1619         HazardRec->RecedeCycle();
1620     }
1621   }
1622   CheckPending = true;
1623   IsResourceLimited = getCriticalCount() > std::max(ExpectedLatency, CurrCycle);
1624 
1625   DEBUG(dbgs() << "  " << Available.getName()
1626         << " Cycle: " << CurrCycle << '\n');
1627 }
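
// For instance, with an issue width of 4, a 6-uop instruction leaves
// CurrMOps == 6 after bumpNode(); the resulting bumpCycle() reduces it to 2,
// charging the overflow micro-ops against the following cycle's budget.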
1628 
1629 /// Add the given processor resource to this scheduled zone.
1630 void ConvergingScheduler::SchedBoundary::countResource(unsigned PIdx,
1631                                                        unsigned Cycles) {
1632   unsigned Factor = SchedModel->getResourceFactor(PIdx);
1633   DEBUG(dbgs() << "  " << SchedModel->getProcResource(PIdx)->Name
1634         << " +(" << Cycles << "x" << Factor
1635         << ") / " << SchedModel->getLatencyFactor() << '\n');
1636 
1637   unsigned Count = Factor * Cycles;
1638   ResourceCounts[PIdx] += Count;
1639   assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
1640   Rem->RemainingCounts[PIdx] -= Count;
1641 
1642   // Check if this resource exceeds the current critical resource by a full
1643   // cycle. If so, it becomes the critical resource.
1644   if ((int)(ResourceCounts[PIdx] - ResourceCounts[CritResIdx])
1645       >= (int)SchedModel->getLatencyFactor()) {
1646     CritResIdx = PIdx;
1647     DEBUG(dbgs() << "  *** Critical resource "
1648           << SchedModel->getProcResource(PIdx)->Name << " x"
1649           << ResourceCounts[PIdx] << '\n');
1650   }
1651 }
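
// Continuing the hypothetical example above (latency factor 2): if
// ResourceCounts[B] reaches 6 while the current critical resource A sits at 4,
// the lead of 2 equals a full latency factor, so B takes over as the zone's
// critical resource.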
1652 
1653 /// Move the boundary of scheduled code by one SUnit.
1654 void ConvergingScheduler::SchedBoundary::bumpNode(SUnit *SU) {
1655   // Update the reservation table.
1656   if (HazardRec->isEnabled()) {
1657     if (!isTop() && SU->isCall) {
1658       // Calls are scheduled with their preceding instructions. For bottom-up
1659       // scheduling, clear the pipeline state before emitting.
1660       HazardRec->Reset();
1661     }
1662     HazardRec->EmitInstruction(SU);
1663   }
1664   // Update resource counts and critical resource.
1665   if (SchedModel->hasInstrSchedModel()) {
1666     const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1667     Rem->RemainingMicroOps -= SchedModel->getNumMicroOps(SU->getInstr(), SC);
1668     for (TargetSchedModel::ProcResIter
1669            PI = SchedModel->getWriteProcResBegin(SC),
1670            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1671       countResource(PI->ProcResourceIdx, PI->Cycles);
1672     }
1673   }
1674   unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
1675   unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
1676   if (SU->getDepth() > TopLatency)
1677     TopLatency = SU->getDepth();
1678   if (SU->getHeight() > BotLatency)
1679     BotLatency = SU->getHeight();
1680 
1681   IsResourceLimited = getCriticalCount() > std::max(ExpectedLatency, CurrCycle);
1682 
1683   // Check the instruction group dispatch limit.
1684   // TODO: Check if this SU must end a dispatch group.
1685   CurrMOps += SchedModel->getNumMicroOps(SU->getInstr());
1686 
1687   // checkHazard prevents scheduling multiple instructions per cycle that exceed
1688   // issue width. However, we commonly reach the maximum. In this case,
1689   // opportunistically bump the cycle to avoid uselessly checking everything in
1690   // the readyQ. Furthermore, a single instruction may produce more than one
1691   // cycle's worth of micro-ops.
1692   if (CurrMOps >= SchedModel->getIssueWidth()) {
1693     DEBUG(dbgs() << "  *** Max instrs at cycle " << CurrCycle << '\n');
1694     bumpCycle();
1695   }
1696 }
1697 
1698 /// Release pending ready nodes into the available queue. This makes them
1699 /// visible to heuristics.
1700 void ConvergingScheduler::SchedBoundary::releasePending() {
1701   // If the available queue is empty, it is safe to reset MinReadyCycle.
1702   if (Available.empty())
1703     MinReadyCycle = UINT_MAX;
1704 
1705   // Check to see if any of the pending instructions are ready to issue.  If
1706   // so, add them to the available queue.
1707   for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
1708     SUnit *SU = *(Pending.begin()+i);
1709     unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
1710 
1711     if (ReadyCycle < MinReadyCycle)
1712       MinReadyCycle = ReadyCycle;
1713 
1714     if (ReadyCycle > CurrCycle)
1715       continue;
1716 
1717     if (checkHazard(SU))
1718       continue;
1719 
1720     Available.push(SU);
1721     Pending.remove(Pending.begin()+i);
1722     --i; --e;
1723   }
1724   DEBUG(if (!Pending.empty()) Pending.dump());
1725   CheckPending = false;
1726 }
1727 
1728 /// Remove SU from the ready set for this boundary.
1729 void ConvergingScheduler::SchedBoundary::removeReady(SUnit *SU) {
1730   if (Available.isInQueue(SU))
1731     Available.remove(Available.find(SU));
1732   else {
1733     assert(Pending.isInQueue(SU) && "bad ready count");
1734     Pending.remove(Pending.find(SU));
1735   }
1736 }
1737 
1738 /// If this queue only has one ready candidate, return it. As a side effect,
1739 /// defer any nodes that now hit a hazard, and advance the cycle until at least
1740 /// one node is ready. If multiple instructions are ready, return NULL.
1741 SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() {
1742   if (CheckPending)
1743     releasePending();
1744 
1745   if (CurrMOps > 0) {
1746     // Defer any ready instrs that now have a hazard.
1747     for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
1748       if (checkHazard(*I)) {
1749         Pending.push(*I);
1750         I = Available.remove(I);
1751         continue;
1752       }
1753       ++I;
1754     }
1755   }
1756   for (unsigned i = 0; Available.empty(); ++i) {
1757     assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedLatency) &&
1758            "permanent hazard"); (void)i;
1759     bumpCycle();
1760     releasePending();
1761   }
1762   if (Available.size() == 1)
1763     return *Available.begin();
1764   return NULL;
1765 }
1766 
1767 /// Record the candidate policy for opposite zones with different critical
1768 /// resources.
1769 ///
1770 /// If the CriticalZone is latency limited, don't force a policy for the
1771 /// candidates here. Instead, setLatencyPolicy sets ReduceLatency if needed.
1772 void ConvergingScheduler::balanceZones(
1773   ConvergingScheduler::SchedBoundary &CriticalZone,
1774   ConvergingScheduler::SchedCandidate &CriticalCand,
1775   ConvergingScheduler::SchedBoundary &OppositeZone,
1776   ConvergingScheduler::SchedCandidate &OppositeCand) {
1777 
1778   if (!CriticalZone.IsResourceLimited)
1779     return;
1780   assert(SchedModel->hasInstrSchedModel() && "required schedmodel");
1781 
1782   SchedRemainder *Rem = CriticalZone.Rem;
1783 
1784   // If the critical zone is overconsuming a resource relative to the
1785   // remainder, try to reduce it.
1786   unsigned RemainingCritCount =
1787     Rem->RemainingCounts[CriticalZone.CritResIdx];
1788   if ((int)(Rem->getMaxRemainingCount(SchedModel) - RemainingCritCount)
1789       > (int)SchedModel->getLatencyFactor()) {
1790     CriticalCand.Policy.ReduceResIdx = CriticalZone.CritResIdx;
1791     DEBUG(dbgs() << "  Balance " << CriticalZone.Available.getName()
1792           << " reduce "
1793           << SchedModel->getProcResource(CriticalZone.CritResIdx)->Name
1794           << '\n');
1795   }
1796   // If the other zone is underconsuming a resource relative to the full zone,
1797   // try to increase it.
1798   unsigned OppositeCount =
1799     OppositeZone.ResourceCounts[CriticalZone.CritResIdx];
1800   if ((int)(OppositeZone.ExpectedCount - OppositeCount)
1801       > (int)SchedModel->getLatencyFactor()) {
1802     OppositeCand.Policy.DemandResIdx = CriticalZone.CritResIdx;
1803     DEBUG(dbgs() << "  Balance " << OppositeZone.Available.getName()
1804           << " demand "
1805           << SchedModel->getProcResource(OppositeZone.CritResIdx)->Name
1806           << '\n');
1807   }
1808 }
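
// Illustration of the resulting policy (hypothetical resources): if the bottom
// zone is resource limited on P while the remainder is dominated by some other
// resource, the bottom candidate is asked to reduce P (Policy.ReduceResIdx),
// and the top candidate, whose P count trails its expected cost, is asked to
// demand P (Policy.DemandResIdx).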
1809 
1810 /// Determine if the scheduled zones exceed resource limits or critical path and
1811 /// set each candidate's ReduceHeight policy accordingly.
1812 void ConvergingScheduler::checkResourceLimits(
1813   ConvergingScheduler::SchedCandidate &TopCand,
1814   ConvergingScheduler::SchedCandidate &BotCand) {
1815 
1816   // Set ReduceLatency to true if needed.
1817   Bot.setLatencyPolicy(BotCand.Policy);
1818   Top.setLatencyPolicy(TopCand.Policy);
1819 
1820   // Handle resource-limited regions.
1821   if (Top.IsResourceLimited && Bot.IsResourceLimited
1822       && Top.CritResIdx == Bot.CritResIdx) {
1823     // If the scheduled critical resource in both zones is no longer the
1824     // critical remaining resource, attempt to reduce resource height both ways.
1825     if (Top.CritResIdx != Rem.CritResIdx) {
1826       TopCand.Policy.ReduceResIdx = Top.CritResIdx;
1827       BotCand.Policy.ReduceResIdx = Bot.CritResIdx;
1828       DEBUG(dbgs() << "  Reduce scheduled "
1829             << SchedModel->getProcResource(Top.CritResIdx)->Name << '\n');
1830     }
1831     return;
1832   }
1833   // Handle latency-limited regions.
1834   if (!Top.IsResourceLimited && !Bot.IsResourceLimited) {
1835     // If the total scheduled expected latency exceeds the region's critical
1836     // path then reduce latency both ways.
1837     //
1838     // Just because a zone is not resource limited does not mean it is latency
1839     // limited. Unbuffered resources, such as max micro-ops, may cause CurrCycle
1840     // to exceed expected latency.
1841     if ((Top.ExpectedLatency + Bot.ExpectedLatency >= Rem.CriticalPath)
1842         && (Rem.CriticalPath > Top.CurrCycle + Bot.CurrCycle)) {
1843       TopCand.Policy.ReduceLatency = true;
1844       BotCand.Policy.ReduceLatency = true;
1845       DEBUG(dbgs() << "  Reduce scheduled latency " << Top.ExpectedLatency
1846             << " + " << Bot.ExpectedLatency << '\n');
1847     }
1848     return;
1849   }
1850   // The critical resource is different in each zone, so request balancing.
1851 
1852   // Compute the cost of each zone.
1853   Top.ExpectedCount = std::max(Top.ExpectedLatency, Top.CurrCycle);
1854   Top.ExpectedCount = std::max(
1855     Top.getCriticalCount(),
1856     Top.ExpectedCount * SchedModel->getLatencyFactor());
1857   Bot.ExpectedCount = std::max(Bot.ExpectedLatency, Bot.CurrCycle);
1858   Bot.ExpectedCount = std::max(
1859     Bot.getCriticalCount(),
1860     Bot.ExpectedCount * SchedModel->getLatencyFactor());
1861 
1862   balanceZones(Top, TopCand, Bot, BotCand);
1863   balanceZones(Bot, BotCand, Top, TopCand);
1864 }
1865 
1866 void ConvergingScheduler::SchedCandidate::
1867 initResourceDelta(const ScheduleDAGMI *DAG,
1868                   const TargetSchedModel *SchedModel) {
1869   if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
1870     return;
1871 
1872   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1873   for (TargetSchedModel::ProcResIter
1874          PI = SchedModel->getWriteProcResBegin(SC),
1875          PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1876     if (PI->ProcResourceIdx == Policy.ReduceResIdx)
1877       ResDelta.CritResources += PI->Cycles;
1878     if (PI->ProcResourceIdx == Policy.DemandResIdx)
1879       ResDelta.DemandedResources += PI->Cycles;
1880   }
1881 }
1882 
1883 /// Return true if this heuristic determines order.
1884 static bool tryLess(int TryVal, int CandVal,
1885                     ConvergingScheduler::SchedCandidate &TryCand,
1886                     ConvergingScheduler::SchedCandidate &Cand,
1887                     ConvergingScheduler::CandReason Reason) {
1888   if (TryVal < CandVal) {
1889     TryCand.Reason = Reason;
1890     return true;
1891   }
1892   if (TryVal > CandVal) {
1893     if (Cand.Reason > Reason)
1894       Cand.Reason = Reason;
1895     return true;
1896   }
1897   return false;
1898 }
1899 
1900 static bool tryGreater(int TryVal, int CandVal,
1901                        ConvergingScheduler::SchedCandidate &TryCand,
1902                        ConvergingScheduler::SchedCandidate &Cand,
1903                        ConvergingScheduler::CandReason Reason) {
1904   if (TryVal > CandVal) {
1905     TryCand.Reason = Reason;
1906     return true;
1907   }
1908   if (TryVal < CandVal) {
1909     if (Cand.Reason > Reason)
1910       Cand.Reason = Reason;
1911     return true;
1912   }
1913   return false;
1914 }
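
// Usage note: these helpers implement a three-way test. A strict win marks
// TryCand with Reason; a strict loss keeps Cand but may sharpen Cand.Reason to
// this stronger reason (lower enum values win); a tie returns false so the
// weaker heuristics later in tryCandidate() get to break it.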
1915 
1916 static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
1917   return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
1918 }
1919 
1920 /// Minimize physical register live ranges. Regalloc wants them adjacent to
1921 /// their physreg def/use.
1922 ///
1923 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
1924 /// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
1925 /// with the operation that produces or consumes the physreg. We'll do this when
1926 /// regalloc has support for parallel copies.
1927 static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
1928   const MachineInstr *MI = SU->getInstr();
1929   if (!MI->isCopy())
1930     return 0;
1931 
1932   unsigned ScheduledOper = isTop ? 1 : 0;
1933   unsigned UnscheduledOper = isTop ? 0 : 1;
1934   // If we have already scheduled the physreg producer/consumer, immediately
1935   // schedule the copy.
1936   if (TargetRegisterInfo::isPhysicalRegister(
1937         MI->getOperand(ScheduledOper).getReg()))
1938     return 1;
1939   // If the physreg is at the boundary, defer it. Otherwise schedule it
1940   // immediately to free the dependent. We can hoist the copy later.
1941   bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
1942   if (TargetRegisterInfo::isPhysicalRegister(
1943         MI->getOperand(UnscheduledOper).getReg()))
1944     return AtBoundary ? -1 : 1;
1945   return 0;
1946 }
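
// Concretely: for "%vreg = COPY %EAX" (an illustrative physreg) in the top
// zone, operand 1 is the physreg use facing the already-scheduled code, so
// once the def of %EAX is placed the copy is scheduled immediately (+1). If
// instead the physreg operand faces the unscheduled side, the copy is deferred
// at the region boundary (-1) and pulled in (+1) otherwise.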
1947 
1948 /// Apply a set of heuristics to a new candidate. Heuristics are currently
1949 /// hierarchical. This may be more efficient than a graduated cost model because
1950 /// we don't need to evaluate all aspects of the model for each node in the
1951 /// queue. But it's really done to make the heuristics easier to debug and
1952 /// statistically analyze.
1953 ///
1954 /// \param Cand provides the policy and current best candidate.
1955 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
1956 /// \param Zone describes the scheduled zone that we are extending.
1957 /// \param RPTracker describes reg pressure within the scheduled zone.
1958 /// \param TempTracker is a scratch pressure tracker to reuse in queries.
1959 void ConvergingScheduler::tryCandidate(SchedCandidate &Cand,
1960                                        SchedCandidate &TryCand,
1961                                        SchedBoundary &Zone,
1962                                        const RegPressureTracker &RPTracker,
1963                                        RegPressureTracker &TempTracker) {
1964 
1965   // Always initialize TryCand's RPDelta.
1966   TempTracker.getMaxPressureDelta(TryCand.SU->getInstr(), TryCand.RPDelta,
1967                                   DAG->getRegionCriticalPSets(),
1968                                   DAG->getRegPressure().MaxSetPressure);
1969 
1970   // Initialize the candidate if needed.
1971   if (!Cand.isValid()) {
1972     TryCand.Reason = NodeOrder;
1973     return;
1974   }
1975 
1976   if (tryGreater(biasPhysRegCopy(TryCand.SU, Zone.isTop()),
1977                  biasPhysRegCopy(Cand.SU, Zone.isTop()),
1978                  TryCand, Cand, PhysRegCopy))
1979     return;
1980 
1981   // Avoid exceeding the target's limit.
1982   if (tryLess(TryCand.RPDelta.Excess.UnitIncrease,
1983               Cand.RPDelta.Excess.UnitIncrease, TryCand, Cand, SingleExcess))
1984     return;
1985   if (Cand.Reason == SingleExcess)
1986     Cand.Reason = MultiPressure;
1987 
1988   // Avoid increasing the max critical pressure in the scheduled region.
1989   if (tryLess(TryCand.RPDelta.CriticalMax.UnitIncrease,
1990               Cand.RPDelta.CriticalMax.UnitIncrease,
1991               TryCand, Cand, SingleCritical))
1992     return;
1993   if (Cand.Reason == SingleCritical)
1994     Cand.Reason = MultiPressure;
1995 
1996   // Keep clustered nodes together to encourage downstream peephole
1997   // optimizations which may reduce resource requirements.
1998   //
1999   // This is a best effort to set things up for a post-RA pass. Optimizations
2000   // like generating loads of multiple registers should ideally be done within
2001   // the scheduler pass by combining the loads during DAG postprocessing.
2002   const SUnit *NextClusterSU =
2003     Zone.isTop() ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
2004   if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU,
2005                  TryCand, Cand, Cluster))
2006     return;
2007 
2008   // Weak edges are for clustering and other constraints.
2009   //
2010   // Deferring TryCand here does not change Cand's reason. This is good in the
2011   // sense that a bad candidate shouldn't affect a previous candidate's
2012   // goodness, but bad in that it is asymmetric and depends on queue order.
2013   CandReason OrigReason = Cand.Reason;
2014   if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()),
2015               getWeakLeft(Cand.SU, Zone.isTop()),
2016               TryCand, Cand, Weak)) {
2017     Cand.Reason = OrigReason;
2018     return;
2019   }
2020   // Avoid critical resource consumption and balance the schedule.
2021   TryCand.initResourceDelta(DAG, SchedModel);
2022   if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
2023               TryCand, Cand, ResourceReduce))
2024     return;
2025   if (tryGreater(TryCand.ResDelta.DemandedResources,
2026                  Cand.ResDelta.DemandedResources,
2027                  TryCand, Cand, ResourceDemand))
2028     return;
2029 
2030   // Avoid serializing long latency dependence chains.
2031   if (Cand.Policy.ReduceLatency) {
2032     if (Zone.isTop()) {
2033       if (Cand.SU->getDepth() * SchedModel->getLatencyFactor()
2034           > Zone.ExpectedCount) {
2035         if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2036                     TryCand, Cand, TopDepthReduce))
2037           return;
2038       }
2039       if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2040                      TryCand, Cand, TopPathReduce))
2041         return;
2042     }
2043     else {
2044       if (Cand.SU->getHeight() * SchedModel->getLatencyFactor()
2045           > Zone.ExpectedCount) {
2046         if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2047                     TryCand, Cand, BotHeightReduce))
2048           return;
2049       }
2050       if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2051                      TryCand, Cand, BotPathReduce))
2052         return;
2053     }
2054   }
2055 
2056   // Avoid increasing the max pressure of the entire region.
2057   if (tryLess(TryCand.RPDelta.CurrentMax.UnitIncrease,
2058               Cand.RPDelta.CurrentMax.UnitIncrease, TryCand, Cand, SingleMax))
2059     return;
2060   if (Cand.Reason == SingleMax)
2061     Cand.Reason = MultiPressure;
2062 
2063   // Prefer immediate defs/users of the last scheduled instruction. This is a
2064   // nice pressure avoidance strategy that also conserves the processor's
2065   // register renaming resources and keeps the machine code readable.
2066   if (tryGreater(Zone.NextSUs.count(TryCand.SU), Zone.NextSUs.count(Cand.SU),
2067                  TryCand, Cand, NextDefUse))
2068     return;
2069 
2070   // Fall through to original instruction order.
2071   if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
2072       || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
2073     TryCand.Reason = NodeOrder;
2074   }
2075 }
2076 
2077 /// pickNodeFromQueue helper that returns true if the LHS reg pressure effect is
2078 /// more desirable than RHS from a scheduling standpoint.
2079 static bool compareRPDelta(const RegPressureDelta &LHS,
2080                            const RegPressureDelta &RHS) {
2081   // Compare each component of pressure in decreasing order of importance
2082   // without checking if any are valid. Invalid PressureElements are assumed to
2083   // have UnitIncrease==0, so are neutral.
2084 
2085   // Avoid exceeding the target's limit.
2086   if (LHS.Excess.UnitIncrease != RHS.Excess.UnitIncrease) {
2087     DEBUG(dbgs() << "  RP excess top - bot: "
2088           << (LHS.Excess.UnitIncrease - RHS.Excess.UnitIncrease) << '\n');
2089     return LHS.Excess.UnitIncrease < RHS.Excess.UnitIncrease;
2090   }
2091   // Avoid increasing the max critical pressure in the scheduled region.
2092   if (LHS.CriticalMax.UnitIncrease != RHS.CriticalMax.UnitIncrease) {
2093     DEBUG(dbgs() << "  RP critical top - bot: "
2094           << (LHS.CriticalMax.UnitIncrease - RHS.CriticalMax.UnitIncrease)
2095           << '\n');
2096     return LHS.CriticalMax.UnitIncrease < RHS.CriticalMax.UnitIncrease;
2097   }
2098   // Avoid increasing the max pressure of the entire region.
2099   if (LHS.CurrentMax.UnitIncrease != RHS.CurrentMax.UnitIncrease) {
2100     DEBUG(dbgs() << "  RP current top - bot: "
2101           << (LHS.CurrentMax.UnitIncrease - RHS.CurrentMax.UnitIncrease)
2102           << '\n');
2103     return LHS.CurrentMax.UnitIncrease < RHS.CurrentMax.UnitIncrease;
2104   }
2105   return false;
2106 }
2107 
2108 #ifndef NDEBUG
2109 const char *ConvergingScheduler::getReasonStr(
2110   ConvergingScheduler::CandReason Reason) {
2111   switch (Reason) {
2112   case NoCand:         return "NOCAND    ";
2113   case PhysRegCopy:    return "PREG-COPY ";
2114   case SingleExcess:   return "REG-EXCESS";
2115   case SingleCritical: return "REG-CRIT  ";
2116   case Cluster:        return "CLUSTER   ";
2117   case Weak:           return "WEAK      ";
2118   case SingleMax:      return "REG-MAX   ";
2119   case MultiPressure:  return "REG-MULTI ";
2120   case ResourceReduce: return "RES-REDUCE";
2121   case ResourceDemand: return "RES-DEMAND";
2122   case TopDepthReduce: return "TOP-DEPTH ";
2123   case TopPathReduce:  return "TOP-PATH  ";
2124   case BotHeightReduce: return "BOT-HEIGHT";
2125   case BotPathReduce:  return "BOT-PATH  ";
2126   case NextDefUse:     return "DEF-USE   ";
2127   case NodeOrder:      return "ORDER     ";
2128   }
2129   llvm_unreachable("Unknown reason!");
2130 }
2131 
2132 void ConvergingScheduler::traceCandidate(const SchedCandidate &Cand) {
2133   PressureElement P;
2134   unsigned ResIdx = 0;
2135   unsigned Latency = 0;
2136   switch (Cand.Reason) {
2137   default:
2138     break;
2139   case SingleExcess:
2140     P = Cand.RPDelta.Excess;
2141     break;
2142   case SingleCritical:
2143     P = Cand.RPDelta.CriticalMax;
2144     break;
2145   case SingleMax:
2146     P = Cand.RPDelta.CurrentMax;
2147     break;
2148   case ResourceReduce:
2149     ResIdx = Cand.Policy.ReduceResIdx;
2150     break;
2151   case ResourceDemand:
2152     ResIdx = Cand.Policy.DemandResIdx;
2153     break;
2154   case TopDepthReduce:
2155     Latency = Cand.SU->getDepth();
2156     break;
2157   case TopPathReduce:
2158     Latency = Cand.SU->getHeight();
2159     break;
2160   case BotHeightReduce:
2161     Latency = Cand.SU->getHeight();
2162     break;
2163   case BotPathReduce:
2164     Latency = Cand.SU->getDepth();
2165     break;
2166   }
2167   dbgs() << "  SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
2168   if (P.isValid())
2169     dbgs() << " " << TRI->getRegPressureSetName(P.PSetID)
2170            << ":" << P.UnitIncrease << " ";
2171   else
2172     dbgs() << "      ";
2173   if (ResIdx)
2174     dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
2175   else
2176     dbgs() << "         ";
2177   if (Latency)
2178     dbgs() << " " << Latency << " cycles ";
2179   else
2180     dbgs() << "          ";
2181   dbgs() << '\n';
2182 }
2183 #endif
2184 
2185 /// Pick the best candidate from the top queue.
2186 ///
2187 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
2188 /// DAG building. To adjust for the current scheduling location we need to
2189 /// maintain the number of vreg uses remaining to be top-scheduled.
2190 void ConvergingScheduler::pickNodeFromQueue(SchedBoundary &Zone,
2191                                             const RegPressureTracker &RPTracker,
2192                                             SchedCandidate &Cand) {
2193   ReadyQueue &Q = Zone.Available;
2194 
2195   DEBUG(Q.dump());
2196 
2197   // getMaxPressureDelta temporarily modifies the tracker.
2198   RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
2199 
2200   for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
2201 
2202     SchedCandidate TryCand(Cand.Policy);
2203     TryCand.SU = *I;
2204     tryCandidate(Cand, TryCand, Zone, RPTracker, TempTracker);
2205     if (TryCand.Reason != NoCand) {
2206       // Initialize resource delta if needed in case future heuristics query it.
2207       if (TryCand.ResDelta == SchedResourceDelta())
2208         TryCand.initResourceDelta(DAG, SchedModel);
2209       Cand.setBest(TryCand);
2210       DEBUG(traceCandidate(Cand));
2211     }
2212   }
2213 }
2214 
2215 static void tracePick(const ConvergingScheduler::SchedCandidate &Cand,
2216                       bool IsTop) {
2217   DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
2218         << ConvergingScheduler::getReasonStr(Cand.Reason) << '\n');
2219 }
2220 
2221 /// Pick the best candidate node from either the top or bottom queue.
2222 SUnit *ConvergingScheduler::pickNodeBidirectional(bool &IsTopNode) {
2223   // Schedule as far as possible in the direction of no choice. This is most
2224   // efficient, but also provides the best heuristics for CriticalPSets.
2225   if (SUnit *SU = Bot.pickOnlyChoice()) {
2226     IsTopNode = false;
2227     DEBUG(dbgs() << "Pick Bot NOCAND\n");
2228     return SU;
2229   }
2230   if (SUnit *SU = Top.pickOnlyChoice()) {
2231     IsTopNode = true;
2232     DEBUG(dbgs() << "Pick Top NOCAND\n");
2233     return SU;
2234   }
2235   CandPolicy NoPolicy;
2236   SchedCandidate BotCand(NoPolicy);
2237   SchedCandidate TopCand(NoPolicy);
2238   checkResourceLimits(TopCand, BotCand);
2239 
2240   // Prefer bottom scheduling when heuristics are silent.
2241   pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
2242   assert(BotCand.Reason != NoCand && "failed to find the first candidate");
2243 
2244   // If either Q has a single candidate that provides the least increase in
2245   // Excess pressure, we can immediately schedule from that Q.
2246   //
2247   // RegionCriticalPSets summarizes the pressure within the scheduled region and
2248   // affects picking from either Q. If scheduling in one direction must
2249   // increase pressure for one of the excess PSets, then schedule in that
2250   // direction first to provide more freedom in the other direction.
2251   if (BotCand.Reason == SingleExcess || BotCand.Reason == SingleCritical) {
2252     IsTopNode = false;
2253     tracePick(BotCand, IsTopNode);
2254     return BotCand.SU;
2255   }
2256   // Check if the top Q has a better candidate.
2257   pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2258   assert(TopCand.Reason != NoCand && "failed to find the first candidate");
2259 
2260   // If either Q has a single candidate that minimizes pressure above the
2261   // original region's pressure, pick it.
2262   if (TopCand.Reason <= SingleMax || BotCand.Reason <= SingleMax) {
2263     if (TopCand.Reason < BotCand.Reason) {
2264       IsTopNode = true;
2265       tracePick(TopCand, IsTopNode);
2266       return TopCand.SU;
2267     }
2268     IsTopNode = false;
2269     tracePick(BotCand, IsTopNode);
2270     return BotCand.SU;
2271   }
2272   // Check for a salient pressure difference and pick the best from either side.
2273   if (compareRPDelta(TopCand.RPDelta, BotCand.RPDelta)) {
2274     IsTopNode = true;
2275     tracePick(TopCand, IsTopNode);
2276     return TopCand.SU;
2277   }
2278   // Otherwise prefer the bottom candidate, in node order if all else failed.
2279   if (TopCand.Reason < BotCand.Reason) {
2280     IsTopNode = true;
2281     tracePick(TopCand, IsTopNode);
2282     return TopCand.SU;
2283   }
2284   IsTopNode = false;
2285   tracePick(BotCand, IsTopNode);
2286   return BotCand.SU;
2287 }
2288 
2289 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
2290 SUnit *ConvergingScheduler::pickNode(bool &IsTopNode) {
2291   if (DAG->top() == DAG->bottom()) {
2292     assert(Top.Available.empty() && Top.Pending.empty() &&
2293            Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
2294     return NULL;
2295   }
2296   SUnit *SU;
2297   do {
2298     if (ForceTopDown) {
2299       SU = Top.pickOnlyChoice();
2300       if (!SU) {
2301         CandPolicy NoPolicy;
2302         SchedCandidate TopCand(NoPolicy);
2303         pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2304         assert(TopCand.Reason != NoCand && "failed to find the first candidate");
2305         SU = TopCand.SU;
2306       }
2307       IsTopNode = true;
2308     }
2309     else if (ForceBottomUp) {
2310       SU = Bot.pickOnlyChoice();
2311       if (!SU) {
2312         CandPolicy NoPolicy;
2313         SchedCandidate BotCand(NoPolicy);
2314         pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
2315         assert(BotCand.Reason != NoCand && "failed to find the first candidate");
2316         SU = BotCand.SU;
2317       }
2318       IsTopNode = false;
2319     }
2320     else {
2321       SU = pickNodeBidirectional(IsTopNode);
2322     }
2323   } while (SU->isScheduled);
2324 
2325   if (SU->isTopReady())
2326     Top.removeReady(SU);
2327   if (SU->isBottomReady())
2328     Bot.removeReady(SU);
2329 
2330   DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
2331   return SU;
2332 }
2333 
2334 void ConvergingScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
2335 
2336   MachineBasicBlock::iterator InsertPos = SU->getInstr();
2337   if (!isTop)
2338     ++InsertPos;
2339   SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;
2340 
2341   // Find already scheduled copies with a single physreg dependence and move
2342   // them just above the scheduled instruction.
2343   for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end();
2344        I != E; ++I) {
2345     if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg()))
2346       continue;
2347     SUnit *DepSU = I->getSUnit();
2348     if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
2349       continue;
2350     MachineInstr *Copy = DepSU->getInstr();
2351     if (!Copy->isCopy())
2352       continue;
2353     DEBUG(dbgs() << "  Rescheduling physreg copy ";
2354           I->getSUnit()->dump(DAG));
2355     DAG->moveInstruction(Copy, InsertPos);
2356   }
2357 }
2358 
2359 /// Update the scheduler's state after scheduling a node. This is the same node
2360 /// that was just returned by pickNode(). However, ScheduleDAGMI needs to update
2361 /// its state based on the current cycle before MachineSchedStrategy does.
2362 ///
2363 /// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
2364 /// them here. See comments in biasPhysRegCopy.
2365 void ConvergingScheduler::schedNode(SUnit *SU, bool IsTopNode) {
2366   if (IsTopNode) {
2367     SU->TopReadyCycle = Top.CurrCycle;
2368     Top.bumpNode(SU);
2369     if (SU->hasPhysRegUses)
2370       reschedulePhysRegCopies(SU, true);
2371   }
2372   else {
2373     SU->BotReadyCycle = Bot.CurrCycle;
2374     Bot.bumpNode(SU);
2375     if (SU->hasPhysRegDefs)
2376       reschedulePhysRegCopies(SU, false);
2377   }
2378 }
2379 
2380 /// Create the standard converging machine scheduler. This will be used as the
2381 /// default scheduler if the target does not set a default.
2382 static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
2383   assert((!ForceTopDown || !ForceBottomUp) &&
2384          "-misched-topdown incompatible with -misched-bottomup");
2385   ScheduleDAGMI *DAG = new ScheduleDAGMI(C, new ConvergingScheduler());
2386   // Register DAG post-processors.
2387   //
2388   // FIXME: extend the mutation API to allow earlier mutations to instantiate
2389   // data and pass it to later mutations. Have a single mutation that gathers
2390   // the interesting nodes in one pass.
2391   DAG->addMutation(new CopyConstrain(DAG->TII, DAG->TRI));
2392   if (EnableLoadCluster)
2393     DAG->addMutation(new LoadClusterMutation(DAG->TII, DAG->TRI));
2394   if (EnableMacroFusion)
2395     DAG->addMutation(new MacroFusion(DAG->TII));
2396   return DAG;
2397 }
2398 static MachineSchedRegistry
2399 ConvergingSchedRegistry("converge", "Standard converging scheduler.",
2400                         createConvergingSched);
2401 
2402 //===----------------------------------------------------------------------===//
2403 // ILP Scheduler. Currently for experimental analysis of heuristics.
2404 //===----------------------------------------------------------------------===//
2405 
2406 namespace {
2407 /// \brief Order nodes by the ILP metric.
2408 struct ILPOrder {
2409   const SchedDFSResult *DFSResult;
2410   const BitVector *ScheduledTrees;
2411   bool MaximizeILP;
2412 
2413   ILPOrder(bool MaxILP): DFSResult(0), ScheduledTrees(0), MaximizeILP(MaxILP) {}
2414 
2415   /// \brief Apply a less-than relation on node priority.
2416   ///
2417   /// (Return true if A comes after B in the Q.)
2418   bool operator()(const SUnit *A, const SUnit *B) const {
2419     unsigned SchedTreeA = DFSResult->getSubtreeID(A);
2420     unsigned SchedTreeB = DFSResult->getSubtreeID(B);
2421     if (SchedTreeA != SchedTreeB) {
2422       // Unscheduled trees have lower priority.
2423       if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
2424         return ScheduledTrees->test(SchedTreeB);
2425 
2426       // Trees with shallower connections have lower priority.
2427       if (DFSResult->getSubtreeLevel(SchedTreeA)
2428           != DFSResult->getSubtreeLevel(SchedTreeB)) {
2429         return DFSResult->getSubtreeLevel(SchedTreeA)
2430           < DFSResult->getSubtreeLevel(SchedTreeB);
2431       }
2432     }
2433     if (MaximizeILP)
2434       return DFSResult->getILP(A) < DFSResult->getILP(B);
2435     else
2436       return DFSResult->getILP(A) > DFSResult->getILP(B);
2437   }
2438 };
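
// A minimal sketch of how this comparator drives a ready queue, mirroring the
// heap calls in ILPScheduler below (Q is a std::vector<SUnit*>):
//   std::make_heap(Q.begin(), Q.end(), Cmp); // best candidate at Q.front()
//   std::pop_heap(Q.begin(), Q.end(), Cmp);  // move it to Q.back()
// Since operator() is a strict less-than ("A comes after B"), the element that
// compares greatest pops first; with MaximizeILP that is the SUnit with the
// largest ILP count.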
2439 
2440 /// \brief Schedule based on the ILP metric.
2441 class ILPScheduler : public MachineSchedStrategy {
2442   /// In case all subtrees are eventually connected to a common root through
2443   /// data dependence (e.g. reduction), place an upper limit on their size.
2444   ///
2445   /// FIXME: A subtree limit is generally good, but in the situation commented
2446   /// above, where multiple similar subtrees feed a common root, we should
2447   /// only split at a point where the resulting subtrees will be balanced.
2448   /// (a motivating test case must be found).
2449   static const unsigned SubtreeLimit = 16;
2450 
2451   ScheduleDAGMI *DAG;
2452   ILPOrder Cmp;
2453 
2454   std::vector<SUnit*> ReadyQ;
2455 public:
2456   ILPScheduler(bool MaximizeILP): DAG(0), Cmp(MaximizeILP) {}
2457 
2458   virtual void initialize(ScheduleDAGMI *dag) {
2459     DAG = dag;
2460     DAG->computeDFSResult();
2461     Cmp.DFSResult = DAG->getDFSResult();
2462     Cmp.ScheduledTrees = &DAG->getScheduledTrees();
2463     ReadyQ.clear();
2464   }
2465 
2466   virtual void registerRoots() {
2467     // Restore the heap in ReadyQ with the updated DFS results.
2468     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2469   }
2470 
2471   /// Implement MachineSchedStrategy interface.
2472   /// -----------------------------------------
2473 
2474   /// Callback to select the highest priority node from the ready Q.
2475   virtual SUnit *pickNode(bool &IsTopNode) {
2476     if (ReadyQ.empty()) return NULL;
2477     std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2478     SUnit *SU = ReadyQ.back();
2479     ReadyQ.pop_back();
2480     IsTopNode = false;
2481     DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") "
2482           << " ILP: " << DAG->getDFSResult()->getILP(SU)
2483           << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @"
2484           << DAG->getDFSResult()->getSubtreeLevel(
2485             DAG->getDFSResult()->getSubtreeID(SU)) << '\n'
2486           << "Scheduling " << *SU->getInstr());
2487     return SU;
2488   }
2489 
2490   /// \brief Scheduler callback to notify that a new subtree is scheduled.
2491   virtual void scheduleTree(unsigned SubtreeID) {
2492     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2493   }
2494 
2495   /// Callback after a node is scheduled. The subtree bookkeeping and heap
2496   /// resort are done in scheduleTree() when a subtree becomes fully scheduled.
2497   virtual void schedNode(SUnit *SU, bool IsTopNode) {
2498     assert(!IsTopNode && "SchedDFSResult needs bottom-up");
2499   }
2500 
2501   virtual void releaseTopNode(SUnit *) { /*only called for top roots*/ }
2502 
2503   virtual void releaseBottomNode(SUnit *SU) {
2504     ReadyQ.push_back(SU);
2505     std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2506   }
2507 };
2508 } // namespace
2509 
2510 static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
2511   return new ScheduleDAGMI(C, new ILPScheduler(true));
2512 }
2513 static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
2514   return new ScheduleDAGMI(C, new ILPScheduler(false));
2515 }
2516 static MachineSchedRegistry ILPMaxRegistry(
2517   "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
2518 static MachineSchedRegistry ILPMinRegistry(
2519   "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);
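
// Usage sketch (assuming the usual llc options; the -misched= registry option
// is defined earlier in this file): the experimental strategies register under
// the names above and can be selected with, e.g.:
//   llc -enable-misched -misched=ilpmax foo.ll
// which instantiates ILPScheduler(true) via createILPMaxScheduler.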
2520 
2521 //===----------------------------------------------------------------------===//
2522 // Machine Instruction Shuffler for Correctness Testing
2523 //===----------------------------------------------------------------------===//
2524 
2525 #ifndef NDEBUG
2526 namespace {
2527 /// Apply a less-than relation on the node order, which corresponds to the
2528 /// instruction order prior to scheduling. IsReverse implements greater-than.
2529 template<bool IsReverse>
2530 struct SUnitOrder {
2531   bool operator()(SUnit *A, SUnit *B) const {
2532     if (IsReverse)
2533       return A->NodeNum > B->NodeNum;
2534     else
2535       return A->NodeNum < B->NodeNum;
2536   }
2537 };
2538 
2539 /// Reorder instructions as much as possible.
2540 class InstructionShuffler : public MachineSchedStrategy {
2541   bool IsAlternating;
2542   bool IsTopDown;
2543 
2544   // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
2545   // gives nodes with a higher number higher priority, causing the latest
2546   // instructions to be scheduled first.
2547   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
2548     TopQ;
2549   // When scheduling bottom-up, use greater-than as the queue priority.
2550   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
2551     BottomQ;
2552 public:
2553   InstructionShuffler(bool alternate, bool topdown)
2554     : IsAlternating(alternate), IsTopDown(topdown) {}
2555 
2556   virtual void initialize(ScheduleDAGMI *) {
2557     TopQ.clear();
2558     BottomQ.clear();
2559   }
2560 
2561   /// Implement MachineSchedStrategy interface.
2562   /// -----------------------------------------
2563 
2564   virtual SUnit *pickNode(bool &IsTopNode) {
2565     SUnit *SU;
2566     if (IsTopDown) {
2567       do {
2568         if (TopQ.empty()) return NULL;
2569         SU = TopQ.top();
2570         TopQ.pop();
2571       } while (SU->isScheduled);
2572       IsTopNode = true;
2573     }
2574     else {
2575       do {
2576         if (BottomQ.empty()) return NULL;
2577         SU = BottomQ.top();
2578         BottomQ.pop();
2579       } while (SU->isScheduled);
2580       IsTopNode = false;
2581     }
2582     if (IsAlternating)
2583       IsTopDown = !IsTopDown;
2584     return SU;
2585   }
2586 
2587   virtual void schedNode(SUnit *SU, bool IsTopNode) {}
2588 
2589   virtual void releaseTopNode(SUnit *SU) {
2590     TopQ.push(SU);
2591   }
2592   virtual void releaseBottomNode(SUnit *SU) {
2593     BottomQ.push(SU);
2594   }
2595 };
2596 } // namespace
2597 
2598 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
2599   bool Alternate = !ForceTopDown && !ForceBottomUp;
2600   bool TopDown = !ForceBottomUp;
2601   assert((TopDown || !ForceTopDown) &&
2602          "-misched-topdown incompatible with -misched-bottomup");
2603   return new ScheduleDAGMI(C, new InstructionShuffler(Alternate, TopDown));
2604 }
2605 static MachineSchedRegistry ShufflerRegistry(
2606   "shuffle", "Shuffle machine instructions alternating directions",
2607   createInstructionShuffler);
2608 #endif // !NDEBUG
2609 
2610 //===----------------------------------------------------------------------===//
2611 // GraphWriter support for ScheduleDAGMI.
2612 //===----------------------------------------------------------------------===//
2613 
2614 #ifndef NDEBUG
2615 namespace llvm {
2616 
2617 template<> struct GraphTraits<
2618   ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};
2619 
2620 template<>
2621 struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
2622 
2623   DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
2624 
2625   static std::string getGraphName(const ScheduleDAG *G) {
2626     return G->MF.getName();
2627   }
2628 
2629   static bool renderGraphFromBottomUp() {
2630     return true;
2631   }
2632 
2633   static bool isNodeHidden(const SUnit *Node) {
2634     return (Node->NumPreds > 10 || Node->NumSuccs > 10);
2635   }
2636 
2637   static bool hasNodeAddressLabel(const SUnit *Node,
2638                                   const ScheduleDAG *Graph) {
2639     return false;
2640   }
2641 
2642   /// If you want to override the dot attributes printed for a particular
2643   /// edge, override this method.
2644   static std::string getEdgeAttributes(const SUnit *Node,
2645                                        SUnitIterator EI,
2646                                        const ScheduleDAG *Graph) {
2647     if (EI.isArtificialDep())
2648       return "color=cyan,style=dashed";
2649     if (EI.isCtrlDep())
2650       return "color=blue,style=dashed";
2651     return "";
2652   }
2653 
2654   static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
2655     std::string Str;
2656     raw_string_ostream SS(Str);
2657     SS << "SU(" << SU->NodeNum << ')';
2658     return SS.str();
2659   }
2660   static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
2661     return G->getGraphNodeLabel(SU);
2662   }
2663 
2664   static std::string getNodeAttributes(const SUnit *N,
2665                                        const ScheduleDAG *Graph) {
2666     std::string Str("shape=Mrecord");
2667     const SchedDFSResult *DFS =
2668       static_cast<const ScheduleDAGMI*>(Graph)->getDFSResult();
2669     if (DFS) {
2670       Str += ",style=filled,fillcolor=\"#";
2671       Str += DOT::getColorString(DFS->getSubtreeID(N));
2672       Str += '"';
2673     }
2674     return Str;
2675   }
2676 };
2677 } // namespace llvm
2678 #endif // NDEBUG
2679 
2680 /// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
2681 /// rendered using 'dot'.
2682 ///
2683 void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
2684 #ifndef NDEBUG
2685   ViewGraph(this, Name, false, Title);
2686 #else
2687   errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
2688          << "systems with Graphviz or gv!\n";
2689 #endif  // NDEBUG
2690 }
2691 
2692 /// Out-of-line implementation with no arguments is handy for gdb.
2693 void ScheduleDAGMI::viewGraph() {
2694   viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
2695 }
2696