//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "misched"

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include <queue>

using namespace llvm;

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
}

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
  cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(true));

static cl::opt<bool> EnableLoadCluster("misched-cluster", cl::Hidden,
  cl::desc("Enable load clustering."), cl::init(true));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}
void ScheduleDAGMutation::anchor() {}

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
    MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// Base class for a machine scheduler class that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}

  virtual void print(raw_ostream &O, const Module* = 0) const;

protected:
  void scheduleRegions(ScheduleDAGInstrs &Scheduler);
};

/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  virtual void getAnalysisUsage(AnalysisUsage &AU) const;

  virtual bool runOnMachineFunction(MachineFunction&);

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createMachineScheduler();
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "misched",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "misched",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineSchedulerBase(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return 0;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSched(MachineSchedContext *C);
static ScheduleDAGInstrs *createRawGenericSched(MachineSchedContext *C);

/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return const_cast<MachineInstr*>(
    &*priorNonDebug(MachineBasicBlock::const_iterator(I), Beg));
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  // Cast the return value to nonconst MachineInstr, then cast to an
  // instr_iterator, which does not check for null, finally return a
  // bundle_iterator.
  return MachineBasicBlock::instr_iterator(
    const_cast<MachineInstr*>(
      &*nextIfDebug(MachineBasicBlock::const_iterator(I), End)));
}

/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor != useDefaultMachineSched)
    return Ctor(this);

  // Get the default scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSched(this);
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AliasAnalysis>();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  OwningPtr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler);

  DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler) {
  const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(MBB);

    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    unsigned RemainingInstrs = MBB->size();
    for (MachineBasicBlock::iterator RegionEnd = MBB->end();
         RegionEnd != MBB->begin(); RegionEnd = Scheduler.begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end()
          || TII->isSchedulingBoundary(llvm::prior(RegionEnd), MBB, *MF)) {
        --RegionEnd;
        // Count the boundary instruction.
        --RemainingInstrs;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      unsigned NumRegionInstrs = 0;
      MachineBasicBlock::iterator I = RegionEnd;
      for (; I != MBB->begin(); --I, --RemainingInstrs, ++NumRegionInstrs) {
        if (TII->isSchedulingBoundary(llvm::prior(I), MBB, *MF))
          break;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == llvm::prior(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n  From: " << *I << "    To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " RegionInstrs: " << NumRegionInstrs
            << " Remaining: " << RemainingInstrs << "\n");

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler.begin();
    }
    assert(RemainingInstrs == 0 && "Instruction count mismatch!");
    Scheduler.finishBlock();
  }
  Scheduler.finalizeSchedule();
}

void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ReadyQueue::dump() {
  dbgs() << Name << ": ";
  for (unsigned i = 0, e = Queue.size(); i < e; ++i)
    dbgs() << Queue[i]->NodeNum << " ";
  dbgs() << "\n";
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
// virtual registers.
//===----------------------------------------------------------------------===//

ScheduleDAGMI::~ScheduleDAGMI() {
  DeleteContainerPointers(Mutations);
  delete SchedImpl;
}

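/// True if an edge can be added from PredSU to SuccSU without creating a
/// cycle. Edges to ExitSU are always permitted.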
bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

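/// Add the dependence PredDep to SuccSU, rejecting it if it would form a
/// cycle. Returns true if the edge exists afterward, whether or not it was
/// newly inserted.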
bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  SchedImpl->initPolicy(begin, end, regioninstrs);
}

/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
  MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals.
  if (LIS)
    LIS->handleMove(MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

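/// In debug builds, halt scheduling once the -misched-cutoff instruction count
/// is reached. Returns false when the cutoff has been hit.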
bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}

/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  // Build the DAG.
  buildSchedGraph(AA);

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    }
    else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
    }
    updateQueues(SU, IsTopNode);

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
    Mutations[i]->apply(this);
  }
}

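/// Collect the DAG roots (nodes with no remaining strong predecessors or
/// successors) and order each node's predecessor edges so that DFSResult
/// follows the critical path.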
void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                      SmallVectorImpl<SUnit*> &BotRoots) {
  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    SUnit *SU = &(*I);
    assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU->biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!I->NumPredsLeft)
      TopRoots.push_back(SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!I->NumSuccsLeft)
      BotRoots.push_back(SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and set up scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = NULL;
  NextClusterPred = NULL;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
    SchedImpl->releaseTopNode(*I);
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If the first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == llvm::prior(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = NULL;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd =
    (RegionEnd == bb->end()) ? RegionEnd : llvm::next(RegionEnd);

  SUPressureDiffs.clear();

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
}

// Set up the register pressure trackers for the top and bottom scheduled
// regions.
void ScheduleDAGMILive::initRegPressure() {
  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    DEBUG(dbgs() << "Live Thru: ";
          dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // For each live out vreg reduce the pressure change associated with other
  // uses of the same vreg below the live-out reaching def.
  updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd) {
    SmallVector<unsigned, 8> LiveUses;
    BotRPTracker.recede(&LiveUses);
    updatePressureDiffs(LiveUses);
  }

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      DEBUG(dbgs() << TRI->getRegPressureSetName(i)
            << " Limit " << Limit
            << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureChange(i));
    }
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].getPSet()) << " ";
        dbgs() << "\n");
}

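/// Record the new maximum pressure reached in each critical pressure set for
/// this region, and report (in debug builds) any set that is within two units
/// of its limit.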
void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
                        const std::vector<unsigned> &NewMaxPressure) {
  const PressureDiff &PDiff = getPressureDiff(SU);
  unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
  for (PressureDiff::const_iterator I = PDiff.begin(), E = PDiff.end();
       I != E; ++I) {
    if (!I->isValid())
      break;
    unsigned ID = I->getPSet();
    while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
      ++CritIdx;
    if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
      if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
          && NewMaxPressure[ID] <= INT16_MAX)
        RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
    }
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
    if (NewMaxPressure[ID] >= Limit - 2) {
      DEBUG(dbgs() << "  " << TRI->getRegPressureSetName(ID) << ": "
            << NewMaxPressure[ID] << " > " << Limit << "(+ "
            << BotRPTracker.getLiveThru()[ID] << " livethru)\n");
    }
  }
}

/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(ArrayRef<unsigned> LiveUses) {
  for (unsigned LUIdx = 0, LUEnd = LiveUses.size(); LUIdx != LUEnd; ++LUIdx) {
    /// FIXME: Currently assuming single-use physregs.
    unsigned Reg = LiveUses[LUIdx];
    DEBUG(dbgs() << "  LiveReg: " << PrintVRegOrUnit(Reg, TRI) << "\n");
    if (!TRI->isVirtualRegister(Reg))
      continue;

    // This may be called before CurrentBottom has been initialized. However,
    // BotRPTracker must have a valid position. We want the value live into the
    // instruction or live out of the block, so ask for the previous
    // instruction's live-out.
    const LiveInterval &LI = LIS->getInterval(Reg);
    VNInfo *VNI;
    MachineBasicBlock::const_iterator I =
      nextIfDebug(BotRPTracker.getPos(), BB->end());
    if (I == BB->end())
      VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    else {
      LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(I));
      VNI = LRQ.valueIn();
    }
    // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
    assert(VNI && "No live value at use.");
    for (VReg2UseMap::iterator
           UI = VRegUses.find(Reg); UI != VRegUses.end(); ++UI) {
      SUnit *SU = UI->SU;
      DEBUG(dbgs() << "  UpdateRegP: SU(" << SU->NodeNum << ") "
            << *SU->getInstr());
      // If this use comes before the reaching def, it cannot be a last use, so
      // decrease its pressure change.
      if (!SU->isScheduled && SU != &ExitSU) {
        LiveQueryResult LRQ
          = LI.Query(LIS->getInstructionIndex(SU->getInstr()));
        if (LRQ.valueIn() == VNI)
          getPressureDiff(SU).addPressureChange(Reg, true, &MRI);
      }
    }
  }
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMILive then it will want to override this virtual method in order
/// to update any specialized state.
void ScheduleDAGMILive::schedule() {
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  if (ShouldTrackPressure) {
    assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
    TopRPTracker.setPos(CurrentTop);
  }

  bool IsTopNode = false;
  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    updateQueues(SU, IsTopNode);

    if (DFSResult) {
      unsigned SubtreeID = DFSResult->getSubtreeID(SU);
      if (!ScheduledTrees.test(SubtreeID)) {
        ScheduledTrees.set(SubtreeID);
        DFSResult->scheduleTree(SubtreeID);
        SchedImpl->scheduleTree(SubtreeID);
      }
    }

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Build the DAG and set up three register pressure trackers.
void ScheduleDAGMILive::buildDAGWithRegPressure() {
  if (!ShouldTrackPressure) {
    RPTracker.reset();
    RegionCriticalPSets.clear();
    buildSchedGraph(AA);
    return;
  }

  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker, &SUPressureDiffs);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

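/// Compute (or recompute) the DFS result over the current DAG, partitioning it
/// into subtrees that the strategy may use for queue priority.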
void ScheduleDAGMILive::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single-block loops. To handle loops that
/// span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes. For example, consider
/// the following instruction sequence where each instruction has unit latency
/// and defines an eponymous virtual register:
///
/// a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
///
/// This could be relevant to PostRA scheduling, but is currently implemented
/// assuming LiveIntervals.
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
  // This only applies to single-block loops.
  if (!BB->isSuccessor(BB))
    return 0;

  unsigned MaxCyclicLatency = 0;
  // Visit each live out vreg def to find def/use pairs that cross iterations.
  ArrayRef<unsigned> LiveOuts = RPTracker.getPressure().LiveOutRegs;
  for (ArrayRef<unsigned>::iterator RI = LiveOuts.begin(), RE = LiveOuts.end();
       RI != RE; ++RI) {
    unsigned Reg = *RI;
    if (!TRI->isVirtualRegister(Reg))
      continue;
    const LiveInterval &LI = LIS->getInterval(Reg);
    const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    if (!DefVNI)
      continue;

    MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
    const SUnit *DefSU = getSUnit(DefMI);
    if (!DefSU)
      continue;

    unsigned LiveOutHeight = DefSU->getHeight();
    unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
    // Visit all local users of the vreg def.
    for (VReg2UseMap::iterator
           UI = VRegUses.find(Reg); UI != VRegUses.end(); ++UI) {
      if (UI->SU == &ExitSU)
        continue;

      // Only consider uses of the phi.
      LiveQueryResult LRQ =
        LI.Query(LIS->getInstructionIndex(UI->SU->getInstr()));
      if (!LRQ.valueIn()->isPHIDef())
        continue;

      // Assume that a path spanning two iterations is a cycle, which could
      // overestimate in strange cases. This allows cyclic latency to be
      // estimated as the minimum slack of the vreg's depth or height.
      unsigned CyclicLatency = 0;
      if (LiveOutDepth > UI->SU->getDepth())
        CyclicLatency = LiveOutDepth - UI->SU->getDepth();

      unsigned LiveInHeight = UI->SU->getHeight() + DefSU->Latency;
      if (LiveInHeight > LiveOutHeight) {
        if (LiveInHeight - LiveOutHeight < CyclicLatency)
          CyclicLatency = LiveInHeight - LiveOutHeight;
      }
      else
        CyclicLatency = 0;

      DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
            << UI->SU->NodeNum << ") = " << CyclicLatency << "c\n");
      if (CyclicLatency > MaxCyclicLatency)
        MaxCyclicLatency = CyclicLatency;
    }
  }
  DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
  return MaxCyclicLatency;
}

/// Move an instruction and update register pressure.
void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    if (ShouldTrackPressure) {
      // Update top scheduled pressure.
      TopRPTracker.advance();
      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
      updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
    }
  }
  else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
    }
    if (ShouldTrackPressure) {
      // Update bottom scheduled pressure.
      SmallVector<unsigned, 8> LiveUses;
      BotRPTracker.recede(&LiveUses);
      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
      updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
      updatePressureDiffs(LiveUses);
    }
  }
}

//===----------------------------------------------------------------------===//
// LoadClusterMutation - DAG post-processing to cluster loads.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between neighboring
/// loads.
class LoadClusterMutation : public ScheduleDAGMutation {
  struct LoadInfo {
    SUnit *SU;
    unsigned BaseReg;
    unsigned Offset;
    LoadInfo(SUnit *su, unsigned reg, unsigned ofs)
      : SU(su), BaseReg(reg), Offset(ofs) {}
  };
  static bool LoadInfoLess(const LoadClusterMutation::LoadInfo &LHS,
                           const LoadClusterMutation::LoadInfo &RHS);

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
public:
  LoadClusterMutation(const TargetInstrInfo *tii,
                      const TargetRegisterInfo *tri)
    : TII(tii), TRI(tri) {}

  virtual void apply(ScheduleDAGMI *DAG);
protected:
  void clusterNeighboringLoads(ArrayRef<SUnit*> Loads, ScheduleDAGMI *DAG);
};
} // anonymous

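// Order load records by base register, then by offset, so that loads off the
// same base become adjacent after sorting.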
bool LoadClusterMutation::LoadInfoLess(
  const LoadClusterMutation::LoadInfo &LHS,
  const LoadClusterMutation::LoadInfo &RHS) {
  if (LHS.BaseReg != RHS.BaseReg)
    return LHS.BaseReg < RHS.BaseReg;
  return LHS.Offset < RHS.Offset;
}

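/// Add weak cluster edges between runs of loads that share a base register,
/// letting the target decide via shouldClusterLoads how long a cluster grows.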
void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
                                                  ScheduleDAGMI *DAG) {
  SmallVector<LoadClusterMutation::LoadInfo, 32> LoadRecords;
  for (unsigned Idx = 0, End = Loads.size(); Idx != End; ++Idx) {
    SUnit *SU = Loads[Idx];
    unsigned BaseReg;
    unsigned Offset;
    if (TII->getLdStBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
      LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset));
  }
  if (LoadRecords.size() < 2)
    return;
  std::sort(LoadRecords.begin(), LoadRecords.end(), LoadInfoLess);
  unsigned ClusterLength = 1;
  for (unsigned Idx = 0, End = LoadRecords.size(); Idx < (End - 1); ++Idx) {
    if (LoadRecords[Idx].BaseReg != LoadRecords[Idx+1].BaseReg) {
      ClusterLength = 1;
      continue;
    }

    SUnit *SUa = LoadRecords[Idx].SU;
    SUnit *SUb = LoadRecords[Idx+1].SU;
    if (TII->shouldClusterLoads(SUa->getInstr(), SUb->getInstr(), ClusterLength)
        && DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {

      DEBUG(dbgs() << "Cluster loads SU(" << SUa->NodeNum << ") - SU("
            << SUb->NodeNum << ")\n");
      // Copy successor edges from SUa to SUb. Interleaving computation
      // dependent on SUa can prevent load combining due to register reuse.
      // Predecessor edges do not need to be copied from SUb to SUa since nearby
      // loads should have effectively the same inputs.
      for (SUnit::const_succ_iterator
             SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
        if (SI->getSUnit() == SUb)
          continue;
        DEBUG(dbgs() << "  Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
        DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
      }
      ++ClusterLength;
    }
    else
      ClusterLength = 1;
  }
}

/// \brief Callback from DAG postProcessing to create cluster edges for loads.
void LoadClusterMutation::apply(ScheduleDAGMI *DAG) {
  // Map DAG NodeNum to store chain ID.
  DenseMap<unsigned, unsigned> StoreChainIDs;
  // Map each store chain to a set of dependent loads.
  SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->mayLoad())
      continue;
    unsigned ChainPredID = DAG->SUnits.size();
    for (SUnit::const_pred_iterator
           PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
      if (PI->isCtrl()) {
        ChainPredID = PI->getSUnit()->NodeNum;
        break;
      }
    }
    // Check if this chain-like pred has been seen
    // before. ChainPredID==MaxNodeID for loads at the top of the schedule.
    unsigned NumChains = StoreChainDependents.size();
    std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
      StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
    if (Result.second)
      StoreChainDependents.resize(NumChains + 1);
    StoreChainDependents[Result.first->second].push_back(SU);
  }
  // Iterate over the store chains.
  for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
    clusterNeighboringLoads(StoreChainDependents[Idx], DAG);
}

//===----------------------------------------------------------------------===//
// MacroFusion - DAG post-processing to encourage fusion of macro ops.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between instructions
/// that may be fused by the processor into a single operation.
class MacroFusion : public ScheduleDAGMutation {
  const TargetInstrInfo *TII;
public:
  MacroFusion(const TargetInstrInfo *tii): TII(tii) {}

  virtual void apply(ScheduleDAGMI *DAG);
};
} // anonymous

/// \brief Callback from DAG postProcessing to create cluster edges to encourage
/// fused operations.
void MacroFusion::apply(ScheduleDAGMI *DAG) {
  // For now, assume targets can only fuse with the branch.
  MachineInstr *Branch = DAG->ExitSU.getInstr();
  if (!Branch)
    return;

  for (unsigned Idx = DAG->SUnits.size(); Idx > 0;) {
    SUnit *SU = &DAG->SUnits[--Idx];
    if (!TII->shouldScheduleAdjacent(SU->getInstr(), Branch))
      continue;

    // Create a single weak edge from SU to ExitSU. The only effect is to cause
    // bottom-up scheduling to heavily prioritize the clustered SU.  There is no
    // need to copy predecessor edges from ExitSU to SU, since top-down
    // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
    // of SU, we could create an artificial edge from the deepest root, but it
    // hasn't been needed yet.
    bool Success = DAG->addEdge(&DAG->ExitSU, SDep(SU, SDep::Cluster));
    (void)Success;
    assert(Success && "No DAG nodes should be reachable from ExitSU");

    DEBUG(dbgs() << "Macro Fuse SU(" << SU->NodeNum << ")\n");
    break;
  }
}

//===----------------------------------------------------------------------===//
// CopyConstrain - DAG post-processing to encourage copy elimination.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create weak edges from all uses of a copy to
/// the one use that defines the copy's source vreg, most likely an induction
/// variable increment.
class CopyConstrain : public ScheduleDAGMutation {
  // Transient state.
  SlotIndex RegionBeginIdx;
  // RegionEndIdx is the slot index of the last non-debug instruction in the
  // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
  SlotIndex RegionEndIdx;
public:
  CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}

  virtual void apply(ScheduleDAGMI *DAG);

protected:
  void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
};
} // anonymous

/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
/// I0:     = dst
/// I1: src = ...
/// I2:     = dst
/// I3: dst = src (copy)
/// (create pred->succ edges I0->I1, I2->I1)
///
/// 2) Local copy:
/// I0: dst = src (copy)
/// I1:     = dst
/// I2: src = ...
/// I3:     = dst
/// (create pred->succ edges I1->I2, I3->I2)
///
/// Although the MachineScheduler is currently constrained to single blocks,
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
  LiveIntervals *LIS = DAG->getLIS();
  MachineInstr *Copy = CopySU->getInstr();

  // Check for pure vreg copies.
  unsigned SrcReg = Copy->getOperand(1).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
    return;

  unsigned DstReg = Copy->getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(DstReg))
    return;

  // Check if either the dest or source is local. If it's live across a back
  // edge, it's not local. Note that if both vregs are live across the back
  // edge, we cannot successfully constrain the copy without cyclic scheduling.
  unsigned LocalReg = DstReg;
  unsigned GlobalReg = SrcReg;
  LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
  if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
    LocalReg = SrcReg;
    GlobalReg = DstReg;
    LocalLI = &LIS->getInterval(LocalReg);
    if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
      return;
  }
  LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);

  // Find the global segment after the start of the local LI.
  LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
  // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
  // local live range. We could create edges from other global uses to the local
  // start, but the coalescer should have already eliminated these cases, so
  // don't bother dealing with it.
  if (GlobalSegment == GlobalLI->end())
    return;

  // If GlobalSegment is killed at the LocalLI->start, the call to find()
  // returned the next global segment. But if GlobalSegment overlaps with
  // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
  // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
  if (GlobalSegment->contains(LocalLI->beginIndex()))
    ++GlobalSegment;

  if (GlobalSegment == GlobalLI->end())
    return;

  // Check if GlobalLI contains a hole in the vicinity of LocalLI.
  if (GlobalSegment != GlobalLI->begin()) {
    // Two address defs have no hole.
    if (SlotIndex::isSameInstr(llvm::prior(GlobalSegment)->end,
                               GlobalSegment->start)) {
      return;
    }
    // If the prior global segment may be defined by the same two-address
    // instruction that also defines LocalLI, then we can't make a hole here.
    if (SlotIndex::isSameInstr(llvm::prior(GlobalSegment)->start,
                               LocalLI->beginIndex())) {
      return;
    }
    // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
    // it would be a disconnected component in the live range.
    assert(llvm::prior(GlobalSegment)->start < LocalLI->beginIndex() &&
           "Disconnected live range within the scheduling region.");
  }
  MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
  if (!GlobalDef)
    return;

  SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
  if (!GlobalSU)
    return;

  // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
  // constraining the uses of the last local def to precede GlobalDef.
  SmallVector<SUnit*,8> LocalUses;
  const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
  MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
  SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
  for (SUnit::const_succ_iterator
         I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
       I != E; ++I) {
    if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
      continue;
    if (I->getSUnit() == GlobalSU)
      continue;
    if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
      return;
    LocalUses.push_back(I->getSUnit());
  }
  // Open the top of the GlobalLI hole by constraining any earlier global uses
  // to precede the start of LocalLI.
  SmallVector<SUnit*,8> GlobalUses;
  MachineInstr *FirstLocalDef =
    LIS->getInstructionFromIndex(LocalLI->beginIndex());
  SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
  for (SUnit::const_pred_iterator
         I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
    if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
      continue;
    if (I->getSUnit() == FirstLocalSU)
      continue;
    if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
      return;
    GlobalUses.push_back(I->getSUnit());
  }
  DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
  // Add the weak edges.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Local use SU(" << (*I)->NodeNum << ") -> SU("
          << GlobalSU->NodeNum << ")\n");
    DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
  }
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Global use SU(" << (*I)->NodeNum << ") -> SU("
          << FirstLocalSU->NodeNum << ")\n");
    DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
  }
}

1412 /// \brief Callback from DAG postProcessing to create weak edges to encourage
1413 /// copy elimination.
1414 void CopyConstrain::apply(ScheduleDAGMI *DAG) {
1415   assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");
1416 
1417   MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
1418   if (FirstPos == DAG->end())
1419     return;
1420   RegionBeginIdx = DAG->getLIS()->getInstructionIndex(&*FirstPos);
1421   RegionEndIdx = DAG->getLIS()->getInstructionIndex(
1422     &*priorNonDebug(DAG->end(), DAG->begin()));
1423 
1424   for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
1425     SUnit *SU = &DAG->SUnits[Idx];
1426     if (!SU->getInstr()->isCopy())
1427       continue;
1428 
1429     constrainLocalCopy(SU, static_cast<ScheduleDAGMILive*>(DAG));
1430   }
1431 }
1432 
1433 //===----------------------------------------------------------------------===//
1434 // MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
1435 // and possibly other custom schedulers.
1436 //===----------------------------------------------------------------------===//
1437 
1438 static const unsigned InvalidCycle = ~0U;
1439 
1440 SchedBoundary::~SchedBoundary() { delete HazardRec; }
1441 
1442 void SchedBoundary::reset() {
1443   // A new HazardRec is created for each DAG and owned by SchedBoundary.
1444   // Destroying and reconstructing it is very expensive, though, so keep the
1445   // disabled placeholder HazardRecs around.
1446   if (HazardRec && HazardRec->isEnabled()) {
1447     delete HazardRec;
1448     HazardRec = 0;
1449   }
1450   Available.clear();
1451   Pending.clear();
1452   CheckPending = false;
1453   NextSUs.clear();
1454   CurrCycle = 0;
1455   CurrMOps = 0;
1456   MinReadyCycle = UINT_MAX;
1457   ExpectedLatency = 0;
1458   DependentLatency = 0;
1459   RetiredMOps = 0;
1460   MaxExecutedResCount = 0;
1461   ZoneCritResIdx = 0;
1462   IsResourceLimited = false;
1463   ReservedCycles.clear();
1464 #ifndef NDEBUG
1465   MaxObservedLatency = 0;
1466 #endif
1467   // Reserve a zero-count for invalid CritResIdx.
1468   ExecutedResCounts.resize(1);
1469   assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
1470 }
1471 
1472 void SchedRemainder::
1473 init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
1474   reset();
1475   if (!SchedModel->hasInstrSchedModel())
1476     return;
1477   RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
1478   for (std::vector<SUnit>::iterator
1479          I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
1480     const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
1481     RemIssueCount += SchedModel->getNumMicroOps(I->getInstr(), SC)
1482       * SchedModel->getMicroOpFactor();
1483     for (TargetSchedModel::ProcResIter
1484            PI = SchedModel->getWriteProcResBegin(SC),
1485            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1486       unsigned PIdx = PI->ProcResourceIdx;
1487       unsigned Factor = SchedModel->getResourceFactor(PIdx);
1488       RemainingCounts[PIdx] += (Factor * PI->Cycles);
1489     }
1490   }
1491 }
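
// Illustrative note (not part of the pass): the counts accumulated above are
// kept in scaled "resource units" so different resource kinds stay comparable.
// A minimal worked example with made-up factors:
//
//   // Suppose MicroOpFactor == 2 and a hypothetical ALU has Factor == 3.
//   // An instruction with 2 micro-ops adds 2 * 2 = 4 units to RemIssueCount;
//   // occupying the ALU for 1 cycle adds 1 * 3 = 3 units to RemainingCounts.
//   // Dividing any scaled count by the latency factor converts it back to
//   // cycles, which is how the DEBUG output below prints "<N>c" values.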
1492 
1493 void SchedBoundary::
1494 init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
1495   reset();
1496   DAG = dag;
1497   SchedModel = smodel;
1498   Rem = rem;
1499   if (SchedModel->hasInstrSchedModel()) {
1500     ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds());
1501     ReservedCycles.resize(SchedModel->getNumProcResourceKinds(), InvalidCycle);
1502   }
1503 }
1504 
1505 /// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
1506 /// these "soft stalls" differently than the hard stall cycles based on CPU
1507 /// resources and computed by checkHazard(). A fully in-order model
1508 /// (MicroOpBufferSize==0) will not make use of this since instructions are not
1509 /// available for scheduling until they are ready. However, a weaker in-order
1510 /// model may use this for heuristics. For example, if a processor has in-order
1511 /// behavior when reading certain resources, this may come into play.
1512 unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
1513   if (!SU->isUnbuffered)
1514     return 0;
1515 
1516   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
1517   if (ReadyCycle > CurrCycle)
1518     return ReadyCycle - CurrCycle;
1519   return 0;
1520 }
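
// A minimal sketch (hypothetical helper, not in this file) of the soft-stall
// computation above: it is a saturating difference of two cycle counts.
//
//   static unsigned softStall(unsigned ReadyCycle, unsigned CurrCycle) {
//     // Stall only if the node becomes ready after the current cycle.
//     return ReadyCycle > CurrCycle ? ReadyCycle - CurrCycle : 0;
//   }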
1521 
1522 /// Compute the next cycle at which the given processor resource can be
1523 /// scheduled.
1524 unsigned SchedBoundary::
1525 getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
1526   unsigned NextUnreserved = ReservedCycles[PIdx];
1527   // If this resource has never been used, always return cycle zero.
1528   if (NextUnreserved == InvalidCycle)
1529     return 0;
1530   // For bottom-up scheduling add the cycles needed for the current operation.
1531   if (!isTop())
1532     NextUnreserved += Cycles;
1533   return NextUnreserved;
1534 }
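
// Illustrative example (made-up numbers): if ReservedCycles[PIdx] == 7 and the
// current operation needs Cycles == 2, a top-down zone may schedule it at
// cycle 7, while a bottom-up zone returns 7 + 2 = 9, because cycles count from
// the region's end and the reservation must complete before the already
// scheduled user of the resource.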
1535 
1536 /// Does this SU have a hazard within the current instruction group?
1537 ///
1538 /// The scheduler supports two modes of hazard recognition. The first is the
1539 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
1540 /// supports highly complicated in-order reservation tables
1541 /// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
1542 ///
1543 /// The second is a streamlined mechanism that checks for hazards based on
1544 /// simple counters that the scheduler itself maintains. It explicitly checks
1545 /// for instruction dispatch limitations, including the number of micro-ops that
1546 /// can dispatch per cycle.
1547 ///
1548 /// TODO: Also check whether the SU must start a new group.
1549 bool SchedBoundary::checkHazard(SUnit *SU) {
1550   if (HazardRec->isEnabled())
1551     return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard;
1552 
1553   unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
1554   if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
1555     DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
1556           << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
1557     return true;
1558   }
1559   if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
1560     const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1561     for (TargetSchedModel::ProcResIter
1562            PI = SchedModel->getWriteProcResBegin(SC),
1563            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1564       if (getNextResourceCycle(PI->ProcResourceIdx, PI->Cycles) > CurrCycle)
1565         return true;
1566     }
1567   }
1568   return false;
1569 }
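
// A minimal sketch (hypothetical helper) of the streamlined micro-op check
// above: a width test on counters the scheduler already maintains.
//
//   static bool wouldExceedGroup(unsigned CurrMOps, unsigned UOps,
//                                unsigned IssueWidth) {
//     // A non-empty group that cannot absorb UOps more micro-ops is a hazard.
//     return CurrMOps > 0 && CurrMOps + UOps > IssueWidth;
//   }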
1570 
1571 // Find the unscheduled node in ReadySUs with the highest latency.
1572 unsigned SchedBoundary::
1573 findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
1574   SUnit *LateSU = 0;
1575   unsigned RemLatency = 0;
1576   for (ArrayRef<SUnit*>::iterator I = ReadySUs.begin(), E = ReadySUs.end();
1577        I != E; ++I) {
1578     unsigned L = getUnscheduledLatency(*I);
1579     if (L > RemLatency) {
1580       RemLatency = L;
1581       LateSU = *I;
1582     }
1583   }
1584   if (LateSU) {
1585     DEBUG(dbgs() << Available.getName() << " RemLatency SU("
1586           << LateSU->NodeNum << ") " << RemLatency << "c\n");
1587   }
1588   return RemLatency;
1589 }
1590 
1591 // Count resources in this zone and the remaining unscheduled
1592 // instructions. Return the max count, scaled. Set OtherCritIdx to the critical
1593 // resource index, or zero if the zone is issue limited.
1594 unsigned SchedBoundary::
1595 getOtherResourceCount(unsigned &OtherCritIdx) {
1596   OtherCritIdx = 0;
1597   if (!SchedModel->hasInstrSchedModel())
1598     return 0;
1599 
1600   unsigned OtherCritCount = Rem->RemIssueCount
1601     + (RetiredMOps * SchedModel->getMicroOpFactor());
1602   DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
1603         << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
1604   for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
1605        PIdx != PEnd; ++PIdx) {
1606     unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
1607     if (OtherCount > OtherCritCount) {
1608       OtherCritCount = OtherCount;
1609       OtherCritIdx = PIdx;
1610     }
1611   }
1612   if (OtherCritIdx) {
1613     DEBUG(dbgs() << "  " << Available.getName() << " + Remain CritRes: "
1614           << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
1615           << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
1616   }
1617   return OtherCritCount;
1618 }
1619 
1620 void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) {
1621   if (ReadyCycle < MinReadyCycle)
1622     MinReadyCycle = ReadyCycle;
1623 
1624   // Check for interlocks first. For the purpose of other heuristics, an
1625   // instruction that cannot issue appears as if it's not in the ReadyQueue.
1626   bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
1627   if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU))
1628     Pending.push(SU);
1629   else
1630     Available.push(SU);
1631 
1632   // Record this node as an immediate dependent of the scheduled node.
1633   NextSUs.insert(SU);
1634 }
1635 
1636 void SchedBoundary::releaseTopNode(SUnit *SU) {
1637   if (SU->isScheduled)
1638     return;
1639 
1640   for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1641        I != E; ++I) {
1642     if (I->isWeak())
1643       continue;
1644     unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle;
1645     unsigned Latency = I->getLatency();
1646 #ifndef NDEBUG
1647     MaxObservedLatency = std::max(Latency, MaxObservedLatency);
1648 #endif
1649     if (SU->TopReadyCycle < PredReadyCycle + Latency)
1650       SU->TopReadyCycle = PredReadyCycle + Latency;
1651   }
1652   releaseNode(SU, SU->TopReadyCycle);
1653 }
1654 
1655 void SchedBoundary::releaseBottomNode(SUnit *SU) {
1656   if (SU->isScheduled)
1657     return;
1658 
1659   assert(SU->getInstr() && "Scheduled SUnit must have instr");
1660 
1661   for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1662        I != E; ++I) {
1663     if (I->isWeak())
1664       continue;
1665     unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
1666     unsigned Latency = I->getLatency();
1667 #ifndef NDEBUG
1668     MaxObservedLatency = std::max(Latency, MaxObservedLatency);
1669 #endif
1670     if (SU->BotReadyCycle < SuccReadyCycle + Latency)
1671       SU->BotReadyCycle = SuccReadyCycle + Latency;
1672   }
1673   releaseNode(SU, SU->BotReadyCycle);
1674 }
1675 
1676 /// Move the boundary of scheduled code by one cycle.
1677 void SchedBoundary::bumpCycle(unsigned NextCycle) {
1678   if (SchedModel->getMicroOpBufferSize() == 0) {
1679     assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
1680     if (MinReadyCycle > NextCycle)
1681       NextCycle = MinReadyCycle;
1682   }
1683   // Update the current micro-ops, which will issue in the next cycle.
1684   unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
1685   CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;
1686 
1687   // Decrement DependentLatency based on the next cycle.
1688   if ((NextCycle - CurrCycle) > DependentLatency)
1689     DependentLatency = 0;
1690   else
1691     DependentLatency -= (NextCycle - CurrCycle);
1692 
1693   if (!HazardRec->isEnabled()) {
1694     // Bypass HazardRec virtual calls.
1695     CurrCycle = NextCycle;
1696   }
1697   else {
1698     // Bypass getHazardType calls in case of long latency.
1699     for (; CurrCycle != NextCycle; ++CurrCycle) {
1700       if (isTop())
1701         HazardRec->AdvanceCycle();
1702       else
1703         HazardRec->RecedeCycle();
1704     }
1705   }
1706   CheckPending = true;
1707   unsigned LFactor = SchedModel->getLatencyFactor();
1708   IsResourceLimited =
1709     (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
1710     > (int)LFactor;
1711 
1712   DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n');
1713 }
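
// Worked example (made-up numbers): with IssueWidth == 4, advancing from cycle
// 10 to cycle 12 lets up to 4 * (12 - 10) = 8 of the in-flight micro-ops
// issue, so CurrMOps == 6 becomes 0 and CurrMOps == 9 would become 1. The
// final IsResourceLimited test asks whether the scaled critical-resource count
// exceeds the scheduled latency by more than one full cycle's worth of units.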
1714 
1715 void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
1716   ExecutedResCounts[PIdx] += Count;
1717   if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
1718     MaxExecutedResCount = ExecutedResCounts[PIdx];
1719 }
1720 
1721 /// Add the given processor resource to this scheduled zone.
1722 ///
1723 /// \param Cycles indicates the number of consecutive (non-pipelined) cycles
1724 /// during which this resource is consumed.
1725 ///
1726 /// \return the next cycle at which the instruction may execute without
1727 /// oversubscribing resources.
1728 unsigned SchedBoundary::
1729 countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
1730   unsigned Factor = SchedModel->getResourceFactor(PIdx);
1731   unsigned Count = Factor * Cycles;
1732   DEBUG(dbgs() << "  " << SchedModel->getResourceName(PIdx)
1733         << " +" << Cycles << "x" << Factor << "u\n");
1734 
1735   // Update Executed resources counts.
1736   incExecutedResources(PIdx, Count);
1737   assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
1738   Rem->RemainingCounts[PIdx] -= Count;
1739 
1740   // Check if this resource exceeds the current critical resource. If so, it
1741   // becomes the critical resource.
1742   if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
1743     ZoneCritResIdx = PIdx;
1744     DEBUG(dbgs() << "  *** Critical resource "
1745           << SchedModel->getResourceName(PIdx) << ": "
1746           << getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
1747   }
1748   // For reserved resources, record the highest cycle using the resource.
1749   unsigned NextAvailable = getNextResourceCycle(PIdx, Cycles);
1750   if (NextAvailable > CurrCycle) {
1751     DEBUG(dbgs() << "  Resource conflict: "
1752           << SchedModel->getProcResource(PIdx)->Name << " reserved until @"
1753           << NextAvailable << "\n");
1754   }
1755   return NextAvailable;
1756 }
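
// Worked example (made-up factors): a resource with Factor == 3 held for
// Cycles == 2 is charged 3 * 2 = 6 units here. With a latency factor of 3,
// the DEBUG output above reports the zone's running total as 6 / 3 = 2 cycles
// on that resource.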
1757 
1758 /// Move the boundary of scheduled code by one SUnit.
1759 void SchedBoundary::bumpNode(SUnit *SU) {
1760   // Update the reservation table.
1761   if (HazardRec->isEnabled()) {
1762     if (!isTop() && SU->isCall) {
1763       // Calls are scheduled with their preceding instructions. For bottom-up
1764       // scheduling, clear the pipeline state before emitting.
1765       HazardRec->Reset();
1766     }
1767     HazardRec->EmitInstruction(SU);
1768   }
1769   // checkHazard should prevent scheduling multiple instructions per cycle that
1770   // would exceed the issue width.
1771   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1772   unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
1773   assert(
1774       (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
1775       "Cannot schedule this instruction's MicroOps in the current cycle.");
1776 
1777   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
1778   DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");
1779 
1780   unsigned NextCycle = CurrCycle;
1781   switch (SchedModel->getMicroOpBufferSize()) {
1782   case 0:
1783     assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
1784     break;
1785   case 1:
1786     if (ReadyCycle > NextCycle) {
1787       NextCycle = ReadyCycle;
1788       DEBUG(dbgs() << "  *** Stall until: " << ReadyCycle << "\n");
1789     }
1790     break;
1791   default:
1792     // We don't currently model the OOO reorder buffer, so consider all
1793     // scheduled MOps to be "retired". We do loosely model in-order resource
1794     // latency. If this instruction uses an in-order resource, account for any
1795     // likely stall cycles.
1796     if (SU->isUnbuffered && ReadyCycle > NextCycle)
1797       NextCycle = ReadyCycle;
1798     break;
1799   }
1800   RetiredMOps += IncMOps;
1801 
1802   // Update resource counts and critical resource.
1803   if (SchedModel->hasInstrSchedModel()) {
1804     unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
1805     assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
1806     Rem->RemIssueCount -= DecRemIssue;
1807     if (ZoneCritResIdx) {
1808       // Scale scheduled micro-ops for comparing with the critical resource.
1809       unsigned ScaledMOps =
1810         RetiredMOps * SchedModel->getMicroOpFactor();
1811 
1812       // If scaled micro-ops are now more than the previous critical resource by
1813       // a full cycle, then micro-ops issue becomes critical.
1814       if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
1815           >= (int)SchedModel->getLatencyFactor()) {
1816         ZoneCritResIdx = 0;
1817         DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
1818               << ScaledMOps / SchedModel->getLatencyFactor() << "c\n");
1819       }
1820     }
1821     for (TargetSchedModel::ProcResIter
1822            PI = SchedModel->getWriteProcResBegin(SC),
1823            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1824       unsigned RCycle =
1825         countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
1826       if (RCycle > NextCycle)
1827         NextCycle = RCycle;
1828     }
1829     if (SU->hasReservedResource) {
1830       // For reserved resources, record the highest cycle using the resource.
1831       // For top-down scheduling, this is the cycle in which we schedule this
1832     // instruction plus the number of cycles the operation reserves the
1833     // resource. For bottom-up scheduling, it is simply the instruction's cycle.
1834       for (TargetSchedModel::ProcResIter
1835              PI = SchedModel->getWriteProcResBegin(SC),
1836              PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1837         unsigned PIdx = PI->ProcResourceIdx;
1838         if (SchedModel->getProcResource(PIdx)->BufferSize == 0)
1839           ReservedCycles[PIdx] = isTop() ? NextCycle + PI->Cycles : NextCycle;
1840       }
1841     }
1842   }
1843   // Update ExpectedLatency and DependentLatency.
1844   unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
1845   unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
1846   if (SU->getDepth() > TopLatency) {
1847     TopLatency = SU->getDepth();
1848     DEBUG(dbgs() << "  " << Available.getName()
1849           << " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n");
1850   }
1851   if (SU->getHeight() > BotLatency) {
1852     BotLatency = SU->getHeight();
1853     DEBUG(dbgs() << "  " << Available.getName()
1854           << " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n");
1855   }
1856   // If we stall for any reason, bump the cycle.
1857   if (NextCycle > CurrCycle) {
1858     bumpCycle(NextCycle);
1859   }
1860   else {
1861     // After updating ZoneCritResIdx and ExpectedLatency, check if we're
1862     // resource limited. If a stall occurred, bumpCycle does this.
1863     unsigned LFactor = SchedModel->getLatencyFactor();
1864     IsResourceLimited =
1865       (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
1866       > (int)LFactor;
1867   }
1868   // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
1869   // resets CurrMOps. Loop to handle instructions with more MOps than issue in
1870   // one cycle.  Since we commonly reach the max MOps here, opportunistically
1871   // bump the cycle to avoid uselessly checking everything in the readyQ.
1872   CurrMOps += IncMOps;
1873   while (CurrMOps >= SchedModel->getIssueWidth()) {
1874     bumpCycle(++NextCycle);
1875     DEBUG(dbgs() << "  *** Max MOps " << CurrMOps
1876           << " at cycle " << CurrCycle << '\n');
1877   }
1878   DEBUG(dumpScheduledState());
1879 }
1880 
1881 /// Release pending ready nodes into the available queue. This makes them
1882 /// visible to heuristics.
1883 void SchedBoundary::releasePending() {
1884   // If the available queue is empty, it is safe to reset MinReadyCycle.
1885   if (Available.empty())
1886     MinReadyCycle = UINT_MAX;
1887 
1888   // Check to see if any of the pending instructions are ready to issue.  If
1889   // so, add them to the available queue.
1890   bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
1891   for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
1892     SUnit *SU = *(Pending.begin()+i);
1893     unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
1894 
1895     if (ReadyCycle < MinReadyCycle)
1896       MinReadyCycle = ReadyCycle;
1897 
1898     if (!IsBuffered && ReadyCycle > CurrCycle)
1899       continue;
1900 
1901     if (checkHazard(SU))
1902       continue;
1903 
1904     Available.push(SU);
1905     Pending.remove(Pending.begin()+i);
1906     --i; --e;
1907   }
1908   DEBUG(if (!Pending.empty()) Pending.dump());
1909   CheckPending = false;
1910 }
1911 
1912 /// Remove SU from the ready set for this boundary.
1913 void SchedBoundary::removeReady(SUnit *SU) {
1914   if (Available.isInQueue(SU))
1915     Available.remove(Available.find(SU));
1916   else {
1917     assert(Pending.isInQueue(SU) && "bad ready count");
1918     Pending.remove(Pending.find(SU));
1919   }
1920 }
1921 
1922 /// If this queue only has one ready candidate, return it. As a side effect,
1923 /// defer any nodes that now hit a hazard, and advance the cycle until at least
1924 /// one node is ready. If multiple instructions are ready, return NULL.
1925 SUnit *SchedBoundary::pickOnlyChoice() {
1926   if (CheckPending)
1927     releasePending();
1928 
1929   if (CurrMOps > 0) {
1930     // Defer any ready instrs that now have a hazard.
1931     for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
1932       if (checkHazard(*I)) {
1933         Pending.push(*I);
1934         I = Available.remove(I);
1935         continue;
1936       }
1937       ++I;
1938     }
1939   }
1940   for (unsigned i = 0; Available.empty(); ++i) {
1941     assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedLatency) &&
1942            "permanent hazard"); (void)i;
1943     bumpCycle(CurrCycle + 1);
1944     releasePending();
1945   }
1946   if (Available.size() == 1)
1947     return *Available.begin();
1948   return NULL;
1949 }
1950 
1951 #ifndef NDEBUG
1952 // This is useful information to dump after bumpNode.
1953 // Note that the Queue contents are more useful before pickNodeFromQueue.
1954 void SchedBoundary::dumpScheduledState() {
1955   unsigned ResFactor;
1956   unsigned ResCount;
1957   if (ZoneCritResIdx) {
1958     ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
1959     ResCount = getResourceCount(ZoneCritResIdx);
1960   }
1961   else {
1962     ResFactor = SchedModel->getMicroOpFactor();
1963     ResCount = RetiredMOps * SchedModel->getMicroOpFactor();
1964   }
1965   unsigned LFactor = SchedModel->getLatencyFactor();
1966   dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
1967          << "  Retired: " << RetiredMOps;
1968   dbgs() << "\n  Executed: " << getExecutedCount() / LFactor << "c";
1969   dbgs() << "\n  Critical: " << ResCount / LFactor << "c, "
1970          << ResCount / ResFactor << " "
1971          << SchedModel->getResourceName(ZoneCritResIdx)
1972          << "\n  ExpectedLatency: " << ExpectedLatency << "c\n"
1973          << (IsResourceLimited ? "  - Resource" : "  - Latency")
1974          << " limited.\n";
1975 }
1976 #endif
1977 
1978 //===----------------------------------------------------------------------===//
1979 // GenericScheduler - Implementation of the generic MachineSchedStrategy.
1980 //===----------------------------------------------------------------------===//
1981 
1982 namespace {
1983 /// GenericScheduler shrinks the unscheduled zone using heuristics to balance
1984 /// the schedule.
1985 class GenericScheduler : public MachineSchedStrategy {
1986 public:
1987   /// Represent the type of SchedCandidate found within a single queue.
1988   /// pickNodeBidirectional depends on these listed by decreasing priority.
1989   enum CandReason {
1990     NoCand, PhysRegCopy, RegExcess, RegCritical, Stall, Cluster, Weak, RegMax,
1991     ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce,
1992     TopDepthReduce, TopPathReduce, NextDefUse, NodeOrder};
1993 
1994 #ifndef NDEBUG
1995   static const char *getReasonStr(GenericScheduler::CandReason Reason);
1996 #endif
1997 
1998   /// Policy for scheduling the next instruction in the candidate's zone.
1999   struct CandPolicy {
2000     bool ReduceLatency;
2001     unsigned ReduceResIdx;
2002     unsigned DemandResIdx;
2003 
2004     CandPolicy(): ReduceLatency(false), ReduceResIdx(0), DemandResIdx(0) {}
2005   };
2006 
2007   /// Status of an instruction's critical resource consumption.
2008   struct SchedResourceDelta {
2009     // Count critical resources in the scheduled region required by SU.
2010     unsigned CritResources;
2011 
2012     // Count critical resources from another region consumed by SU.
2013     unsigned DemandedResources;
2014 
2015     SchedResourceDelta(): CritResources(0), DemandedResources(0) {}
2016 
2017     bool operator==(const SchedResourceDelta &RHS) const {
2018       return CritResources == RHS.CritResources
2019         && DemandedResources == RHS.DemandedResources;
2020     }
2021     bool operator!=(const SchedResourceDelta &RHS) const {
2022       return !operator==(RHS);
2023     }
2024   };
2025 
2026   /// Store the state used by GenericScheduler heuristics, required for the
2027   /// lifetime of one invocation of pickNode().
2028   struct SchedCandidate {
2029     CandPolicy Policy;
2030 
2031     // The best SUnit candidate.
2032     SUnit *SU;
2033 
2034     // The reason for this candidate.
2035     CandReason Reason;
2036 
2037     // Set of reasons that apply to multiple candidates.
2038     uint32_t RepeatReasonSet;
2039 
2040     // Register pressure values for the best candidate.
2041     RegPressureDelta RPDelta;
2042 
2043     // Critical resource consumption of the best candidate.
2044     SchedResourceDelta ResDelta;
2045 
2046     SchedCandidate(const CandPolicy &policy)
2047       : Policy(policy), SU(NULL), Reason(NoCand), RepeatReasonSet(0) {}
2048 
2049     bool isValid() const { return SU; }
2050 
2051     // Copy the status of another candidate without changing policy.
2052     void setBest(SchedCandidate &Best) {
2053       assert(Best.Reason != NoCand && "uninitialized Sched candidate");
2054       SU = Best.SU;
2055       Reason = Best.Reason;
2056       RPDelta = Best.RPDelta;
2057       ResDelta = Best.ResDelta;
2058     }
2059 
2060     bool isRepeat(CandReason R) { return RepeatReasonSet & (1 << R); }
2061     void setRepeat(CandReason R) { RepeatReasonSet |= (1 << R); }
2062 
2063     void initResourceDelta(const ScheduleDAGMILive *DAG,
2064                            const TargetSchedModel *SchedModel);
2065   };
2066 
2067 private:
2068   const MachineSchedContext *Context;
2069   ScheduleDAGMILive *DAG;
2070   const TargetSchedModel *SchedModel;
2071   const TargetRegisterInfo *TRI;
2072 
2073   // State of the top and bottom scheduled instruction boundaries.
2074   SchedRemainder Rem;
2075   SchedBoundary Top;
2076   SchedBoundary Bot;
2077 
2078   MachineSchedPolicy RegionPolicy;
2079 public:
2080   GenericScheduler(const MachineSchedContext *C):
2081     Context(C), DAG(0), SchedModel(0), TRI(0),
2082     Top(SchedBoundary::TopQID, "TopQ"), Bot(SchedBoundary::BotQID, "BotQ") {}
2083 
2084   virtual void initPolicy(MachineBasicBlock::iterator Begin,
2085                           MachineBasicBlock::iterator End,
2086                           unsigned NumRegionInstrs);
2087 
2088   bool shouldTrackPressure() const { return RegionPolicy.ShouldTrackPressure; }
2089 
2090   virtual void initialize(ScheduleDAGMI *dag);
2091 
2092   virtual SUnit *pickNode(bool &IsTopNode);
2093 
2094   virtual void schedNode(SUnit *SU, bool IsTopNode);
2095 
2096   virtual void releaseTopNode(SUnit *SU) { Top.releaseTopNode(SU); }
2097 
2098   virtual void releaseBottomNode(SUnit *SU) { Bot.releaseBottomNode(SU); }
2099 
2100   virtual void registerRoots();
2101 
2102 protected:
2103   void checkAcyclicLatency();
2104 
2105   void setPolicy(CandPolicy &Policy, SchedBoundary &CurrZone,
2106                  SchedBoundary &OtherZone);
2107 
2108   void tryCandidate(SchedCandidate &Cand,
2109                     SchedCandidate &TryCand,
2110                     SchedBoundary &Zone,
2111                     const RegPressureTracker &RPTracker,
2112                     RegPressureTracker &TempTracker);
2113 
2114   SUnit *pickNodeBidirectional(bool &IsTopNode);
2115 
2116   void pickNodeFromQueue(SchedBoundary &Zone,
2117                          const RegPressureTracker &RPTracker,
2118                          SchedCandidate &Candidate);
2119 
2120   void reschedulePhysRegCopies(SUnit *SU, bool isTop);
2121 
2122 #ifndef NDEBUG
2123   void traceCandidate(const SchedCandidate &Cand);
2124 #endif
2125 };
2126 } // namespace
2127 
2128 void GenericScheduler::initialize(ScheduleDAGMI *dag) {
2129   assert(dag->hasVRegLiveness() &&
2130          "(PreRA)GenericScheduler needs vreg liveness");
2131   DAG = static_cast<ScheduleDAGMILive*>(dag);
2132   SchedModel = DAG->getSchedModel();
2133   TRI = DAG->TRI;
2134 
2135   Rem.init(DAG, SchedModel);
2136   Top.init(DAG, SchedModel, &Rem);
2137   Bot.init(DAG, SchedModel, &Rem);
2138 
2139   // Initialize resource counts.
2140 
2141   // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
2142   // are disabled, then these HazardRecs will be disabled.
2143   const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
2144   const TargetMachine &TM = DAG->MF.getTarget();
2145   if (!Top.HazardRec) {
2146     Top.HazardRec =
2147       TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
2148   }
2149   if (!Bot.HazardRec) {
2150     Bot.HazardRec =
2151       TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
2152   }
2153 }
2154 
2155 /// Initialize the per-region scheduling policy.
2156 void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
2157                                   MachineBasicBlock::iterator End,
2158                                   unsigned NumRegionInstrs) {
2159   const TargetMachine &TM = Context->MF->getTarget();
2160 
2161   // Avoid setting up the register pressure tracker for small regions to save
2162   // compile time. As a rough heuristic, only track pressure when the number of
2163   // schedulable instructions exceeds half the integer register file.
2164   unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
2165     TM.getTargetLowering()->getRegClassFor(MVT::i32));
2166 
2167   RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
2168 
2169   // For generic targets, we default to bottom-up, because it's simpler and more
2170   // compile-time optimizations have been implemented in that direction.
2171   RegionPolicy.OnlyBottomUp = true;
2172 
2173   // Allow the subtarget to override default policy.
2174   const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
2175   ST.overrideSchedPolicy(RegionPolicy, Begin, End, NumRegionInstrs);
2176 
2177   // After subtarget overrides, apply command line options.
2178   if (!EnableRegPressure)
2179     RegionPolicy.ShouldTrackPressure = false;
2180 
2181   // Check whether -misched-topdown/-misched-bottomup force or unforce the
2182   // scheduling direction; e.g. -misched-bottomup=false allows both directions.
2183   assert((!ForceTopDown || !ForceBottomUp) &&
2184          "-misched-topdown incompatible with -misched-bottomup");
2185   if (ForceBottomUp.getNumOccurrences() > 0) {
2186     RegionPolicy.OnlyBottomUp = ForceBottomUp;
2187     if (RegionPolicy.OnlyBottomUp)
2188       RegionPolicy.OnlyTopDown = false;
2189   }
2190   if (ForceTopDown.getNumOccurrences() > 0) {
2191     RegionPolicy.OnlyTopDown = ForceTopDown;
2192     if (RegionPolicy.OnlyTopDown)
2193       RegionPolicy.OnlyBottomUp = false;
2194   }
2195 }
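
// Illustrative usage (assuming the machine scheduler runs for the target):
// the hidden flags consulted above can override the per-region direction from
// the llc command line, e.g.:
//
//   llc -misched-topdown foo.ll          (force top-down list scheduling)
//   llc -misched-bottomup=false foo.ll   (unforce: allow both directions)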
2196 
2197 /// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
2198 /// critical path by more cycles than it takes to drain the instruction buffer.
2199 /// We estimate an upper bound on in-flight instructions as:
2200 ///
2201 /// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
2202 /// InFlightIterations = AcyclicPath / CyclesPerIteration
2203 /// InFlightResources = InFlightIterations * LoopResources
2204 ///
2205 /// TODO: Check execution resources in addition to IssueCount.
2206 void GenericScheduler::checkAcyclicLatency() {
2207   if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
2208     return;
2209 
2210   // Scaled number of cycles per loop iteration.
2211   unsigned IterCount =
2212     std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
2213              Rem.RemIssueCount);
2214   // Scaled acyclic critical path.
2215   unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
2216   // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
2217   unsigned InFlightCount =
2218     (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
2219   unsigned BufferLimit =
2220     SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();
2221 
2222   Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;
2223 
2224   DEBUG(dbgs() << "IssueCycles="
2225         << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
2226         << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
2227         << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
2228         << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
2229         << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
2230         if (Rem.IsAcyclicLatencyLimited)
2231           dbgs() << "  ACYCLIC LATENCY LIMIT\n");
2232 }
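
// Worked example (made-up numbers, assuming LatencyFactor == 2 and
// MicroOpFactor == 2): suppose IterCount == 8 units, AcyclicCount == 24 units,
// and Rem.RemIssueCount == 8. Then InFlightCount == (24 * 8 + 7) / 8 == 24
// micro-op units. With MicroOpBufferSize == 10, BufferLimit == 10 * 2 == 20,
// so 24 > 20 flags the loop as acyclic-latency limited.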
2233 
2234 void GenericScheduler::registerRoots() {
2235   Rem.CriticalPath = DAG->ExitSU.getDepth();
2236 
2237   // Some roots may not feed into ExitSU. Check all of them in case.
2238   for (std::vector<SUnit*>::const_iterator
2239          I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
2240     if ((*I)->getDepth() > Rem.CriticalPath)
2241       Rem.CriticalPath = (*I)->getDepth();
2242   }
2243   DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n');
2244 
2245   if (EnableCyclicPath) {
2246     Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
2247     checkAcyclicLatency();
2248   }
2249 }
2250 
2251 /// Set the CandPolicy for a scheduling zone given the current resources and
2252 /// latencies inside and outside the zone.
2253 void GenericScheduler::setPolicy(CandPolicy &Policy, SchedBoundary &CurrZone,
2254                                  SchedBoundary &OtherZone) {
2255   // Apply preemptive heuristics based on the total latency and resources
2256   // inside and outside this zone. Potential stalls should be considered before
2257   // following this policy.
2258 
2259   // Compute remaining latency. We need this both to determine whether the
2260   // overall schedule has become latency-limited and whether the instructions
2261   // outside this zone are resource or latency limited.
2262   //
2263   // The "dependent" latency is updated incrementally during scheduling as the
2264   // max height/depth of scheduled nodes minus the cycles since it was
2265   // scheduled:
2266   //   DLat = max (N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
2267   //
2268   // The "independent" latency is the max ready queue depth:
2269   //   ILat = max N.depth for N in Available|Pending
2270   //
2271   // RemainingLatency is the greater of independent and dependent latency.
2272   unsigned RemLatency = CurrZone.getDependentLatency();
2273   RemLatency = std::max(RemLatency,
2274                         CurrZone.findMaxLatency(CurrZone.Available.elements()));
2275   RemLatency = std::max(RemLatency,
2276                         CurrZone.findMaxLatency(CurrZone.Pending.elements()));
2277 
2278   // Compute the critical resource outside the zone.
2279   unsigned OtherCritIdx;
2280   unsigned OtherCount = OtherZone.getOtherResourceCount(OtherCritIdx);
2281 
2282   bool OtherResLimited = false;
2283   if (SchedModel->hasInstrSchedModel()) {
2284     unsigned LFactor = SchedModel->getLatencyFactor();
2285     OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
2286   }
2287   if (!OtherResLimited
2288       && (RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath)) {
2289     Policy.ReduceLatency |= true;
2290     DEBUG(dbgs() << "  " << CurrZone.Available.getName() << " RemainingLatency "
2291           << RemLatency << " + " << CurrZone.getCurrCycle() << "c > CritPath "
2292           << Rem.CriticalPath << "\n");
2293   }
2294   // If the same resource is limiting inside and outside the zone, do nothing.
2295   if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
2296     return;
2297 
2298   DEBUG(
2299     if (CurrZone.isResourceLimited()) {
2300       dbgs() << "  " << CurrZone.Available.getName() << " ResourceLimited: "
2301              << SchedModel->getResourceName(CurrZone.getZoneCritResIdx())
2302              << "\n";
2303     }
2304     if (OtherResLimited)
2305       dbgs() << "  RemainingLimit: "
2306              << SchedModel->getResourceName(OtherCritIdx) << "\n";
2307     if (!CurrZone.isResourceLimited() && !OtherResLimited)
2308       dbgs() << "  Latency limited both directions.\n");
2309 
2310   if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
2311     Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();
2312 
2313   if (OtherResLimited)
2314     Policy.DemandResIdx = OtherCritIdx;
2315 }
2316 
2317 void GenericScheduler::SchedCandidate::
2318 initResourceDelta(const ScheduleDAGMILive *DAG,
2319                   const TargetSchedModel *SchedModel) {
2320   if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
2321     return;
2322 
2323   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2324   for (TargetSchedModel::ProcResIter
2325          PI = SchedModel->getWriteProcResBegin(SC),
2326          PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2327     if (PI->ProcResourceIdx == Policy.ReduceResIdx)
2328       ResDelta.CritResources += PI->Cycles;
2329     if (PI->ProcResourceIdx == Policy.DemandResIdx)
2330       ResDelta.DemandedResources += PI->Cycles;
2331   }
2332 }
2333 
2334 /// Return true if this heuristic determines order.
2335 static bool tryLess(int TryVal, int CandVal,
2336                     GenericScheduler::SchedCandidate &TryCand,
2337                     GenericScheduler::SchedCandidate &Cand,
2338                     GenericScheduler::CandReason Reason) {
2339   if (TryVal < CandVal) {
2340     TryCand.Reason = Reason;
2341     return true;
2342   }
2343   if (TryVal > CandVal) {
2344     if (Cand.Reason > Reason)
2345       Cand.Reason = Reason;
2346     return true;
2347   }
2348   Cand.setRepeat(Reason);
2349   return false;
2350 }
2351 
2352 static bool tryGreater(int TryVal, int CandVal,
2353                        GenericScheduler::SchedCandidate &TryCand,
2354                        GenericScheduler::SchedCandidate &Cand,
2355                        GenericScheduler::CandReason Reason) {
2356   if (TryVal > CandVal) {
2357     TryCand.Reason = Reason;
2358     return true;
2359   }
2360   if (TryVal < CandVal) {
2361     if (Cand.Reason > Reason)
2362       Cand.Reason = Reason;
2363     return true;
2364   }
2365   Cand.setRepeat(Reason);
2366   return false;
2367 }
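
// Illustrative usage (hypothetical values): tryLess/tryGreater encode a
// three-way protocol. tryGreater(1, 0, TryCand, Cand, Cluster) makes the try
// candidate win with reason Cluster; tryGreater(0, 1, ...) keeps the incumbent
// but promotes its reason to Cluster if it was weaker (numerically greater);
// a tie records the reason via setRepeat and returns false so the next
// heuristic in tryCandidate can decide.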
2368 
2369 static bool tryPressure(const PressureChange &TryP,
2370                         const PressureChange &CandP,
2371                         GenericScheduler::SchedCandidate &TryCand,
2372                         GenericScheduler::SchedCandidate &Cand,
2373                         GenericScheduler::CandReason Reason) {
2374   int TryRank = TryP.getPSetOrMax();
2375   int CandRank = CandP.getPSetOrMax();
2376   // If both candidates affect the same set, go with the smallest increase.
2377   if (TryRank == CandRank) {
2378     return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
2379                    Reason);
2380   }
2381   // If one candidate decreases and the other increases, go with it.
2382   // Invalid candidates have UnitInc==0.
2383   if (tryLess(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
2384               Reason)) {
2385     return true;
2386   }
2387   // If the candidates are decreasing pressure, reverse priority.
2388   if (TryP.getUnitInc() < 0)
2389     std::swap(TryRank, CandRank);
2390   return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
2391 }
2392 
2393 static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
2394   return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
2395 }
2396 
2397 /// Minimize physical register live ranges. Regalloc wants them adjacent to
2398 /// their physreg def/use.
2399 ///
2400 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
2401 /// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
2402 /// with the operation that produces or consumes the physreg. We'll do this when
2403 /// regalloc has support for parallel copies.
2404 static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
2405   const MachineInstr *MI = SU->getInstr();
2406   if (!MI->isCopy())
2407     return 0;
2408 
2409   unsigned ScheduledOper = isTop ? 1 : 0;
2410   unsigned UnscheduledOper = isTop ? 0 : 1;
2411   // If we have already scheduled the physreg producer/consumer, immediately
2412   // schedule the copy.
2413   if (TargetRegisterInfo::isPhysicalRegister(
2414         MI->getOperand(ScheduledOper).getReg()))
2415     return 1;
2416   // If the physreg is at the boundary, defer it. Otherwise schedule it
2417   // immediately to free the dependent. We can hoist the copy later.
2418   bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
2419   if (TargetRegisterInfo::isPhysicalRegister(
2420         MI->getOperand(UnscheduledOper).getReg()))
2421     return AtBoundary ? -1 : 1;
2422   return 0;
2423 }
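
// Illustrative note: this relies on the operand layout of a COPY, where
// operand 0 is the def and operand 1 is the source. For a top-down zone the
// already scheduled side is the source (operand 1); for bottom-up it is the
// def (operand 0), matching the ScheduledOper/UnscheduledOper indices above.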
2424 
2425 static bool tryLatency(GenericScheduler::SchedCandidate &TryCand,
2426                        GenericScheduler::SchedCandidate &Cand,
2427                        SchedBoundary &Zone) {
2428   if (Zone.isTop()) {
2429     if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
2430       if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2431                   TryCand, Cand, GenericScheduler::TopDepthReduce))
2432         return true;
2433     }
2434     if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2435                    TryCand, Cand, GenericScheduler::TopPathReduce))
2436       return true;
2437   }
2438   else {
2439     if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
2440       if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2441                   TryCand, Cand, GenericScheduler::BotHeightReduce))
2442         return true;
2443     }
2444     if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2445                    TryCand, Cand, GenericScheduler::BotPathReduce))
2446       return true;
2447   }
2448   return false;
2449 }
2450 
2451 /// Apply a set of heuristics to a new candidate. Heuristics are currently
2452 /// hierarchical. This may be more efficient than a graduated cost model because
2453 /// we don't need to evaluate all aspects of the model for each node in the
2454 /// queue. But it's really done to make the heuristics easier to debug and
2455 /// statistically analyze.
2456 ///
2457 /// \param Cand provides the policy and current best candidate.
2458 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
2459 /// \param Zone describes the scheduled zone that we are extending.
2460 /// \param RPTracker describes reg pressure within the scheduled zone.
2461 /// \param TempTracker is a scratch pressure tracker to reuse in queries.
2462 void GenericScheduler::tryCandidate(SchedCandidate &Cand,
2463                                     SchedCandidate &TryCand,
2464                                     SchedBoundary &Zone,
2465                                     const RegPressureTracker &RPTracker,
2466                                     RegPressureTracker &TempTracker) {
2467 
2468   if (DAG->isTrackingPressure()) {
2469     // Always initialize TryCand's RPDelta.
2470     if (Zone.isTop()) {
2471       TempTracker.getMaxDownwardPressureDelta(
2472         TryCand.SU->getInstr(),
2473         TryCand.RPDelta,
2474         DAG->getRegionCriticalPSets(),
2475         DAG->getRegPressure().MaxSetPressure);
2476     }
2477     else {
2478       if (VerifyScheduling) {
2479         TempTracker.getMaxUpwardPressureDelta(
2480           TryCand.SU->getInstr(),
2481           &DAG->getPressureDiff(TryCand.SU),
2482           TryCand.RPDelta,
2483           DAG->getRegionCriticalPSets(),
2484           DAG->getRegPressure().MaxSetPressure);
2485       }
2486       else {
2487         RPTracker.getUpwardPressureDelta(
2488           TryCand.SU->getInstr(),
2489           DAG->getPressureDiff(TryCand.SU),
2490           TryCand.RPDelta,
2491           DAG->getRegionCriticalPSets(),
2492           DAG->getRegPressure().MaxSetPressure);
2493       }
2494     }
2495   }
2496   DEBUG(if (TryCand.RPDelta.Excess.isValid())
2497           dbgs() << "  SU(" << TryCand.SU->NodeNum << ") "
2498                  << TRI->getRegPressureSetName(TryCand.RPDelta.Excess.getPSet())
2499                  << ":" << TryCand.RPDelta.Excess.getUnitInc() << "\n");
2500 
2501   // Initialize the candidate if needed.
2502   if (!Cand.isValid()) {
2503     TryCand.Reason = NodeOrder;
2504     return;
2505   }
2506 
2507   if (tryGreater(biasPhysRegCopy(TryCand.SU, Zone.isTop()),
2508                  biasPhysRegCopy(Cand.SU, Zone.isTop()),
2509                  TryCand, Cand, PhysRegCopy))
2510     return;
2511 
2512   // Avoid exceeding the target's limit. If signed PSetID is negative, it is
2513   // invalid; convert it to INT_MAX to give it lowest priority.
2514   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
2515                                                Cand.RPDelta.Excess,
2516                                                TryCand, Cand, RegExcess))
2517     return;
2518 
2519   // Avoid increasing the max critical pressure in the scheduled region.
2520   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
2521                                                Cand.RPDelta.CriticalMax,
2522                                                TryCand, Cand, RegCritical))
2523     return;
2524 
2525   // For loops that are acyclic path limited, aggressively schedule for latency.
2526   // This can result in very long dependence chains scheduled in sequence, so
2527   // once every cycle (when CurrMOps == 0), switch to normal heuristics.
2528   if (Rem.IsAcyclicLatencyLimited && !Zone.getCurrMOps()
2529       && tryLatency(TryCand, Cand, Zone))
2530     return;
2531 
2532   // Prioritize instructions that read unbuffered resources by stall cycles.
2533   if (tryLess(Zone.getLatencyStallCycles(TryCand.SU),
2534               Zone.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
2535     return;
2536 
2537   // Keep clustered nodes together to encourage downstream peephole
2538   // optimizations which may reduce resource requirements.
2539   //
2540   // This is a best effort to set things up for a post-RA pass. Optimizations
2541   // like generating loads of multiple registers should ideally be done within
2542   // the scheduler pass by combining the loads during DAG postprocessing.
2543   const SUnit *NextClusterSU =
2544     Zone.isTop() ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
2545   if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU,
2546                  TryCand, Cand, Cluster))
2547     return;
2548 
2549   // Weak edges are for clustering and other constraints.
2550   if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()),
2551               getWeakLeft(Cand.SU, Zone.isTop()),
2552               TryCand, Cand, Weak)) {
2553     return;
2554   }
2555   // Avoid increasing the max pressure of the entire region.
2556   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
2557                                                Cand.RPDelta.CurrentMax,
2558                                                TryCand, Cand, RegMax))
2559     return;
2560 
2561   // Avoid critical resource consumption and balance the schedule.
2562   TryCand.initResourceDelta(DAG, SchedModel);
2563   if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
2564               TryCand, Cand, ResourceReduce))
2565     return;
2566   if (tryGreater(TryCand.ResDelta.DemandedResources,
2567                  Cand.ResDelta.DemandedResources,
2568                  TryCand, Cand, ResourceDemand))
2569     return;
2570 
2571   // Avoid serializing long latency dependence chains.
2572   // For acyclic path limited loops, latency was already checked above.
2573   if (Cand.Policy.ReduceLatency && !Rem.IsAcyclicLatencyLimited
2574       && tryLatency(TryCand, Cand, Zone)) {
2575     return;
2576   }
2577 
2578   // Prefer immediate defs/users of the last scheduled instruction. This is a
2579   // local pressure avoidance strategy that also makes the machine code
2580   // readable.
2581   if (tryGreater(Zone.isNextSU(TryCand.SU), Zone.isNextSU(Cand.SU),
2582                  TryCand, Cand, NextDefUse))
2583     return;
2584 
2585   // Fall through to original instruction order.
2586   if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
2587       || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
2588     TryCand.Reason = NodeOrder;
2589   }
2590 }
2591 
2592 #ifndef NDEBUG
2593 const char *GenericScheduler::getReasonStr(
2594   GenericScheduler::CandReason Reason) {
2595   switch (Reason) {
2596   case NoCand:         return "NOCAND    ";
2597   case PhysRegCopy:    return "PREG-COPY";
2598   case RegExcess:      return "REG-EXCESS";
2599   case RegCritical:    return "REG-CRIT  ";
2600   case Stall:          return "STALL     ";
2601   case Cluster:        return "CLUSTER   ";
2602   case Weak:           return "WEAK      ";
2603   case RegMax:         return "REG-MAX   ";
2604   case ResourceReduce: return "RES-REDUCE";
2605   case ResourceDemand: return "RES-DEMAND";
2606   case TopDepthReduce: return "TOP-DEPTH ";
2607   case TopPathReduce:  return "TOP-PATH  ";
2608   case BotHeightReduce:return "BOT-HEIGHT";
2609   case BotPathReduce:  return "BOT-PATH  ";
2610   case NextDefUse:     return "DEF-USE   ";
2611   case NodeOrder:      return "ORDER     ";
2612   }
2613   llvm_unreachable("Unknown reason!");
2614 }
2615 
2616 void GenericScheduler::traceCandidate(const SchedCandidate &Cand) {
2617   PressureChange P;
2618   unsigned ResIdx = 0;
2619   unsigned Latency = 0;
2620   switch (Cand.Reason) {
2621   default:
2622     break;
2623   case RegExcess:
2624     P = Cand.RPDelta.Excess;
2625     break;
2626   case RegCritical:
2627     P = Cand.RPDelta.CriticalMax;
2628     break;
2629   case RegMax:
2630     P = Cand.RPDelta.CurrentMax;
2631     break;
2632   case ResourceReduce:
2633     ResIdx = Cand.Policy.ReduceResIdx;
2634     break;
2635   case ResourceDemand:
2636     ResIdx = Cand.Policy.DemandResIdx;
2637     break;
2638   case TopDepthReduce:
2639     Latency = Cand.SU->getDepth();
2640     break;
2641   case TopPathReduce:
2642     Latency = Cand.SU->getHeight();
2643     break;
2644   case BotHeightReduce:
2645     Latency = Cand.SU->getHeight();
2646     break;
2647   case BotPathReduce:
2648     Latency = Cand.SU->getDepth();
2649     break;
2650   }
2651   dbgs() << "  SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
2652   if (P.isValid())
2653     dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
2654            << ":" << P.getUnitInc() << " ";
2655   else
2656     dbgs() << "      ";
2657   if (ResIdx)
2658     dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
2659   else
2660     dbgs() << "         ";
2661   if (Latency)
2662     dbgs() << " " << Latency << " cycles ";
2663   else
2664     dbgs() << "          ";
2665   dbgs() << '\n';
2666 }
2667 #endif
2668 
2669 /// Pick the best candidate from the queue.
2670 ///
2671 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
2672 /// DAG building. To adjust for the current scheduling location we need to
2673 /// maintain the number of vreg uses remaining to be top-scheduled.
2674 void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
2675                                          const RegPressureTracker &RPTracker,
2676                                          SchedCandidate &Cand) {
2677   ReadyQueue &Q = Zone.Available;
2678 
2679   DEBUG(Q.dump());
2680 
2681   // getMaxPressureDelta temporarily modifies the tracker.
2682   RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
2683 
2684   for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
2685 
2686     SchedCandidate TryCand(Cand.Policy);
2687     TryCand.SU = *I;
2688     tryCandidate(Cand, TryCand, Zone, RPTracker, TempTracker);
2689     if (TryCand.Reason != NoCand) {
2690       // Initialize resource delta if needed in case future heuristics query it.
2691       if (TryCand.ResDelta == SchedResourceDelta())
2692         TryCand.initResourceDelta(DAG, SchedModel);
2693       Cand.setBest(TryCand);
2694       DEBUG(traceCandidate(Cand));
2695     }
2696   }
2697 }
2698 
2699 static void tracePick(const GenericScheduler::SchedCandidate &Cand,
2700                       bool IsTop) {
2701   DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
2702         << GenericScheduler::getReasonStr(Cand.Reason) << '\n');
2703 }
2704 
2705 /// Pick the best candidate node from either the top or bottom queue.
2706 SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
2707   // Schedule as far as possible in the direction of no choice. This is most
2708   // efficient, but also provides the best heuristics for CriticalPSets.
2709   if (SUnit *SU = Bot.pickOnlyChoice()) {
2710     IsTopNode = false;
2711     DEBUG(dbgs() << "Pick Bot NOCAND\n");
2712     return SU;
2713   }
2714   if (SUnit *SU = Top.pickOnlyChoice()) {
2715     IsTopNode = true;
2716     DEBUG(dbgs() << "Pick Top NOCAND\n");
2717     return SU;
2718   }
2719   CandPolicy NoPolicy;
2720   SchedCandidate BotCand(NoPolicy);
2721   SchedCandidate TopCand(NoPolicy);
2722   // Set the bottom-up policy based on the state of the current bottom zone and
2723   // the instructions outside the zone, including the top zone.
2724   setPolicy(BotCand.Policy, Bot, Top);
2725   // Set the top-down policy based on the state of the current top zone and
2726   // the instructions outside the zone, including the bottom zone.
2727   setPolicy(TopCand.Policy, Top, Bot);
2728 
2729   // Prefer bottom scheduling when heuristics are silent.
2730   pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
2731   assert(BotCand.Reason != NoCand && "failed to find the first candidate");
2732 
2733   // If either Q has a single candidate that provides the least increase in
2734   // Excess pressure, we can immediately schedule from that Q.
2735   //
2736   // RegionCriticalPSets summarizes the pressure within the scheduled region and
2737   // affects picking from either Q. If scheduling in one direction must
2738   // increase pressure for one of the excess PSets, then schedule in that
2739   // direction first to provide more freedom in the other direction.
2740   if ((BotCand.Reason == RegExcess && !BotCand.isRepeat(RegExcess))
2741       || (BotCand.Reason == RegCritical
2742           && !BotCand.isRepeat(RegCritical)))
2743   {
2744     IsTopNode = false;
2745     tracePick(BotCand, IsTopNode);
2746     return BotCand.SU;
2747   }
2748   // Check if the top Q has a better candidate.
2749   pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2750   assert(TopCand.Reason != NoCand && "failed to find the first candidate");
2751 
2752   // Choose the queue with the most important (lowest enum) reason.
2753   if (TopCand.Reason < BotCand.Reason) {
2754     IsTopNode = true;
2755     tracePick(TopCand, IsTopNode);
2756     return TopCand.SU;
2757   }
2758   // Otherwise prefer the bottom candidate, in node order if all else failed.
2759   IsTopNode = false;
2760   tracePick(BotCand, IsTopNode);
2761   return BotCand.SU;
2762 }
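
// Editor's note (illustrative): tryCandidate() records a CandReason whose enum
// order encodes importance, lowest value first. So if TopCand wins on, say,
// RegExcess while BotCand's best reason is RegCritical, the test
// "TopCand.Reason < BotCand.Reason" above picks the top node; on a tie the
// bottom candidate wins, keeping the default bottom-up bias.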
2763 
2764 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
2765 SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
2766   if (DAG->top() == DAG->bottom()) {
2767     assert(Top.Available.empty() && Top.Pending.empty() &&
2768            Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
2769     return NULL;
2770   }
2771   SUnit *SU;
2772   do {
2773     if (RegionPolicy.OnlyTopDown) {
2774       SU = Top.pickOnlyChoice();
2775       if (!SU) {
2776         CandPolicy NoPolicy;
2777         SchedCandidate TopCand(NoPolicy);
2778         pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2779         assert(TopCand.Reason != NoCand && "failed to find a candidate");
2780         tracePick(TopCand, true);
2781         SU = TopCand.SU;
2782       }
2783       IsTopNode = true;
2784     }
2785     else if (RegionPolicy.OnlyBottomUp) {
2786       SU = Bot.pickOnlyChoice();
2787       if (!SU) {
2788         CandPolicy NoPolicy;
2789         SchedCandidate BotCand(NoPolicy);
2790         pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
2791         assert(BotCand.Reason != NoCand && "failed to find a candidate");
2792         tracePick(BotCand, false);
2793         SU = BotCand.SU;
2794       }
2795       IsTopNode = false;
2796     }
2797     else {
2798       SU = pickNodeBidirectional(IsTopNode);
2799     }
2800   } while (SU->isScheduled);
2801 
2802   if (SU->isTopReady())
2803     Top.removeReady(SU);
2804   if (SU->isBottomReady())
2805     Bot.removeReady(SU);
2806 
2807   DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
2808   return SU;
2809 }
2810 
2811 void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
2812 
2813   MachineBasicBlock::iterator InsertPos = SU->getInstr();
2814   if (!isTop)
2815     ++InsertPos;
2816   SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;
2817 
2818   // Find already scheduled copies with a single physreg dependence and move
2819   // them next to the scheduled instruction (above it if top-down, below if not).
2820   for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end();
2821        I != E; ++I) {
2822     if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg()))
2823       continue;
2824     SUnit *DepSU = I->getSUnit();
2825     if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
2826       continue;
2827     MachineInstr *Copy = DepSU->getInstr();
2828     if (!Copy->isCopy())
2829       continue;
2830     DEBUG(dbgs() << "  Rescheduling physreg copy ";
2831           I->getSUnit()->dump(DAG));
2832     DAG->moveInstruction(Copy, InsertPos);
2833   }
2834 }
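
// Editor's note: a sketch of the bottom-up case with hypothetical operands.
// Suppose SU is a call that defines %EAX, and %vreg1 = COPY %EAX is its only
// already-scheduled physreg consumer:
//
//   before:  CALL ...   <other scheduled instrs>   %vreg1 = COPY %EAX
//   after:   CALL ...   %vreg1 = COPY %EAX   <other scheduled instrs>
//
// Keeping the copy adjacent to the def shrinks %EAX's live range.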
2835 
2836 /// Update the scheduler's state after scheduling a node. This is the same node
2837 /// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
2838 /// update its state based on the current cycle before MachineSchedStrategy does.
2839 ///
2840 /// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
2841 /// them here. See comments in biasPhysRegCopy.
2842 void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
2843   if (IsTopNode) {
2844     SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
2845     Top.bumpNode(SU);
2846     if (SU->hasPhysRegUses)
2847       reschedulePhysRegCopies(SU, true);
2848   }
2849   else {
2850     SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
2851     Bot.bumpNode(SU);
2852     if (SU->hasPhysRegDefs)
2853       reschedulePhysRegCopies(SU, false);
2854   }
2855 }
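
// Editor's note: the std::max() clamps ensure the recorded ready cycle never
// precedes the cycle at which the zone actually issues the node; e.g. a node
// ready since cycle 3 but picked at CurrCycle 7 records 7 before bumpNode()
// accounts for its latency and resource usage.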
2856 
2857 /// Create a generic scheduler with no DAG mutation passes.
2858 static ScheduleDAGInstrs *createRawGenericSched(MachineSchedContext *C) {
2859   return new ScheduleDAGMILive(C, new GenericScheduler(C));
2860 }
2861 
2862 /// Create the standard converging machine scheduler. This will be used as the
2863 /// default scheduler if the target does not set a default.
2864 static ScheduleDAGInstrs *createGenericSched(MachineSchedContext *C) {
2865   ScheduleDAGMILive *DAG =
2866     static_cast<ScheduleDAGMILive*>(createRawGenericSched(C));
2867   // Register DAG post-processors.
2868   //
2869   // FIXME: extend the mutation API to allow earlier mutations to instantiate
2870   // data and pass it to later mutations. Have a single mutation that gathers
2871   // the interesting nodes in one pass.
2872   DAG->addMutation(new CopyConstrain(DAG->TII, DAG->TRI));
2873   if (EnableLoadCluster && DAG->TII->enableClusterLoads())
2874     DAG->addMutation(new LoadClusterMutation(DAG->TII, DAG->TRI));
2875   if (EnableMacroFusion)
2876     DAG->addMutation(new MacroFusion(DAG->TII));
2877   return DAG;
2878 }
2879 static MachineSchedRegistry
2880 GenericSchedRegistry("converge", "Standard converging scheduler.",
2881                      createGenericSched);
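
// Editor's note: a minimal sketch (hypothetical MyStrategy/createMySched, not
// part of this file) of hooking the same registry from a target or plugin;
// the entry then becomes selectable via -misched=my-sched:
//
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, new MyStrategy(C));
//   }
//   static MachineSchedRegistry
//   MySchedRegistry("my-sched", "Example custom scheduler.", createMySched);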
2882 
2883 //===----------------------------------------------------------------------===//
2884 // ILP Scheduler. Currently for experimental analysis of heuristics.
2885 //===----------------------------------------------------------------------===//
2886 
2887 namespace {
2888 /// \brief Order nodes by the ILP metric.
2889 struct ILPOrder {
2890   const SchedDFSResult *DFSResult;
2891   const BitVector *ScheduledTrees;
2892   bool MaximizeILP;
2893 
2894   ILPOrder(bool MaxILP): DFSResult(0), ScheduledTrees(0), MaximizeILP(MaxILP) {}
2895 
2896   /// \brief Apply a less-than relation on node priority.
2897   ///
2898   /// (Return true if A comes after B in the Q.)
2899   bool operator()(const SUnit *A, const SUnit *B) const {
2900     unsigned SchedTreeA = DFSResult->getSubtreeID(A);
2901     unsigned SchedTreeB = DFSResult->getSubtreeID(B);
2902     if (SchedTreeA != SchedTreeB) {
2903       // Unscheduled trees have lower priority.
2904       if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
2905         return ScheduledTrees->test(SchedTreeB);
2906 
2907       // Trees with shallower connections have lower priority.
2908       if (DFSResult->getSubtreeLevel(SchedTreeA)
2909           != DFSResult->getSubtreeLevel(SchedTreeB)) {
2910         return DFSResult->getSubtreeLevel(SchedTreeA)
2911           < DFSResult->getSubtreeLevel(SchedTreeB);
2912       }
2913     }
2914     if (MaximizeILP)
2915       return DFSResult->getILP(A) < DFSResult->getILP(B);
2916     else
2917       return DFSResult->getILP(A) > DFSResult->getILP(B);
2918   }
2919 };
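
// Editor's note: ReadyQ below is maintained with the std::*_heap algorithms
// using ILPOrder, so returning true ("A after B") gives B the higher priority.
// E.g. with MaximizeILP and both nodes in the same subtree, getILP(A) <
// getILP(B) surfaces B first; with !MaximizeILP the comparison is inverted
// and the lower-ILP node surfaces first.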
2920 
2921 /// \brief Schedule based on the ILP metric.
2922 class ILPScheduler : public MachineSchedStrategy {
2923   ScheduleDAGMILive *DAG;
2924   ILPOrder Cmp;
2925 
2926   std::vector<SUnit*> ReadyQ;
2927 public:
2928   ILPScheduler(bool MaximizeILP): DAG(0), Cmp(MaximizeILP) {}
2929 
2930   virtual void initialize(ScheduleDAGMI *dag) {
2931     assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
2932     DAG = static_cast<ScheduleDAGMILive*>(dag);
2933     DAG->computeDFSResult();
2934     Cmp.DFSResult = DAG->getDFSResult();
2935     Cmp.ScheduledTrees = &DAG->getScheduledTrees();
2936     ReadyQ.clear();
2937   }
2938 
2939   virtual void registerRoots() {
2940     // Restore the heap in ReadyQ with the updated DFS results.
2941     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2942   }
2943 
2944   /// Implement MachineSchedStrategy interface.
2945   /// -----------------------------------------
2946 
2947   /// Callback to select the highest priority node from the ready Q.
2948   virtual SUnit *pickNode(bool &IsTopNode) {
2949     if (ReadyQ.empty()) return NULL;
2950     std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2951     SUnit *SU = ReadyQ.back();
2952     ReadyQ.pop_back();
2953     IsTopNode = false;
2954     DEBUG(dbgs() << "Pick node SU(" << SU->NodeNum << ") "
2955           << " ILP: " << DAG->getDFSResult()->getILP(SU)
2956           << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @"
2957           << DAG->getDFSResult()->getSubtreeLevel(
2958             DAG->getDFSResult()->getSubtreeID(SU)) << '\n'
2959           << "Scheduling " << *SU->getInstr());
2960     return SU;
2961   }
2962 
2963   /// \brief Scheduler callback to notify that a new subtree is scheduled.
2964   virtual void scheduleTree(unsigned SubtreeID) {
2965     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2966   }
2967 
2968   /// Callback after a node is scheduled. Newly scheduled trees are marked and
2969   /// the heap resorted via scheduleTree(); here we just assert bottom-up order.
2970   virtual void schedNode(SUnit *SU, bool IsTopNode) {
2971     assert(!IsTopNode && "SchedDFSResult needs bottom-up");
2972   }
2973 
2974   virtual void releaseTopNode(SUnit *) { /*only called for top roots*/ }
2975 
2976   virtual void releaseBottomNode(SUnit *SU) {
2977     ReadyQ.push_back(SU);
2978     std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2979   }
2980 };
2981 } // namespace
2982 
2983 static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
2984   return new ScheduleDAGMILive(C, new ILPScheduler(true));
2985 }
2986 static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
2987   return new ScheduleDAGMILive(C, new ILPScheduler(false));
2988 }
2989 static MachineSchedRegistry ILPMaxRegistry(
2990   "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
2991 static MachineSchedRegistry ILPMinRegistry(
2992   "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);
2993 
2994 //===----------------------------------------------------------------------===//
2995 // Machine Instruction Shuffler for Correctness Testing
2996 //===----------------------------------------------------------------------===//
2997 
2998 #ifndef NDEBUG
2999 namespace {
3000 /// Apply a less-than relation on the node order, which corresponds to the
3001 /// instruction order prior to scheduling. IsReverse implements greater-than.
3002 template<bool IsReverse>
3003 struct SUnitOrder {
3004   bool operator()(SUnit *A, SUnit *B) const {
3005     if (IsReverse)
3006       return A->NodeNum > B->NodeNum;
3007     else
3008       return A->NodeNum < B->NodeNum;
3009   }
3010 };
3011 
3012 /// Reorder instructions as much as possible.
3013 class InstructionShuffler : public MachineSchedStrategy {
3014   bool IsAlternating;
3015   bool IsTopDown;
3016 
3017   // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
3018   // gives nodes with a higher number higher priority, causing the latest
3019   // instructions to be scheduled first.
3020   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
3021     TopQ;
3022   // When scheduling bottom-up, use greater-than as the queue priority.
3023   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
3024     BottomQ;
3025 public:
3026   InstructionShuffler(bool alternate, bool topdown)
3027     : IsAlternating(alternate), IsTopDown(topdown) {}
3028 
3029   virtual void initialize(ScheduleDAGMI*) {
3030     TopQ.clear();
3031     BottomQ.clear();
3032   }
3033 
3034   /// Implement MachineSchedStrategy interface.
3035   /// -----------------------------------------
3036 
3037   virtual SUnit *pickNode(bool &IsTopNode) {
3038     SUnit *SU;
3039     if (IsTopDown) {
3040       do {
3041         if (TopQ.empty()) return NULL;
3042         SU = TopQ.top();
3043         TopQ.pop();
3044       } while (SU->isScheduled);
3045       IsTopNode = true;
3046     }
3047     else {
3048       do {
3049         if (BottomQ.empty()) return NULL;
3050         SU = BottomQ.top();
3051         BottomQ.pop();
3052       } while (SU->isScheduled);
3053       IsTopNode = false;
3054     }
3055     if (IsAlternating)
3056       IsTopDown = !IsTopDown;
3057     return SU;
3058   }
3059 
3060   virtual void schedNode(SUnit *SU, bool IsTopNode) {}
3061 
3062   virtual void releaseTopNode(SUnit *SU) {
3063     TopQ.push(SU);
3064   }
3065   virtual void releaseBottomNode(SUnit *SU) {
3066     BottomQ.push(SU);
3067   }
3068 };
3069 } // namespace
3070 
3071 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
3072   bool Alternate = !ForceTopDown && !ForceBottomUp;
3073   bool TopDown = !ForceBottomUp;
3074   assert((TopDown || !ForceTopDown) &&
3075          "-misched-topdown incompatible with -misched-bottomup");
3076   return new ScheduleDAGMILive(C, new InstructionShuffler(Alternate, TopDown));
3077 }
3078 static MachineSchedRegistry ShufflerRegistry(
3079   "shuffle", "Shuffle machine instructions alternating directions",
3080   createInstructionShuffler);
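
// Editor's note: in debug builds the shuffler is reachable the same way,
// e.g. llc -enable-misched -misched=shuffle; passing -misched-topdown or
// -misched-bottomup pins a single direction instead of alternating.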
3081 #endif // !NDEBUG
3082 
3083 //===----------------------------------------------------------------------===//
3084 // GraphWriter support for ScheduleDAGMILive.
3085 //===----------------------------------------------------------------------===//
3086 
3087 #ifndef NDEBUG
3088 namespace llvm {
3089 
3090 template<> struct GraphTraits<
3091   ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};
3092 
3093 template<>
3094 struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
3095 
3096   DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
3097 
3098   static std::string getGraphName(const ScheduleDAG *G) {
3099     return G->MF.getName();
3100   }
3101 
3102   static bool renderGraphFromBottomUp() {
3103     return true;
3104   }
3105 
3106   static bool isNodeHidden(const SUnit *Node) {
3107     return (Node->Preds.size() > 10 || Node->Succs.size() > 10);
3108   }
3109 
3110   static bool hasNodeAddressLabel(const SUnit *Node,
3111                                   const ScheduleDAG *Graph) {
3112     return false;
3113   }
3114 
3115   /// If you want to override the dot attributes printed for a particular
3116   /// edge, override this method.
3117   static std::string getEdgeAttributes(const SUnit *Node,
3118                                        SUnitIterator EI,
3119                                        const ScheduleDAG *Graph) {
3120     if (EI.isArtificialDep())
3121       return "color=cyan,style=dashed";
3122     if (EI.isCtrlDep())
3123       return "color=blue,style=dashed";
3124     return "";
3125   }
3126 
3127   static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
3128     std::string Str;
3129     raw_string_ostream SS(Str);
3130     const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
3131     const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
3132       static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : 0;
3133     SS << "SU:" << SU->NodeNum;
3134     if (DFS)
3135       SS << " I:" << DFS->getNumInstrs(SU);
3136     return SS.str();
3137   }
3138   static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
3139     return G->getGraphNodeLabel(SU);
3140   }
3141 
3142   static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
3143     std::string Str("shape=Mrecord");
3144     const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
3145     const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
3146       static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : 0;
3147     if (DFS) {
3148       Str += ",style=filled,fillcolor=\"#";
3149       Str += DOT::getColorString(DFS->getSubtreeID(N));
3150       Str += '"';
3151     }
3152     return Str;
3153   }
3154 };
3155 } // namespace llvm
3156 #endif // NDEBUG
3157 
3158 /// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
3159 /// rendered using 'dot'.
3160 ///
3161 void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
3162 #ifndef NDEBUG
3163   ViewGraph(this, Name, false, Title);
3164 #else
3165   errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
3166          << "systems with Graphviz or gv!\n";
3167 #endif  // NDEBUG
3168 }
3169 
3170 /// Out-of-line implementation with no arguments is handy for gdb.
3171 void ScheduleDAGMI::viewGraph() {
3172   viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
3173 }
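
// Editor's note: e.g. while stopped inside the scheduler under a debugger,
// something like "call this->viewGraph()" (gdb syntax) renders the current
// DAG, provided this is a debug build and Graphviz/gv are installed.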
3174