//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/AntiDepBreaker.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "post-RA-sched"

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtargetInfo.enablePostRAScheduler(). This flag can be used to
// override the target.
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);
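// For example, assuming the usual llc command-line plumbing, a run such as
//   llc -post-RA-scheduler -break-anti-dependencies=critical foo.ll
// forces the pass on and breaks only critical-path anti-dependencies.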

// If DebugDiv > 0 then only schedule blocks whose running count satisfies
// (count % DebugDiv) == DebugMod.
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
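// For example, -postra-sched-debugdiv=8 -postra-sched-debugmod=3 schedules
// only every eighth block (those whose running count is 3 mod 8), which can
// help bisect a scheduling problem down to a single block.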

AntiDepBreaker::~AntiDepBreaker() = default;

namespace {
class PostRAScheduler : public MachineFunctionPass {
  const TargetInstrInfo *TII = nullptr;
  RegisterClassInfo RegClassInfo;

public:
  static char ID;
  PostRAScheduler() : MachineFunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
    AU.addRequired<MachineLoopInfo>();
    AU.addPreserved<MachineLoopInfo>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }

  bool runOnMachineFunction(MachineFunction &Fn) override;

private:
  bool enablePostRAScheduler(
      const TargetSubtargetInfo &ST, CodeGenOpt::Level OptLevel,
      TargetSubtargetInfo::AntiDepBreakMode &Mode,
      TargetSubtargetInfo::RegClassVector &CriticalPathRCs) const;
};
char PostRAScheduler::ID = 0;

class SchedulePostRATDList : public ScheduleDAGInstrs {
  /// AvailableQueue - The priority queue to use for the available SUnits.
  ///
  LatencyPriorityQueue AvailableQueue;

  /// PendingQueue - This contains all of the instructions whose operands have
  /// been issued, but their results are not ready yet (due to the latency of
  /// the operation). Once the operands become available, the instruction is
  /// added to the AvailableQueue.
  std::vector<SUnit*> PendingQueue;

  /// HazardRec - The hazard recognizer to use.
  ScheduleHazardRecognizer *HazardRec;

  /// AntiDepBreak - Anti-dependence breaking object, or NULL if none
  AntiDepBreaker *AntiDepBreak;

  /// AA - AliasAnalysis for making memory reference queries.
  AliasAnalysis *AA;

  /// The schedule. Null SUnit*'s represent noop instructions.
  std::vector<SUnit*> Sequence;

  /// Ordered list of DAG postprocessing steps.
  std::vector<std::unique_ptr<ScheduleDAGMutation>> Mutations;

  /// The index in BB of RegionEnd.
  ///
  /// This is the instruction number from the top of the current block, not
  /// the SlotIndex. It is only used by the AntiDepBreaker.
  unsigned EndIndex = 0;

public:
  SchedulePostRATDList(
      MachineFunction &MF, MachineLoopInfo &MLI, AliasAnalysis *AA,
      const RegisterClassInfo &,
      TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
      SmallVectorImpl<const TargetRegisterClass *> &CriticalPathRCs);

  ~SchedulePostRATDList() override;

  /// startBlock - Initialize register live-range state for scheduling in
  /// this block.
  ///
  void startBlock(MachineBasicBlock *BB) override;

  // Set the index of RegionEnd within the current BB.
  void setEndIndex(unsigned EndIdx) { EndIndex = EndIdx; }

  /// Initialize the scheduler state for the next scheduling region.
  void enterRegion(MachineBasicBlock *bb,
                   MachineBasicBlock::iterator begin,
                   MachineBasicBlock::iterator end,
                   unsigned regioninstrs) override;

  /// Notify that the scheduler has finished scheduling the current region.
  void exitRegion() override;

  /// Schedule - Schedule the instruction range using list scheduling.
  ///
  void schedule() override;

  void EmitSchedule();

  /// Observe - Update liveness information to account for the current
  /// instruction, which will not be scheduled.
  ///
  void Observe(MachineInstr &MI, unsigned Count);

  /// finishBlock - Clean up register live-range state.
  ///
  void finishBlock() override;

private:
  /// Apply each ScheduleDAGMutation step in order.
  void postprocessDAG();

  void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
  void ReleaseSuccessors(SUnit *SU);
  void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
  void ListScheduleTopDown();

  void dumpSchedule() const;
  void emitNoop(unsigned CurCycle);
};
} // end anonymous namespace

char &llvm::PostRASchedulerID = PostRAScheduler::ID;

INITIALIZE_PASS(PostRAScheduler, DEBUG_TYPE,
                "Post RA top-down list latency scheduler", false, false)

SchedulePostRATDList::SchedulePostRATDList(
    MachineFunction &MF, MachineLoopInfo &MLI, AliasAnalysis *AA,
    const RegisterClassInfo &RCI,
    TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
    SmallVectorImpl<const TargetRegisterClass *> &CriticalPathRCs)
    : ScheduleDAGInstrs(MF, &MLI), AA(AA) {

  const InstrItineraryData *InstrItins =
      MF.getSubtarget().getInstrItineraryData();
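  // The default hook builds a scoreboard hazard recognizer from the target's
  // instruction itineraries; targets may override it to supply their own.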
  HazardRec =
      MF.getSubtarget().getInstrInfo()->CreateTargetPostRAHazardRecognizer(
          InstrItins, this);
  MF.getSubtarget().getPostRAMutations(Mutations);

  assert((AntiDepMode == TargetSubtargetInfo::ANTIDEP_NONE ||
          MRI.tracksLiveness()) &&
         "Live-ins must be accurate for anti-dependency breaking");
  AntiDepBreak = ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_ALL)
                      ? createAggressiveAntiDepBreaker(MF, RCI, CriticalPathRCs)
                      : ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_CRITICAL)
                             ? createCriticalAntiDepBreaker(MF, RCI)
                             : nullptr));
}

SchedulePostRATDList::~SchedulePostRATDList() {
  delete HazardRec;
  delete AntiDepBreak;
}

/// Initialize state associated with the next scheduling region.
void SchedulePostRATDList::enterRegion(MachineBasicBlock *bb,
                                       MachineBasicBlock::iterator begin,
                                       MachineBasicBlock::iterator end,
                                       unsigned regioninstrs) {
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);
  Sequence.clear();
}

/// Print the schedule before exiting the region.
void SchedulePostRATDList::exitRegion() {
  LLVM_DEBUG({
    dbgs() << "*** Final schedule ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
  ScheduleDAGInstrs::exitRegion();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// dumpSchedule - dump the scheduled Sequence.
LLVM_DUMP_METHOD void SchedulePostRATDList::dumpSchedule() const {
  for (const SUnit *SU : Sequence) {
    if (SU)
      dumpNode(*SU);
    else
      dbgs() << "**** NOOP ****\n";
  }
}
#endif

bool PostRAScheduler::enablePostRAScheduler(
    const TargetSubtargetInfo &ST,
    CodeGenOpt::Level OptLevel,
    TargetSubtargetInfo::AntiDepBreakMode &Mode,
    TargetSubtargetInfo::RegClassVector &CriticalPathRCs) const {
  Mode = ST.getAntiDepBreakMode();
  ST.getCriticalPathRCs(CriticalPathRCs);

  // Check for explicit enable/disable of post-ra scheduling.
  if (EnablePostRAScheduler.getPosition() > 0)
    return EnablePostRAScheduler;

  return ST.enablePostRAScheduler() &&
         OptLevel >= ST.getOptLevelToEnablePostRAScheduler();
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  if (skipFunction(Fn.getFunction()))
    return false;

  TII = Fn.getSubtarget().getInstrInfo();
  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();

  RegClassInfo.runOnMachineFunction(Fn);

  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode =
      TargetSubtargetInfo::ANTIDEP_NONE;
  SmallVector<const TargetRegisterClass*, 4> CriticalPathRCs;

  // Check that post-RA scheduling is enabled for this target.
  // This may upgrade the AntiDepMode.
  if (!enablePostRAScheduler(Fn.getSubtarget(), PassConfig->getOptLevel(),
                             AntiDepMode, CriticalPathRCs))
    return false;

  // Check for antidep breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all")
                      ? TargetSubtargetInfo::ANTIDEP_ALL
                      : ((EnableAntiDepBreaking == "critical")
                             ? TargetSubtargetInfo::ANTIDEP_CRITICAL
                             : TargetSubtargetInfo::ANTIDEP_NONE);
  }

  LLVM_DEBUG(dbgs() << "PostRAScheduler\n");

  SchedulePostRATDList Scheduler(Fn, MLI, AA, RegClassInfo, AntiDepMode,
                                 CriticalPathRCs);

  // Loop over all of the basic blocks
  for (auto &MBB : Fn) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule blocks whose running count satisfies
    // (count % DebugDiv) == DebugMod.
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      dbgs() << "*** DEBUG scheduling " << Fn.getName() << ":"
             << printMBBReference(MBB) << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.startBlock(&MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB.end();
    unsigned Count = MBB.size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB.begin();) {
      MachineInstr &MI = *std::prev(I);
      --Count;
      // Calls are not scheduling boundaries before register allocation, but
      // post-ra we don't gain anything by scheduling across calls since we
      // don't need to worry about register pressure.
      if (MI.isCall() || TII->isSchedulingBoundary(MI, &MBB, Fn)) {
        Scheduler.enterRegion(&MBB, I, Current, CurrentCount - Count);
        Scheduler.setEndIndex(CurrentCount);
        Scheduler.schedule();
        Scheduler.exitRegion();
        Scheduler.EmitSchedule();
        Current = &MI;
        CurrentCount = Count;
        Scheduler.Observe(MI, CurrentCount);
      }
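      // Step the iterator back onto MI so the next round examines the
      // instruction that precedes it.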
      I = MI;
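      // Count tracks individual instructions (MBB.size() includes the
      // instructions inside bundles), so also subtract the bundled
      // instructions that this bundle-level iterator steps over as one unit.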
      if (MI.isBundle())
        Count -= MI.getBundleSize();
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB.begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.enterRegion(&MBB, MBB.begin(), Current, CurrentCount);
    Scheduler.setEndIndex(CurrentCount);
    Scheduler.schedule();
    Scheduler.exitRegion();
    Scheduler.EmitSchedule();

    // Clean up register live-range state.
    Scheduler.finishBlock();

    // Update register kills
    Scheduler.fixupKills(MBB);
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::startBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::startBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak)
    AntiDepBreak->StartBlock(BB);
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::schedule() {
  // Build the scheduling graph.
  buildSchedGraph(AA);

  if (AntiDepBreak) {
    unsigned Broken =
        AntiDepBreak->BreakAntiDependencies(SUnits, RegionBegin, RegionEnd,
                                            EndIndex, DbgValues);

    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      ScheduleDAG::clearDAG();
      buildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }

  postprocessDAG();

  LLVM_DEBUG(dbgs() << "********** List Scheduling **********\n");
  LLVM_DEBUG(dump());

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown();
  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr &MI, unsigned Count) {
  if (AntiDepBreak)
    AntiDepBreak->Observe(MI, Count, EndIndex);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::finishBlock() {
  if (AntiDepBreak)
    AntiDepBreak->FinishBlock();

  // Call the superclass.
  ScheduleDAGInstrs::finishBlock();
}

/// Apply each ScheduleDAGMutation step in order.
void SchedulePostRATDList::postprocessDAG() {
  for (auto &M : Mutations)
    M->apply(this);
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

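  // Weak edges are advisory: they never gate whether the successor can be
  // scheduled, so just update the weak-predecessor count and return.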
  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    dumpNode(*SuccSU);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Standard scheduler algorithms will recompute the depth of the successor
  // here as such:
  //   SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
  //
  // However, we lazily compute node depth instead. Note that
  // ScheduleNodeTopDown has already updated the depth of this node, which
  // causes all descendants to be marked dirty. Setting the successor depth
  // explicitly here would cause depth to be recomputed for all its ancestors.
  // If the successor is not yet ready (because of a transitively redundant
  // edge), then this causes depth computation to be quadratic in the size of
  // the DAG.

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SDep &Succ : SU->Succs)
    ReleaseSucc(SU, &Succ);
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  LLVM_DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  LLVM_DEBUG(dumpNode(*SU));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.scheduledNode(SU);
}

/// emitNoop - Add a noop to the current instruction sequence.
void SchedulePostRATDList::emitNoop(unsigned CurCycle) {
  LLVM_DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
  HazardRec->EmitNoop();
  Sequence.push_back(nullptr);   // NULL here means noop
  ++NumNoops;
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (SUnit &SUnit : SUnits) {
    // It is available if it has no predecessors.
    if (!SUnit.NumPredsLeft && !SUnit.isAvailable) {
      AvailableQueue.push(&SUnit);
      SUnit.isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    LLVM_DEBUG(dbgs() << "\n*** Examining Available\n";
               AvailableQueue.dump(this));

    SUnit *FoundSUnit = nullptr, *NotPreferredSUnit = nullptr;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
          HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        if (HazardRec->ShouldPreferAnother(CurSUnit)) {
          if (!NotPreferredSUnit) {
            // If this is the first non-preferred node for this cycle, then
            // record it and continue searching for a preferred node. If this
            // is not the first non-preferred node, then treat it as though
            // there had been a hazard.
            NotPreferredSUnit = CurSUnit;
            continue;
          }
        } else {
          FoundSUnit = CurSUnit;
          break;
        }
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // If we have a non-preferred node, push it back onto the available list.
    // If we did not find a preferred node, then schedule this first
    // non-preferred node.
    if (NotPreferredSUnit) {
      if (!FoundSUnit) {
        LLVM_DEBUG(
            dbgs() << "*** Will schedule a non-preferred instruction...\n");
        FoundSUnit = NotPreferredSUnit;
      } else {
        AvailableQueue.push(NotPreferredSUnit);
      }

      NotPreferredSUnit = nullptr;
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule...
    if (FoundSUnit) {
      // If we need to emit noops prior to this instruction, then do so.
      unsigned NumPreNoops = HazardRec->PreEmitNoops(FoundSUnit);
      for (unsigned i = 0; i != NumPreNoops; ++i)
        emitNoop(CurCycle);

      // ... schedule the node...
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;
      if (HazardRec->atIssueLimit()) {
        LLVM_DEBUG(dbgs() << "*** Max instructions per cycle " << CurCycle
                          << '\n');
        HazardRec->AdvanceCycle();
        ++CurCycle;
        CycleHasInsts = false;
      }
    } else {
      if (CycleHasInsts) {
        LLVM_DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        LLVM_DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        emitNoop(CurCycle);
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  unsigned ScheduledNodes = VerifyScheduledDAG(/*isBottomUp=*/false);
  unsigned Noops = llvm::count(Sequence, nullptr);
  assert(Sequence.size() - Noops == ScheduledNodes &&
         "The number of nodes scheduled doesn't match the expected number!");
#endif // NDEBUG
}

// EmitSchedule - Emit the machine code in scheduled order.
void SchedulePostRATDList::EmitSchedule() {
  RegionBegin = RegionEnd;
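  // The scheduled instructions are spliced back in just before RegionEnd;
  // RegionBegin is re-anchored at the first of them below.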

  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue)
    BB->splice(RegionEnd, BB, FirstDbgValue);

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      BB->splice(RegionEnd, BB, SU->getInstr());
    else
      // Null SUnit* is a noop.
      TII->insertNoop(*BB, RegionEnd);

    // Update the Begin iterator, as the first instruction in the block
    // may have been scheduled later.
    if (i == 0)
      RegionBegin = std::prev(RegionEnd);
  }

  // Reinsert any remaining debug_values.
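  // DbgValues was populated while the region was walked bottom-up, so walk it
  // in reverse here, re-attaching each DBG_VALUE right after the instruction
  // it originally followed.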
  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrivMI = P.second;
    BB->splice(++OrigPrivMI, BB, DbgValue);
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}