//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/AntiDepBreaker.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "post-RA-sched"

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtargetInfo.enablePostRAScheduler(). This flag can be used to
// override the target.
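// For example, passing -post-RA-scheduler (or -post-RA-scheduler=false) on
// the llc command line overrides whatever the subtarget reports, because an
// explicit occurrence of the flag takes priority (see the getPosition()
// check in enablePostRAScheduler() below).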
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);

AntiDepBreaker::~AntiDepBreaker() = default;

namespace {
  class PostRAScheduler : public MachineFunctionPass {
    const TargetInstrInfo *TII = nullptr;
    RegisterClassInfo RegClassInfo;

  public:
    static char ID;
    PostRAScheduler() : MachineFunctionPass(ID) {}

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      AU.addRequired<AAResultsWrapperPass>();
      AU.addRequired<TargetPassConfig>();
      AU.addRequired<MachineDominatorTreeWrapperPass>();
      AU.addPreserved<MachineDominatorTreeWrapperPass>();
      AU.addRequired<MachineLoopInfoWrapperPass>();
      AU.addPreserved<MachineLoopInfoWrapperPass>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    MachineFunctionProperties getRequiredProperties() const override {
      return MachineFunctionProperties().set(
          MachineFunctionProperties::Property::NoVRegs);
    }

    bool runOnMachineFunction(MachineFunction &Fn) override;
  };
  char PostRAScheduler::ID = 0;

  class SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AntiDepBreak - Anti-dependence breaking object, or NULL if none
    AntiDepBreaker *AntiDepBreak;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// The schedule. Null SUnit*'s represent noop instructions.
    std::vector<SUnit*> Sequence;

    /// Ordered list of DAG postprocessing steps.
    std::vector<std::unique_ptr<ScheduleDAGMutation>> Mutations;

    /// The index in BB of RegionEnd.
    ///
    /// This is the instruction number from the top of the current block, not
    /// the SlotIndex. It is only used by the AntiDepBreaker.
    unsigned EndIndex = 0;

  public:
    SchedulePostRATDList(
        MachineFunction &MF, MachineLoopInfo &MLI, AliasAnalysis *AA,
        const RegisterClassInfo &,
        TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
        SmallVectorImpl<const TargetRegisterClass *> &CriticalPathRCs);

    ~SchedulePostRATDList() override;

    /// startBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void startBlock(MachineBasicBlock *BB) override;

    // Set the index of RegionEnd within the current BB.
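    // Callers are expected to invoke this once per scheduling region, after
    // enterRegion() and before schedule(); only the AntiDepBreaker consumes
    // the index (see EndIndex above).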
    void setEndIndex(unsigned EndIdx) { EndIndex = EndIdx; }

    /// Initialize the scheduler state for the next scheduling region.
    void enterRegion(MachineBasicBlock *bb,
                     MachineBasicBlock::iterator begin,
                     MachineBasicBlock::iterator end,
                     unsigned regioninstrs) override;

    /// Notify that the scheduler has finished scheduling the current region.
    void exitRegion() override;

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void schedule() override;

    void EmitSchedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr &MI, unsigned Count);

    /// finishBlock - Clean up register live-range state.
    ///
    void finishBlock() override;

  private:
    /// Apply each ScheduleDAGMutation step in order.
    void postProcessDAG();

    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();

    void dumpSchedule() const;
    void emitNoop(unsigned CurCycle);
  };
}

char &llvm::PostRASchedulerID = PostRAScheduler::ID;

INITIALIZE_PASS(PostRAScheduler, DEBUG_TYPE,
                "Post RA top-down list latency scheduler", false, false)

SchedulePostRATDList::SchedulePostRATDList(
    MachineFunction &MF, MachineLoopInfo &MLI, AliasAnalysis *AA,
    const RegisterClassInfo &RCI,
    TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
    SmallVectorImpl<const TargetRegisterClass *> &CriticalPathRCs)
    : ScheduleDAGInstrs(MF, &MLI), AA(AA) {

  const InstrItineraryData *InstrItins =
      MF.getSubtarget().getInstrItineraryData();
  HazardRec =
      MF.getSubtarget().getInstrInfo()->CreateTargetPostRAHazardRecognizer(
          InstrItins, this);
  MF.getSubtarget().getPostRAMutations(Mutations);

  assert((AntiDepMode == TargetSubtargetInfo::ANTIDEP_NONE ||
          MRI.tracksLiveness()) &&
         "Live-ins must be accurate for anti-dependency breaking");
  AntiDepBreak = ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_ALL)
                      ? createAggressiveAntiDepBreaker(MF, RCI, CriticalPathRCs)
                      : ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_CRITICAL)
                             ? createCriticalAntiDepBreaker(MF, RCI)
                             : nullptr));
}

SchedulePostRATDList::~SchedulePostRATDList() {
  delete HazardRec;
  delete AntiDepBreak;
}

/// Initialize state associated with the next scheduling region.
void SchedulePostRATDList::enterRegion(MachineBasicBlock *bb,
                                       MachineBasicBlock::iterator begin,
                                       MachineBasicBlock::iterator end,
                                       unsigned regioninstrs) {
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);
  Sequence.clear();
}

/// Print the schedule before exiting the region.
void SchedulePostRATDList::exitRegion() {
  LLVM_DEBUG({
    dbgs() << "*** Final schedule ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
  ScheduleDAGInstrs::exitRegion();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// dumpSchedule - dump the scheduled Sequence.
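/// Null entries in Sequence are printed as noops, mirroring EmitSchedule().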
LLVM_DUMP_METHOD void SchedulePostRATDList::dumpSchedule() const {
  for (const SUnit *SU : Sequence) {
    if (SU)
      dumpNode(*SU);
    else
      dbgs() << "**** NOOP ****\n";
  }
}
#endif

static bool enablePostRAScheduler(const TargetSubtargetInfo &ST,
                                  CodeGenOptLevel OptLevel) {
  // Check for explicit enable/disable of post-ra scheduling.
  if (EnablePostRAScheduler.getPosition() > 0)
    return EnablePostRAScheduler;

  return ST.enablePostRAScheduler() &&
         OptLevel >= ST.getOptLevelToEnablePostRAScheduler();
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  if (skipFunction(Fn.getFunction()))
    return false;

  const auto &Subtarget = Fn.getSubtarget();
  TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();
  // Check that post-RA scheduling is enabled for this target.
  if (!enablePostRAScheduler(Subtarget, PassConfig->getOptLevel()))
    return false;

  TII = Subtarget.getInstrInfo();
  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfoWrapperPass>().getLI();
  AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode =
      Subtarget.getAntiDepBreakMode();
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all")
                      ? TargetSubtargetInfo::ANTIDEP_ALL
                      : ((EnableAntiDepBreaking == "critical")
                             ? TargetSubtargetInfo::ANTIDEP_CRITICAL
                             : TargetSubtargetInfo::ANTIDEP_NONE);
  }
  SmallVector<const TargetRegisterClass *, 4> CriticalPathRCs;
  Subtarget.getCriticalPathRCs(CriticalPathRCs);
  RegClassInfo.runOnMachineFunction(Fn);

  LLVM_DEBUG(dbgs() << "PostRAScheduler\n");

  SchedulePostRATDList Scheduler(Fn, MLI, AA, RegClassInfo, AntiDepMode,
                                 CriticalPathRCs);

  // Loop over all of the basic blocks
  for (auto &MBB : Fn) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      dbgs() << "*** DEBUG scheduling " << Fn.getName() << ":"
             << printMBBReference(MBB) << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.startBlock(&MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB.end();
    unsigned Count = MBB.size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB.begin();) {
      MachineInstr &MI = *std::prev(I);
      --Count;
      // Calls are not scheduling boundaries before register allocation, but
      // post-ra we don't gain anything by scheduling across calls since we
      // don't need to worry about register pressure.
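      // The boundary instruction itself is never re-scheduled; it is only
      // passed to Observe() below so the anti-dep breaker can account for it.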
      if (MI.isCall() || TII->isSchedulingBoundary(MI, &MBB, Fn)) {
        Scheduler.enterRegion(&MBB, I, Current, CurrentCount - Count);
        Scheduler.setEndIndex(CurrentCount);
        Scheduler.schedule();
        Scheduler.exitRegion();
        Scheduler.EmitSchedule();
        Current = &MI;
        CurrentCount = Count;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      if (MI.isBundle())
        Count -= MI.getBundleSize();
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB.begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.enterRegion(&MBB, MBB.begin(), Current, CurrentCount);
    Scheduler.setEndIndex(CurrentCount);
    Scheduler.schedule();
    Scheduler.exitRegion();
    Scheduler.EmitSchedule();

    // Clean up register live-range state.
    Scheduler.finishBlock();

    // Update register kills
    Scheduler.fixupKills(MBB);
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::startBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::startBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak)
    AntiDepBreak->StartBlock(BB);
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::schedule() {
  // Build the scheduling graph.
  buildSchedGraph(AA);

  if (AntiDepBreak) {
    unsigned Broken =
        AntiDepBreak->BreakAntiDependencies(SUnits, RegionBegin, RegionEnd,
                                            EndIndex, DbgValues);

    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      ScheduleDAG::clearDAG();
      buildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }

  postProcessDAG();

  LLVM_DEBUG(dbgs() << "********** List Scheduling **********\n");
  LLVM_DEBUG(dump());

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown();
  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr &MI, unsigned Count) {
  if (AntiDepBreak)
    AntiDepBreak->Observe(MI, Count, EndIndex);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::finishBlock() {
  if (AntiDepBreak)
    AntiDepBreak->FinishBlock();

  // Call the superclass.
  ScheduleDAGInstrs::finishBlock();
}

/// Apply each ScheduleDAGMutation step in order.
void SchedulePostRATDList::postProcessDAG() {
  for (auto &M : Mutations)
    M->apply(this);
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero.
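/// Weak (non-correctness) edges only decrement WeakPredsLeft and never make
/// the successor pending.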
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    dumpNode(*SuccSU);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Standard scheduler algorithms will recompute the depth of the successor
  // here as such:
  //   SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
  //
  // However, we lazily compute node depth instead. Note that
  // ScheduleNodeTopDown has already updated the depth of this node, which
  // causes all descendants to be marked dirty. Setting the successor depth
  // explicitly here would cause depth to be recomputed for all its ancestors.
  // If the successor is not yet ready (because of a transitively redundant
  // edge) then this causes depth computation to be quadratic in the size of
  // the DAG.

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    ReleaseSucc(SU, &*I);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  LLVM_DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  LLVM_DEBUG(dumpNode(*SU));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.scheduledNode(SU);
}

/// emitNoop - Add a noop to the current instruction sequence.
void SchedulePostRATDList::emitNoop(unsigned CurCycle) {
  LLVM_DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
  HazardRec->EmitNoop();
  Sequence.push_back(nullptr);   // NULL here means noop
  ++NumNoops;
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (SUnit &SUnit : SUnits) {
    // It is available if it has no predecessors.
    if (!SUnit.NumPredsLeft && !SUnit.isAvailable) {
      AvailableQueue.push(&SUnit);
      SUnit.isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
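  // CycleHasInsts below tracks whether anything has issued in the current
  // cycle, so the bottom of the loop can distinguish a normal cycle advance
  // from a stall or a required noop.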
  bool CycleHasInsts = false;

  // While the Available queue is not empty, grab the node with the highest
  // priority. If it is not ready, put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    LLVM_DEBUG(dbgs() << "\n*** Examining Available\n";
               AvailableQueue.dump(this));

    SUnit *FoundSUnit = nullptr, *NotPreferredSUnit = nullptr;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
          HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        if (HazardRec->ShouldPreferAnother(CurSUnit)) {
          if (!NotPreferredSUnit) {
            // If this is the first non-preferred node for this cycle, then
            // record it and continue searching for a preferred node. If this
            // is not the first non-preferred node, then treat it as though
            // there had been a hazard.
            NotPreferredSUnit = CurSUnit;
            continue;
          }
        } else {
          FoundSUnit = CurSUnit;
          break;
        }
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // If we have a non-preferred node, push it back onto the available list.
    // If we did not find a preferred node, then schedule this first
    // non-preferred node.
    if (NotPreferredSUnit) {
      if (!FoundSUnit) {
        LLVM_DEBUG(
            dbgs() << "*** Will schedule a non-preferred instruction...\n");
        FoundSUnit = NotPreferredSUnit;
      } else {
        AvailableQueue.push(NotPreferredSUnit);
      }

      NotPreferredSUnit = nullptr;
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule...
    if (FoundSUnit) {
      // If we need to emit noops prior to this instruction, then do so.
      unsigned NumPreNoops = HazardRec->PreEmitNoops(FoundSUnit);
      for (unsigned i = 0; i != NumPreNoops; ++i)
        emitNoop(CurCycle);

      // ... schedule the node...
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;
      if (HazardRec->atIssueLimit()) {
        LLVM_DEBUG(dbgs() << "*** Max instructions per cycle " << CurCycle
                          << '\n');
        HazardRec->AdvanceCycle();
        ++CurCycle;
        CycleHasInsts = false;
      }
    } else {
      if (CycleHasInsts) {
        LLVM_DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
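        // Each such stall cycle is counted in the NumStalls statistic.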
        LLVM_DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        emitNoop(CurCycle);
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  unsigned ScheduledNodes = VerifyScheduledDAG(/*isBottomUp=*/false);
  unsigned Noops = llvm::count(Sequence, nullptr);
  assert(Sequence.size() - Noops == ScheduledNodes &&
         "The number of nodes scheduled doesn't match the expected number!");
#endif // NDEBUG
}

// EmitSchedule - Emit the machine code in scheduled order.
void SchedulePostRATDList::EmitSchedule() {
  RegionBegin = RegionEnd;

  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue)
    BB->splice(RegionEnd, BB, FirstDbgValue);

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      BB->splice(RegionEnd, BB, SU->getInstr());
    else
      // Null SUnit* is a noop.
      TII->insertNoop(*BB, RegionEnd);

    // Update the Begin iterator, as the first instruction in the block
    // may have been scheduled later.
    if (i == 0)
      RegionBegin = std::prev(RegionEnd);
  }

  // Reinsert any remaining debug_values.
  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrivMI = P.second;
    BB->splice(++OrigPrivMI, BB, DbgValue);
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}