//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachinePassRegistry.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "machine-scheduler"

STATISTIC(NumClustered, "Number of load/store pairs clustered");

namespace llvm {

cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
                       cl::desc("Print critical path length to stdout"));

cl::opt<bool> VerifyScheduling(
    "verify-misched", cl::Hidden,
    cl::desc("Verify machine instrs before and after machine scheduling"));
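
// Illustration (not part of the pass logic): the options above are ordinary
// cl::opt flags, so any tool that links this pass accepts them directly. The
// llc invocation below is an assumed example:
//
//   llc -misched-topdown -misched-dcpl -verify-misched input.ll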

} // end namespace llvm

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

/// In some situations a few uninteresting nodes depend on nearly all other
/// nodes in the graph, provide a cutoff to hide them.
static cl::opt<unsigned> ViewMISchedCutoff("view-misched-cutoff", cl::Hidden,
  cl::desc("Hide nodes with more predecessors/successors than cutoff"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));

static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
  cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
  cl::desc("Only schedule this MBB#"));
static cl::opt<bool> PrintDAGs("misched-print-dags", cl::Hidden,
  cl::desc("Print schedule DAGs"));
#else
static const bool ViewMISchedDAGs = false;
static const bool PrintDAGs = false;
#endif // NDEBUG

/// Avoid quadratic complexity in unusually large basic blocks by limiting the
/// size of the ready lists.
static cl::opt<unsigned> ReadyListLimit("misched-limit", cl::Hidden,
  cl::desc("Limit ready list to N instructions"), cl::init(256));

static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
  cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(true));

static cl::opt<bool> EnableMemOpCluster("misched-cluster", cl::Hidden,
                                        cl::desc("Enable memop clustering."),
                                        cl::init(true));
static cl::opt<bool>
    ForceFastCluster("force-fast-cluster", cl::Hidden,
                     cl::desc("Switch to the fast cluster algorithm, with the "
                              "loss of some fusion opportunities"),
                     cl::init(false));
static cl::opt<unsigned>
    FastClusterThreshold("fast-cluster-threshold", cl::Hidden,
                         cl::desc("The threshold for fast cluster"),
                         cl::init(1000));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}

void ScheduleDAGMutation::anchor() {}

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext() {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {

/// Base class for a machine scheduler class that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}

  void print(raw_ostream &O, const Module* = nullptr) const override;

protected:
  void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags);
};

/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createMachineScheduler();
};

/// PostMachineScheduler runs shortly before code emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
  PostMachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createPostMachineScheduler();
};

} // end anonymous namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, DEBUG_TYPE,
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, DEBUG_TYPE,
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler() : MachineSchedulerBase(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

char PostMachineScheduler::ID = 0;

char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;

INITIALIZE_PASS(PostMachineScheduler, "postmisched",
                "PostRA Machine Instruction Scheduler", false, false)

PostMachineScheduler::PostMachineScheduler() : MachineSchedulerBase(ID) {
  initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry<MachineSchedRegistry::ScheduleDAGCtor>
    MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return nullptr;
}
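
// Illustration (hypothetical, not part of this file): in-tree and out-of-tree
// code can plug a scheduler into the registry consulted below. The name
// "my-sched" and the factory are assumptions for the example; the same
// pattern is used by the real "default" entry that follows.
//
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, std::make_unique<GenericScheduler>(C));
//   }
//   static MachineSchedRegistry
//   MySchedRegistry("my-sched", "Hypothetical example scheduler",
//                   createMySched);
//
// Once registered, the scheduler is selectable with -misched=my-sched.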

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry>>
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);

static cl::opt<bool> EnableMachineSched(
    "enable-misched",
    cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePostRAMachineSched(
    "enable-post-misched",
    cl::desc("Enable the post-ra machine instruction scheduling pass."),
    cl::init(true), cl::Hidden);

/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugInstr())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return priorNonDebug(MachineBasicBlock::const_iterator(I), Beg)
      .getNonConstIterator();
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugInstr())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  return nextIfDebug(MachineBasicBlock::const_iterator(I), End)
      .getNonConstIterator();
}

/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor != useDefaultMachineSched)
    return Ctor(this);

  // Get the default scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedLive(this);
}

/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}
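
// Illustration (hypothetical target code, not part of this file): a target
// takes over the default by overriding the TargetPassConfig hook consulted in
// createMachineScheduler() above, typically in its TargetMachine file:
//
//   ScheduleDAGInstrs *
//   MyPassConfig::createMachineScheduler(MachineSchedContext *C) const {
//     ScheduleDAGMILive *DAG = createGenericSchedLive(C);
//     // Attach a target-chosen mutation; see BaseMemOpClusterMutation below.
//     DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
//     return DAG;
//   }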

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(mf.getFunction()))
    return false;

  if (EnableMachineSched.getNumOccurrences()) {
    if (!EnableMachineSched)
      return false;
  } else if (!mf.getSubtarget().enableMachineScheduler())
    return false;

  LLVM_DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    LLVM_DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler, false);

  LLVM_DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(mf.getFunction()))
    return false;

  if (EnablePostRAMachineSched.getNumOccurrences()) {
    if (!EnablePostRAMachineSched)
      return false;
  } else if (!mf.getSubtarget().enablePostRAMachineScheduler()) {
    LLVM_DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
    return false;
  }
  LLVM_DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  scheduleRegions(*Scheduler, true);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}
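
// Illustration of the precedence implemented above: an explicit command line
// occurrence always wins over the subtarget hook (tool invocations assumed):
//
//   llc -enable-misched=false ...   // forces pre-RA MI scheduling off
//   llc -enable-post-misched ...    // forces post-RA MI scheduling on
//
// With neither flag given, enableMachineScheduler() and
// enablePostRAMachineScheduler() decide per subtarget.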

/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need the isCall to enforce
/// the boundary, but there would be no benefit to postRA scheduling across
/// calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
                            MachineBasicBlock *MBB,
                            MachineFunction *MF,
                            const TargetInstrInfo *TII) {
  return MI->isCall() || TII->isSchedulingBoundary(*MI, MBB, *MF);
}

/// A region of an MBB for scheduling.
namespace {
struct SchedRegion {
  /// RegionBegin is the first instruction in the scheduling region, and
  /// RegionEnd is either MBB->end() or the scheduling boundary after the
  /// last instruction in the scheduling region. These iterators cannot refer
  /// to instructions outside of the identified scheduling region because
  /// those may be reordered before scheduling this region.
  MachineBasicBlock::iterator RegionBegin;
  MachineBasicBlock::iterator RegionEnd;
  unsigned NumRegionInstrs;

  SchedRegion(MachineBasicBlock::iterator B, MachineBasicBlock::iterator E,
              unsigned N) :
    RegionBegin(B), RegionEnd(E), NumRegionInstrs(N) {}
};
} // end anonymous namespace

using MBBRegionsVector = SmallVector<SchedRegion, 16>;

static void
getSchedRegions(MachineBasicBlock *MBB,
                MBBRegionsVector &Regions,
                bool RegionsTopDown) {
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  MachineBasicBlock::iterator I = nullptr;
  for (MachineBasicBlock::iterator RegionEnd = MBB->end();
       RegionEnd != MBB->begin(); RegionEnd = I) {

    // Avoid decrementing RegionEnd for blocks with no terminator.
    if (RegionEnd != MBB->end() ||
        isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII)) {
      --RegionEnd;
    }

    // The next region starts above the previous region. Look backward in the
    // instruction stream until we find the nearest boundary.
    unsigned NumRegionInstrs = 0;
    I = RegionEnd;
    for (; I != MBB->begin(); --I) {
      MachineInstr &MI = *std::prev(I);
      if (isSchedBoundary(&MI, &*MBB, MF, TII))
        break;
      if (!MI.isDebugInstr()) {
        // MBB::size() uses instr_iterator to count. Here we need a bundle to
        // count as a single instruction.
        ++NumRegionInstrs;
      }
    }

    // It's possible we found a scheduling region that only has debug
    // instructions. Don't bother scheduling these.
    if (NumRegionInstrs != 0)
      Regions.push_back(SchedRegion(I, RegionEnd, NumRegionInstrs));
  }

  if (RegionsTopDown)
    std::reverse(Regions.begin(), Regions.end());
}
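
// Illustration (hypothetical instructions, for exposition only): a block
// containing a call is carved into two regions around it, because
// isSchedBoundary() returns true for calls:
//
//   %1 = LOAD ...   \  upper region [%1, CALL): found second when scanning
//   %2 = ADD  ...   /  bottom-up; its RegionEnd is the CALL
//   CALL @f            boundary: never part of any region's DAG
//   %3 = MUL  ...      lower region [%3, MBB->end()): found first
//
// getSchedRegions() collects the regions bottom-up and reverses the list when
// the scheduler asks for top-down visitation.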

/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler,
                                           bool FixKillFlags) {
  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(&*MBB);

#ifndef NDEBUG
    if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
      continue;
    if (SchedOnlyBlock.getNumOccurrences()
        && (int)SchedOnlyBlock != MBB->getNumber())
      continue;
#endif

    // Break the block into scheduling regions [I, RegionEnd). RegionEnd
    // points to the scheduling boundary at the bottom of the region. The DAG
    // does not include RegionEnd, but the region does (i.e. the next
    // RegionEnd is above the previous RegionBegin). If the current block has
    // no terminator then RegionEnd == MBB->end() for the bottom region.
    //
    // All the regions of MBB are first found and stored in MBBRegions, which
    // will be processed (MBB) top-down if initialized with true.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls. Instructions must not be
    // added to regions other than the current one without updating MBBRegions.

    MBBRegionsVector MBBRegions;
    getSchedRegions(&*MBB, MBBRegions, Scheduler.doMBBSchedRegionsTopDown());
    for (MBBRegionsVector::iterator R = MBBRegions.begin();
         R != MBBRegions.end(); ++R) {
      MachineBasicBlock::iterator I = R->RegionBegin;
      MachineBasicBlock::iterator RegionEnd = R->RegionEnd;
      unsigned NumRegionInstrs = R->NumRegionInstrs;

      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == std::prev(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      LLVM_DEBUG(dbgs() << "********** MI Scheduling **********\n");
      LLVM_DEBUG(dbgs() << MF->getName() << ":" << printMBBReference(*MBB)
                        << " " << MBB->getName() << "\n  From: " << *I
                        << "    To: ";
                 if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
                 else dbgs() << "End";
                 dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
      if (DumpCriticalPathLength) {
        errs() << MF->getName();
        errs() << ":%bb. " << MBB->getNumber();
        errs() << " " << MBB->getName() << " \n";
      }

      // Schedule a region: possibly reorder instructions.
      // This invalidates the original region iterators.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();
    }
    Scheduler.finishBlock();
    // FIXME: Ideally, no further passes should rely on kill flags. However,
    // thumb2 size reduction is currently an exception, so the PostMIScheduler
    // needs to do this.
    if (FixKillFlags)
      Scheduler.fixupKills(*MBB);
  }
  Scheduler.finalizeSchedule();
}

void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ReadyQueue::dump() const {
  dbgs() << "Queue " << Name << ": ";
  for (const SUnit *SU : Queue)
    dbgs() << SU->NodeNum << " ";
  dbgs() << "\n";
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
// virtual registers.
//===----------------------------------------------------------------------===//

// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() = default;
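
// Illustration of the top-down bookkeeping implemented below (latencies
// assumed): for edges SU(0)-(2)->SU(2) and SU(1)-(1)->SU(2), scheduling SU(0)
// at cycle 0 drops SU(2).NumPredsLeft from 2 to 1 and raises
// SU(2).TopReadyCycle to 0 + 2 = 2; scheduling SU(1) drops the count to 0, so
// releaseSucc() hands SU(2) to SchedImpl->releaseTopNode().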

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    dumpNode(*SuccSU);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
    SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();

  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SDep &Succ : SU->Succs)
    releaseSucc(SU, &Succ);
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    dumpNode(*PredSU);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
    PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();

  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SDep &Pred : SU->Preds)
    releasePred(SU, &Pred);
}

void ScheduleDAGMI::startBlock(MachineBasicBlock *bb) {
  ScheduleDAGInstrs::startBlock(bb);
  SchedImpl->enterMBB(bb);
}

void ScheduleDAGMI::finishBlock() {
  SchedImpl->leaveMBB();
  ScheduleDAGInstrs::finishBlock();
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  SchedImpl->initPolicy(begin, end, regioninstrs);
}

/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
  MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals.
  if (LIS)
    LIS->handleMove(*MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}

/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  LLVM_DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
  LLVM_DEBUG(SchedImpl->dumpPolicy());

  // Build the DAG.
  buildSchedGraph(AA);

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  LLVM_DEBUG(dump());
  if (PrintDAGs) dump();
  if (ViewMISchedDAGs) viewGraph();

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    LLVM_DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    } else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
    }
    // Notify the scheduling strategy before updating the DAG.
    // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
    // runs, it can then use the accurate ReadyCycle time to determine whether
    // newly released nodes can move to the readyQ.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  LLVM_DEBUG({
    dbgs() << "*** Final schedule for "
           << printMBBReference(*begin()->getParent()) << " ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
}
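
// Illustration (hypothetical strategy, not part of this file): the loop above
// drives everything through MachineSchedStrategy, so a deliberately naive
// top-down FIFO strategy is already enough to plug into ScheduleDAGMI. This is
// a minimal sketch; the name is an assumption and the omitted virtual methods
// keep their defaults:
//
//   struct FIFOStrategy : MachineSchedStrategy {
//     std::deque<SUnit *> ReadyQ;
//     void initialize(ScheduleDAGMI *) override {}
//     SUnit *pickNode(bool &IsTopNode) override {
//       IsTopNode = true;
//       if (ReadyQ.empty())
//         return nullptr;
//       SUnit *SU = ReadyQ.front();
//       ReadyQ.pop_front();
//       return SU;
//     }
//     void schedNode(SUnit *, bool) override {}
//     void releaseTopNode(SUnit *SU) override { ReadyQ.push_back(SU); }
//     void releaseBottomNode(SUnit *) override {}
//   };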

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (auto &m : Mutations)
    m->apply(this);
}

void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                      SmallVectorImpl<SUnit*> &BotRoots) {
  for (SUnit &SU : SUnits) {
    assert(!SU.isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU.biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!SU.NumPredsLeft)
      TopRoots.push_back(&SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!SU.NumSuccsLeft)
      BotRoots.push_back(&SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = nullptr;
  NextClusterPred = nullptr;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SUnit *SU : TopRoots)
    SchedImpl->releaseTopNode(SU);

  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *>>::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == std::prev(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      dumpNode(*SU);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}

void ScheduleDAGMILive::collectVRegUses(SUnit &SU) {
  const MachineInstr &MI = *SU.getInstr();
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (!MO.readsReg())
      continue;
    if (TrackLaneMasks && !MO.isUse())
      continue;

    Register Reg = MO.getReg();
    if (!Register::isVirtualRegister(Reg))
      continue;

    // Ignore re-defs.
    if (TrackLaneMasks) {
      bool FoundDef = false;
      for (const MachineOperand &MO2 : MI.operands()) {
        if (MO2.isReg() && MO2.isDef() && MO2.getReg() == Reg && !MO2.isDead()) {
          FoundDef = true;
          break;
        }
      }
      if (FoundDef)
        continue;
    }

    // Record this local VReg use.
    VReg2SUnitMultiMap::iterator UI = VRegUses.find(Reg);
    for (; UI != VRegUses.end(); ++UI) {
      if (UI->SU == &SU)
        break;
    }
    if (UI == VRegUses.end())
      VRegUses.insert(VReg2SUnit(Reg, LaneBitmask::getNone(), &SU));
  }
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);

  SUPressureDiffs.clear();

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
  ShouldTrackLaneMasks = SchedImpl->shouldTrackLaneMasks();

  assert((!ShouldTrackLaneMasks || ShouldTrackPressure) &&
         "ShouldTrackLaneMasks requires ShouldTrackPressure");
}

// Setup the register pressure trackers for the top scheduled and bottom
// scheduled regions.
void ScheduleDAGMILive::initRegPressure() {
  VRegUses.clear();
  VRegUses.setUniverse(MRI.getNumVirtRegs());
  for (SUnit &SU : SUnits)
    collectVRegUses(SU);

  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin,
                    ShouldTrackLaneMasks, false);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                    ShouldTrackLaneMasks, false);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  LLVM_DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    LLVM_DEBUG(dbgs() << "Live Thru: ";
               dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // For each live out vreg reduce the pressure change associated with other
  // uses of the same vreg below the live-out reaching def.
  updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd) {
    SmallVector<RegisterMaskPair, 8> LiveUses;
    BotRPTracker.recede(&LiveUses);
    updatePressureDiffs(LiveUses);
  }

  LLVM_DEBUG(dbgs() << "Top Pressure:\n";
             dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
             dbgs() << "Bottom Pressure:\n";
             dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI));

  assert((BotRPTracker.getPos() == RegionEnd ||
          (RegionEnd->isDebugInstr() &&
           BotRPTracker.getPos() == priorNonDebug(RegionEnd, RegionBegin))) &&
         "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      LLVM_DEBUG(dbgs() << TRI->getRegPressureSetName(i) << " Limit " << Limit
                        << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureChange(i));
    }
  }
  LLVM_DEBUG(dbgs() << "Excess PSets: ";
             for (const PressureChange &RCPS : RegionCriticalPSets)
               dbgs() << TRI->getRegPressureSetName(RCPS.getPSet()) << " ";
             dbgs() << "\n");
}

void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
                        const std::vector<unsigned> &NewMaxPressure) {
  const PressureDiff &PDiff = getPressureDiff(SU);
  unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
  for (const PressureChange &PC : PDiff) {
    if (!PC.isValid())
      break;
    unsigned ID = PC.getPSet();
    while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
      ++CritIdx;
    if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
      if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
          && NewMaxPressure[ID] <= (unsigned)std::numeric_limits<int16_t>::max())
        RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
    }
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
    if (NewMaxPressure[ID] >= Limit - 2) {
      LLVM_DEBUG(dbgs() << "  " << TRI->getRegPressureSetName(ID) << ": "
                        << NewMaxPressure[ID]
                        << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ")
                        << Limit << "(+ " << BotRPTracker.getLiveThru()[ID]
                        << " livethru)\n");
    }
  }
}
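
// Illustration (numbers assumed): if the region's recorded max pressure for
// some pressure set is 34 against a limit of 32, initRegPressure() adds that
// set to RegionCriticalPSets; afterwards updateScheduledPressure() bumps the
// cached value whenever a newly observed max (say 36) exceeds it, so the
// strategy can see whether its picks are improving or worsening the
// worst-case pressure.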

/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(
    ArrayRef<RegisterMaskPair> LiveUses) {
  for (const RegisterMaskPair &P : LiveUses) {
    Register Reg = P.RegUnit;
    /// FIXME: Currently assuming single-use physregs.
    if (!Register::isVirtualRegister(Reg))
      continue;

    if (ShouldTrackLaneMasks) {
      // If the register has just become live then other uses won't change
      // this fact anymore => decrement pressure.
      // If the register has just become dead then other uses make it come
      // back to life => increment pressure.
      bool Decrement = P.LaneMask.any();

      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit &SU = *V2SU.SU;
        if (SU.isScheduled || &SU == &ExitSU)
          continue;

        PressureDiff &PDiff = getPressureDiff(&SU);
        PDiff.addPressureChange(Reg, Decrement, &MRI);
        LLVM_DEBUG(dbgs() << "  UpdateRegP: SU(" << SU.NodeNum << ") "
                          << printReg(Reg, TRI) << ':'
                          << PrintLaneMask(P.LaneMask) << ' ' << *SU.getInstr();
                   dbgs() << "              to "; PDiff.dump(*TRI));
      }
    } else {
      assert(P.LaneMask.any());
      LLVM_DEBUG(dbgs() << "  LiveReg: " << printVRegOrUnit(Reg, TRI) << "\n");
      // This may be called before CurrentBottom has been initialized. However,
      // BotRPTracker must have a valid position. We want the value live into the
      // instruction or live out of the block, so ask for the previous
      // instruction's live-out.
      const LiveInterval &LI = LIS->getInterval(Reg);
      VNInfo *VNI;
      MachineBasicBlock::const_iterator I =
        nextIfDebug(BotRPTracker.getPos(), BB->end());
      if (I == BB->end())
        VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
      else {
        LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*I));
        VNI = LRQ.valueIn();
      }
      // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
      assert(VNI && "No live value at use.");
      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit *SU = V2SU.SU;
        // If this use comes before the reaching def, it cannot be a last use,
        // so decrease its pressure change.
        if (!SU->isScheduled && SU != &ExitSU) {
          LiveQueryResult LRQ =
            LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
          if (LRQ.valueIn() == VNI) {
            PressureDiff &PDiff = getPressureDiff(SU);
            PDiff.addPressureChange(Reg, true, &MRI);
            LLVM_DEBUG(dbgs() << "  UpdateRegP: SU(" << SU->NodeNum << ") "
                              << *SU->getInstr();
                       dbgs() << "              to "; PDiff.dump(*TRI));
          }
        }
      }
    }
  }
}

void ScheduleDAGMILive::dump() const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  if (EntrySU.getInstr() != nullptr)
    dumpNodeAll(EntrySU);
  for (const SUnit &SU : SUnits) {
    dumpNodeAll(SU);
    if (ShouldTrackPressure) {
      dbgs() << "  Pressure Diff      : ";
      getPressureDiff(&SU).dump(*TRI);
    }
    dbgs() << "  Single Issue       : ";
    if (SchedModel.mustBeginGroup(SU.getInstr()) &&
        SchedModel.mustEndGroup(SU.getInstr()))
      dbgs() << "true;";
    else
      dbgs() << "false;";
    dbgs() << '\n';
  }
  if (ExitSU.getInstr() != nullptr)
    dumpNodeAll(ExitSU);
#endif
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMILive then it will want to override this virtual method in order
/// to update any specialized state.
void ScheduleDAGMILive::schedule() {
  LLVM_DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
  LLVM_DEBUG(SchedImpl->dumpPolicy());
  buildDAGWithRegPressure();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  LLVM_DEBUG(dump());
  if (PrintDAGs) dump();
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    LLVM_DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    if (DFSResult) {
      unsigned SubtreeID = DFSResult->getSubtreeID(SU);
      if (!ScheduledTrees.test(SubtreeID)) {
        ScheduledTrees.set(SubtreeID);
        DFSResult->scheduleTree(SubtreeID);
        SchedImpl->scheduleTree(SubtreeID);
      }
    }

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  LLVM_DEBUG({
    dbgs() << "*** Final schedule for "
           << printMBBReference(*begin()->getParent()) << " ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
}

/// Build the DAG and setup three register pressure trackers.
void ScheduleDAGMILive::buildDAGWithRegPressure() {
  if (!ShouldTrackPressure) {
    RPTracker.reset();
    RegionCriticalPSets.clear();
    buildSchedGraph(AA);
    return;
  }

  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 ShouldTrackLaneMasks, /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker, &SUPressureDiffs, LIS, ShouldTrackLaneMasks);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

void ScheduleDAGMILive::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single block loops. To handle loops that
/// span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes. For example, consider
/// the following instruction sequence where each instruction has unit latency
/// and defines an eponymous virtual register:
///
/// a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
///
/// This could be relevant to PostRA scheduling, but is currently implemented
/// assuming LiveIntervals.
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
  // This only applies to single block loops.
  if (!BB->isSuccessor(BB))
    return 0;

  unsigned MaxCyclicLatency = 0;
  // Visit each live out vreg def to find def/use pairs that cross iterations.
  for (const RegisterMaskPair &P : RPTracker.getPressure().LiveOutRegs) {
    Register Reg = P.RegUnit;
    if (!Register::isVirtualRegister(Reg))
      continue;
    const LiveInterval &LI = LIS->getInterval(Reg);
    const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    if (!DefVNI)
      continue;

    MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
    const SUnit *DefSU = getSUnit(DefMI);
    if (!DefSU)
      continue;

    unsigned LiveOutHeight = DefSU->getHeight();
    unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
    // Visit all local users of the vreg def.
    for (const VReg2SUnit &V2SU
         : make_range(VRegUses.find(Reg), VRegUses.end())) {
      SUnit *SU = V2SU.SU;
      if (SU == &ExitSU)
        continue;

      // Only consider uses of the phi.
      LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
      if (!LRQ.valueIn()->isPHIDef())
        continue;

      // Assume that a path spanning two iterations is a cycle, which could
      // overestimate in strange cases. This allows cyclic latency to be
      // estimated as the minimum slack of the vreg's depth or height.
      unsigned CyclicLatency = 0;
      if (LiveOutDepth > SU->getDepth())
        CyclicLatency = LiveOutDepth - SU->getDepth();

      unsigned LiveInHeight = SU->getHeight() + DefSU->Latency;
      if (LiveInHeight > LiveOutHeight) {
        if (LiveInHeight - LiveOutHeight < CyclicLatency)
          CyclicLatency = LiveInHeight - LiveOutHeight;
      } else
        CyclicLatency = 0;

      LLVM_DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
                        << SU->NodeNum << ") = " << CyclicLatency << "c\n");
      if (CyclicLatency > MaxCyclicLatency)
        MaxCyclicLatency = CyclicLatency;
    }
  }
  LLVM_DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
  return MaxCyclicLatency;
}

/// Release ExitSU predecessors and setup scheduler queues. Re-position
/// the Top RP tracker in case the region beginning has changed.
void ScheduleDAGMILive::initQueues(ArrayRef<SUnit*> TopRoots,
                                   ArrayRef<SUnit*> BotRoots) {
  ScheduleDAGMI::initQueues(TopRoots, BotRoots);
  if (ShouldTrackPressure) {
    assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
    TopRPTracker.setPos(CurrentTop);
  }
}

/// Move an instruction and update register pressure.
void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    if (ShouldTrackPressure) {
      // Update top scheduled pressure.
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      TopRPTracker.advance(RegOpers);
      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
      LLVM_DEBUG(dbgs() << "Top Pressure:\n"; dumpRegSetPressure(
                     TopRPTracker.getRegSetPressureAtPos(), TRI));

      updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
    }
  } else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
      BotRPTracker.setPos(CurrentBottom);
    }
    if (ShouldTrackPressure) {
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      if (BotRPTracker.getPos() != CurrentBottom)
        BotRPTracker.recedeSkipDebugValues();
      SmallVector<RegisterMaskPair, 8> LiveUses;
      BotRPTracker.recede(RegOpers, &LiveUses);
      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
      LLVM_DEBUG(dbgs() << "Bottom Pressure:\n"; dumpRegSetPressure(
                     BotRPTracker.getRegSetPressureAtPos(), TRI));

      updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
      updatePressureDiffs(LiveUses);
    }
  }
}

//===----------------------------------------------------------------------===//
// BaseMemOpClusterMutation - DAG post-processing to cluster loads or stores.
//===----------------------------------------------------------------------===//
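
// Illustration (hypothetical pseudo machine instrs): given two loads from the
// same base,
//
//   SU(5): %2 = LOAD %base, 0   ; MemOpInfo{BaseOps={%base}, Offset=0, Width=8}
//   SU(9): %3 = LOAD %base, 8   ; MemOpInfo{BaseOps={%base}, Offset=8, Width=8}
//
// the mutation sorts the records by base and offset, asks the target's
// TII->shouldClusterMemOps() hook, and on success adds a weak SDep::Cluster
// edge SU(5)->SU(9) so the scheduler tries to issue them back to back.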

namespace {

/// Post-process the DAG to create cluster edges between neighboring
/// loads or between neighboring stores.
class BaseMemOpClusterMutation : public ScheduleDAGMutation {
  struct MemOpInfo {
    SUnit *SU;
    SmallVector<const MachineOperand *, 4> BaseOps;
    int64_t Offset;
    unsigned Width;

    MemOpInfo(SUnit *SU, ArrayRef<const MachineOperand *> BaseOps,
              int64_t Offset, unsigned Width)
        : SU(SU), BaseOps(BaseOps.begin(), BaseOps.end()), Offset(Offset),
          Width(Width) {}

    static bool Compare(const MachineOperand *const &A,
                        const MachineOperand *const &B) {
      if (A->getType() != B->getType())
        return A->getType() < B->getType();
      if (A->isReg())
        return A->getReg() < B->getReg();
      if (A->isFI()) {
        const MachineFunction &MF = *A->getParent()->getParent()->getParent();
        const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
        bool StackGrowsDown = TFI.getStackGrowthDirection() ==
                              TargetFrameLowering::StackGrowsDown;
        return StackGrowsDown ? A->getIndex() > B->getIndex()
                              : A->getIndex() < B->getIndex();
      }

      llvm_unreachable("MemOpClusterMutation only supports register or frame "
                       "index bases.");
    }

    bool operator<(const MemOpInfo &RHS) const {
      // FIXME: Don't compare everything twice. Maybe use C++20 three way
      // comparison instead when it's available.
      if (std::lexicographical_compare(BaseOps.begin(), BaseOps.end(),
                                       RHS.BaseOps.begin(), RHS.BaseOps.end(),
                                       Compare))
        return true;
      if (std::lexicographical_compare(RHS.BaseOps.begin(), RHS.BaseOps.end(),
                                       BaseOps.begin(), BaseOps.end(), Compare))
        return false;
      if (Offset != RHS.Offset)
        return Offset < RHS.Offset;
      return SU->NodeNum < RHS.SU->NodeNum;
    }
  };

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  bool IsLoad;

public:
  BaseMemOpClusterMutation(const TargetInstrInfo *tii,
                           const TargetRegisterInfo *tri, bool IsLoad)
      : TII(tii), TRI(tri), IsLoad(IsLoad) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;

protected:
  void clusterNeighboringMemOps(ArrayRef<MemOpInfo> MemOps, bool FastCluster,
                                ScheduleDAGInstrs *DAG);
  void collectMemOpRecords(std::vector<SUnit> &SUnits,
                           SmallVectorImpl<MemOpInfo> &MemOpRecords);
  bool groupMemOps(ArrayRef<MemOpInfo> MemOps, ScheduleDAGInstrs *DAG,
                   DenseMap<unsigned, SmallVector<MemOpInfo, 32>> &Groups);
};

class StoreClusterMutation : public BaseMemOpClusterMutation {
public:
  StoreClusterMutation(const TargetInstrInfo *tii,
                       const TargetRegisterInfo *tri)
      : BaseMemOpClusterMutation(tii, tri, false) {}
};

class LoadClusterMutation : public BaseMemOpClusterMutation {
public:
  LoadClusterMutation(const TargetInstrInfo *tii, const TargetRegisterInfo *tri)
      : BaseMemOpClusterMutation(tii, tri, true) {}
};

} // end anonymous namespace

namespace llvm {

std::unique_ptr<ScheduleDAGMutation>
createLoadClusterDAGMutation(const TargetInstrInfo *TII,
                             const TargetRegisterInfo *TRI) {
  return EnableMemOpCluster ? std::make_unique<LoadClusterMutation>(TII, TRI)
                            : nullptr;
}
             std::make_unique<StoreClusterMutation>(TII, TRI)
                            : nullptr;
}

} // end namespace llvm

// Sort all the loads/stores first; then, for each load/store, check the
// following loads/stores one by one until we reach the first non-dependent
// one, and call the target hook to see if they can be clustered.
// If FastCluster is enabled, we assume that all the loads/stores have been
// preprocessed and now have no dependencies on each other.
void BaseMemOpClusterMutation::clusterNeighboringMemOps(
    ArrayRef<MemOpInfo> MemOpRecords, bool FastCluster,
    ScheduleDAGInstrs *DAG) {
  // Keep track of the current cluster length and bytes for each SUnit.
  DenseMap<unsigned, std::pair<unsigned, unsigned>> SUnit2ClusterInfo;

  // At this point, the `MemOpRecords` array must hold at least two mem ops.
  // Try to cluster the mem ops collected within the `MemOpRecords` array.
  for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
    // The decision to cluster mem ops is made by target-dependent logic.
    auto MemOpa = MemOpRecords[Idx];

    // Seek the next load/store with which to form a cluster.
    unsigned NextIdx = Idx + 1;
    for (; NextIdx < End; ++NextIdx)
      // Skip if MemOpb has been clustered already or has a dependency on
      // MemOpa.
      if (!SUnit2ClusterInfo.count(MemOpRecords[NextIdx].SU->NodeNum) &&
          (FastCluster ||
           (!DAG->IsReachable(MemOpRecords[NextIdx].SU, MemOpa.SU) &&
            !DAG->IsReachable(MemOpa.SU, MemOpRecords[NextIdx].SU))))
        break;
    if (NextIdx == End)
      continue;

    auto MemOpb = MemOpRecords[NextIdx];
    unsigned ClusterLength = 2;
    unsigned CurrentClusterBytes = MemOpa.Width + MemOpb.Width;
    if (SUnit2ClusterInfo.count(MemOpa.SU->NodeNum)) {
      ClusterLength = SUnit2ClusterInfo[MemOpa.SU->NodeNum].first + 1;
      CurrentClusterBytes =
          SUnit2ClusterInfo[MemOpa.SU->NodeNum].second + MemOpb.Width;
    }

    if (!TII->shouldClusterMemOps(MemOpa.BaseOps, MemOpb.BaseOps, ClusterLength,
                                  CurrentClusterBytes))
      continue;

    SUnit *SUa = MemOpa.SU;
    SUnit *SUb = MemOpb.SU;
    if (SUa->NodeNum > SUb->NodeNum)
      std::swap(SUa, SUb);

    // FIXME: Is this check really required?
    if (!DAG->addEdge(SUb, SDep(SUa, SDep::Cluster)))
      continue;

    LLVM_DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
                      << SUb->NodeNum << ")\n");
    ++NumClustered;

    if (IsLoad) {
      // Copy successor edges from SUa to SUb. Interleaving computation
      // dependent on SUa can prevent load combining due to register reuse.
      // Predecessor edges do not need to be copied from SUb to SUa since
      // nearby loads should have effectively the same inputs.
      for (const SDep &Succ : SUa->Succs) {
        if (Succ.getSUnit() == SUb)
          continue;
        LLVM_DEBUG(dbgs() << "  Copy Succ SU(" << Succ.getSUnit()->NodeNum
                          << ")\n");
        DAG->addEdge(Succ.getSUnit(), SDep(SUb, SDep::Artificial));
      }
    } else {
      // Copy predecessor edges from SUb to SUa so that the SUnits that SUb
      // depends on cannot be scheduled in between SUa and SUb. Successor
      // edges do not need to be copied from SUa to SUb since no one will
      // depend on stores.
      // Note that we don't need to worry about memory dependencies here: we
      // won't try to cluster mem ops that have any memory dependency on each
      // other.
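      // Illustrative sketch (hypothetical SUnits, not from this DAG): if SUa
      // and SUb are clustered stores and SUx computes the value stored by
      // SUb, then before this loop we have SUx -> SUb plus the SUa -> SUb
      // cluster edge; the loop also adds the artificial edge SUx -> SUa, so
      // SUx can no longer be scheduled in between SUa and SUb.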
      for (const SDep &Pred : SUb->Preds) {
        if (Pred.getSUnit() == SUa)
          continue;
        LLVM_DEBUG(dbgs() << "  Copy Pred SU(" << Pred.getSUnit()->NodeNum
                          << ")\n");
        DAG->addEdge(SUa, SDep(Pred.getSUnit(), SDep::Artificial));
      }
    }

    SUnit2ClusterInfo[MemOpb.SU->NodeNum] = {ClusterLength,
                                             CurrentClusterBytes};

    LLVM_DEBUG(dbgs() << "  Curr cluster length: " << ClusterLength
                      << ", Curr cluster bytes: " << CurrentClusterBytes
                      << "\n");
  }
}

void BaseMemOpClusterMutation::collectMemOpRecords(
    std::vector<SUnit> &SUnits, SmallVectorImpl<MemOpInfo> &MemOpRecords) {
  for (auto &SU : SUnits) {
    if ((IsLoad && !SU.getInstr()->mayLoad()) ||
        (!IsLoad && !SU.getInstr()->mayStore()))
      continue;

    const MachineInstr &MI = *SU.getInstr();
    SmallVector<const MachineOperand *, 4> BaseOps;
    int64_t Offset;
    bool OffsetIsScalable;
    unsigned Width;
    if (TII->getMemOperandsWithOffsetWidth(MI, BaseOps, Offset,
                                           OffsetIsScalable, Width, TRI)) {
      MemOpRecords.push_back(MemOpInfo(&SU, BaseOps, Offset, Width));

      LLVM_DEBUG(dbgs() << "Num BaseOps: " << BaseOps.size() << ", Offset: "
                        << Offset << ", OffsetIsScalable: " << OffsetIsScalable
                        << ", Width: " << Width << "\n");
    }
#ifndef NDEBUG
    for (auto *Op : BaseOps)
      assert(Op);
#endif
  }
}

bool BaseMemOpClusterMutation::groupMemOps(
    ArrayRef<MemOpInfo> MemOps, ScheduleDAGInstrs *DAG,
    DenseMap<unsigned, SmallVector<MemOpInfo, 32>> &Groups) {
  bool FastCluster =
      ForceFastCluster ||
      MemOps.size() * DAG->SUnits.size() / 1000 > FastClusterThreshold;

  for (const auto &MemOp : MemOps) {
    unsigned ChainPredID = DAG->SUnits.size();
    if (FastCluster) {
      for (const SDep &Pred : MemOp.SU->Preds) {
        // We only want to cluster mem ops that have the same ctrl (non-data)
        // pred, so that they have no ctrl dependency on each other. For
        // store instrs, however, we can still cluster them if the pred is a
        // load instr.
        if ((Pred.isCtrl() &&
             (IsLoad ||
              (Pred.getSUnit() && Pred.getSUnit()->getInstr()->mayStore()))) &&
            !Pred.isArtificial()) {
          ChainPredID = Pred.getSUnit()->NodeNum;
          break;
        }
      }
    } else
      ChainPredID = 0;

    Groups[ChainPredID].push_back(MemOp);
  }
  return FastCluster;
}

/// Callback from DAG postProcessing to create cluster edges for loads/stores.
void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAG) {
  // Collect all the clusterable loads/stores.
  SmallVector<MemOpInfo, 32> MemOpRecords;
  collectMemOpRecords(DAG->SUnits, MemOpRecords);

  if (MemOpRecords.size() < 2)
    return;

  // If the DAG is too complex, put the loads/stores that have no dependency
  // on each other into the same group using a heuristic, to avoid a
  // compile-time blowup. Note that some fusion pairs may be lost this way.
  DenseMap<unsigned, SmallVector<MemOpInfo, 32>> Groups;
  bool FastCluster = groupMemOps(MemOpRecords, DAG, Groups);

  for (auto &Group : Groups) {
    // Sort the loads/stores so that we can stop clustering as early as
    // possible.
    llvm::sort(Group.second);

    // Try to cluster all the neighboring loads/stores.
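    // (Illustrative cost note, with assumed numbers: for, say, 100 collected
    // mem ops in a 1000-SUnit DAG, groupMemOps computes 100 * 1000 / 1000 ==
    // 100 and compares it against FastClusterThreshold; in FastCluster mode
    // the quadratic DAG->IsReachable checks are skipped entirely.)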
    clusterNeighboringMemOps(Group.second, FastCluster, DAG);
  }
}

//===----------------------------------------------------------------------===//
// CopyConstrain - DAG post-processing to encourage copy elimination.
//===----------------------------------------------------------------------===//

namespace {

/// Post-process the DAG to create weak edges from all uses of a copy to
/// the one use that defines the copy's source vreg, most likely an induction
/// variable increment.
class CopyConstrain : public ScheduleDAGMutation {
  // Transient state.
  SlotIndex RegionBeginIdx;

  // RegionEndIdx is the slot index of the last non-debug instruction in the
  // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
  SlotIndex RegionEndIdx;

public:
  CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;

protected:
  void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
};

} // end anonymous namespace

namespace llvm {

std::unique_ptr<ScheduleDAGMutation>
createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
                               const TargetRegisterInfo *TRI) {
  return std::make_unique<CopyConstrain>(TII, TRI);
}

} // end namespace llvm

/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
/// I0:     = dst
/// I1: src = ...
/// I2:     = dst
/// I3: dst = src (copy)
/// (create pred->succ edges I0->I1, I2->I1)
///
/// 2) Local copy:
/// I0: dst = src (copy)
/// I1:     = dst
/// I2: src = ...
/// I3:     = dst
/// (create pred->succ edges I1->I2, I3->I2)
///
/// Although the MachineScheduler is currently constrained to single blocks,
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
  LiveIntervals *LIS = DAG->getLIS();
  MachineInstr *Copy = CopySU->getInstr();

  // Check for pure vreg copies.
  const MachineOperand &SrcOp = Copy->getOperand(1);
  Register SrcReg = SrcOp.getReg();
  if (!Register::isVirtualRegister(SrcReg) || !SrcOp.readsReg())
    return;

  const MachineOperand &DstOp = Copy->getOperand(0);
  Register DstReg = DstOp.getReg();
  if (!Register::isVirtualRegister(DstReg) || DstOp.isDead())
    return;

  // Check if either the dest or source is local. If it's live across a back
  // edge, it's not local. Note that if both vregs are live across the back
  // edge, we cannot successfully constrain the copy without cyclic
  // scheduling.
  // If both the copy's source and dest are local live intervals, then we
  // should treat the dest as the global for the purpose of adding
  // constraints. This adds edges from the source's other uses to the copy.
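  // Example (schematic, hypothetical vregs): for "%dst = COPY %src" where
  // %src is defined and killed inside this region while %dst is live across
  // the loop back edge, the code below keeps LocalReg = %src and
  // GlobalReg = %dst; if it were %dst that was region-local, the roles would
  // be swapped.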
  unsigned LocalReg = SrcReg;
  unsigned GlobalReg = DstReg;
  LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
  if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
    LocalReg = DstReg;
    GlobalReg = SrcReg;
    LocalLI = &LIS->getInterval(LocalReg);
    if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
      return;
  }
  LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);

  // Find the global segment after the start of the local LI.
  LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
  // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
  // local live range. We could create edges from other global uses to the
  // local start, but the coalescer should have already eliminated these
  // cases, so don't bother dealing with it.
  if (GlobalSegment == GlobalLI->end())
    return;

  // If GlobalSegment is killed at the LocalLI->start, the call to find()
  // returned the next global segment. But if GlobalSegment overlaps with
  // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
  // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
  if (GlobalSegment->contains(LocalLI->beginIndex()))
    ++GlobalSegment;

  if (GlobalSegment == GlobalLI->end())
    return;

  // Check if GlobalLI contains a hole in the vicinity of LocalLI.
  if (GlobalSegment != GlobalLI->begin()) {
    // Two-address defs have no hole.
    if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
                               GlobalSegment->start)) {
      return;
    }
    // If the prior global segment may be defined by the same two-address
    // instruction that also defines LocalLI, then we can't make a hole here.
    if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
                               LocalLI->beginIndex())) {
      return;
    }
    // If GlobalLI has a prior segment, it must be live into the EBB.
    // Otherwise it would be a disconnected component in the live range.
    assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
           "Disconnected LRG within the scheduling region.");
  }
  MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
  if (!GlobalDef)
    return;

  SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
  if (!GlobalSU)
    return;

  // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
  // constraining the uses of the last local def to precede GlobalDef.
  SmallVector<SUnit*,8> LocalUses;
  const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
  MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
  SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
  for (const SDep &Succ : LastLocalSU->Succs) {
    if (Succ.getKind() != SDep::Data || Succ.getReg() != LocalReg)
      continue;
    if (Succ.getSUnit() == GlobalSU)
      continue;
    if (!DAG->canAddEdge(GlobalSU, Succ.getSUnit()))
      return;
    LocalUses.push_back(Succ.getSUnit());
  }
  // Open the top of the GlobalLI hole by constraining any earlier global uses
  // to precede the start of LocalLI.
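  // Schematic of the hole being opened (illustrative, not from the source):
  //   GlobalLI: ...[====)      hole      [GlobalDef====)...
  //   LocalLI:          [FirstLocalDef .. LastLocalDef)
  // The weak edges added below nudge uses of the last local def to precede
  // GlobalDef (closing the bottom of the hole) and earlier uses of GlobalReg
  // to precede the first local def (opening the top).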
1910 SmallVector<SUnit*,8> GlobalUses; 1911 MachineInstr *FirstLocalDef = 1912 LIS->getInstructionFromIndex(LocalLI->beginIndex()); 1913 SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef); 1914 for (const SDep &Pred : GlobalSU->Preds) { 1915 if (Pred.getKind() != SDep::Anti || Pred.getReg() != GlobalReg) 1916 continue; 1917 if (Pred.getSUnit() == FirstLocalSU) 1918 continue; 1919 if (!DAG->canAddEdge(FirstLocalSU, Pred.getSUnit())) 1920 return; 1921 GlobalUses.push_back(Pred.getSUnit()); 1922 } 1923 LLVM_DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n"); 1924 // Add the weak edges. 1925 for (SmallVectorImpl<SUnit*>::const_iterator 1926 I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) { 1927 LLVM_DEBUG(dbgs() << " Local use SU(" << (*I)->NodeNum << ") -> SU(" 1928 << GlobalSU->NodeNum << ")\n"); 1929 DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak)); 1930 } 1931 for (SmallVectorImpl<SUnit*>::const_iterator 1932 I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) { 1933 LLVM_DEBUG(dbgs() << " Global use SU(" << (*I)->NodeNum << ") -> SU(" 1934 << FirstLocalSU->NodeNum << ")\n"); 1935 DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak)); 1936 } 1937 } 1938 1939 /// Callback from DAG postProcessing to create weak edges to encourage 1940 /// copy elimination. 1941 void CopyConstrain::apply(ScheduleDAGInstrs *DAGInstrs) { 1942 ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs); 1943 assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals"); 1944 1945 MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end()); 1946 if (FirstPos == DAG->end()) 1947 return; 1948 RegionBeginIdx = DAG->getLIS()->getInstructionIndex(*FirstPos); 1949 RegionEndIdx = DAG->getLIS()->getInstructionIndex( 1950 *priorNonDebug(DAG->end(), DAG->begin())); 1951 1952 for (SUnit &SU : DAG->SUnits) { 1953 if (!SU.getInstr()->isCopy()) 1954 continue; 1955 1956 constrainLocalCopy(&SU, static_cast<ScheduleDAGMILive*>(DAG)); 1957 } 1958 } 1959 1960 //===----------------------------------------------------------------------===// 1961 // MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler 1962 // and possibly other custom schedulers. 1963 //===----------------------------------------------------------------------===// 1964 1965 static const unsigned InvalidCycle = ~0U; 1966 1967 SchedBoundary::~SchedBoundary() { delete HazardRec; } 1968 1969 /// Given a Count of resource usage and a Latency value, return true if a 1970 /// SchedBoundary becomes resource limited. 1971 /// If we are checking after scheduling a node, we should return true when 1972 /// we just reach the resource limit. 1973 static bool checkResourceLimit(unsigned LFactor, unsigned Count, 1974 unsigned Latency, bool AfterSchedNode) { 1975 int ResCntFactor = (int)(Count - (Latency * LFactor)); 1976 if (AfterSchedNode) 1977 return ResCntFactor >= (int)LFactor; 1978 else 1979 return ResCntFactor > (int)LFactor; 1980 } 1981 1982 void SchedBoundary::reset() { 1983 // A new HazardRec is created for each DAG and owned by SchedBoundary. 1984 // Destroying and reconstructing it is very expensive though. So keep 1985 // invalid, placeholder HazardRecs. 
1986 if (HazardRec && HazardRec->isEnabled()) { 1987 delete HazardRec; 1988 HazardRec = nullptr; 1989 } 1990 Available.clear(); 1991 Pending.clear(); 1992 CheckPending = false; 1993 CurrCycle = 0; 1994 CurrMOps = 0; 1995 MinReadyCycle = std::numeric_limits<unsigned>::max(); 1996 ExpectedLatency = 0; 1997 DependentLatency = 0; 1998 RetiredMOps = 0; 1999 MaxExecutedResCount = 0; 2000 ZoneCritResIdx = 0; 2001 IsResourceLimited = false; 2002 ReservedCycles.clear(); 2003 ReservedCyclesIndex.clear(); 2004 #ifndef NDEBUG 2005 // Track the maximum number of stall cycles that could arise either from the 2006 // latency of a DAG edge or the number of cycles that a processor resource is 2007 // reserved (SchedBoundary::ReservedCycles). 2008 MaxObservedStall = 0; 2009 #endif 2010 // Reserve a zero-count for invalid CritResIdx. 2011 ExecutedResCounts.resize(1); 2012 assert(!ExecutedResCounts[0] && "nonzero count for bad resource"); 2013 } 2014 2015 void SchedRemainder:: 2016 init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) { 2017 reset(); 2018 if (!SchedModel->hasInstrSchedModel()) 2019 return; 2020 RemainingCounts.resize(SchedModel->getNumProcResourceKinds()); 2021 for (SUnit &SU : DAG->SUnits) { 2022 const MCSchedClassDesc *SC = DAG->getSchedClass(&SU); 2023 RemIssueCount += SchedModel->getNumMicroOps(SU.getInstr(), SC) 2024 * SchedModel->getMicroOpFactor(); 2025 for (TargetSchedModel::ProcResIter 2026 PI = SchedModel->getWriteProcResBegin(SC), 2027 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) { 2028 unsigned PIdx = PI->ProcResourceIdx; 2029 unsigned Factor = SchedModel->getResourceFactor(PIdx); 2030 RemainingCounts[PIdx] += (Factor * PI->Cycles); 2031 } 2032 } 2033 } 2034 2035 void SchedBoundary:: 2036 init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) { 2037 reset(); 2038 DAG = dag; 2039 SchedModel = smodel; 2040 Rem = rem; 2041 if (SchedModel->hasInstrSchedModel()) { 2042 unsigned ResourceCount = SchedModel->getNumProcResourceKinds(); 2043 ReservedCyclesIndex.resize(ResourceCount); 2044 ExecutedResCounts.resize(ResourceCount); 2045 unsigned NumUnits = 0; 2046 2047 for (unsigned i = 0; i < ResourceCount; ++i) { 2048 ReservedCyclesIndex[i] = NumUnits; 2049 NumUnits += SchedModel->getProcResource(i)->NumUnits; 2050 } 2051 2052 ReservedCycles.resize(NumUnits, InvalidCycle); 2053 } 2054 } 2055 2056 /// Compute the stall cycles based on this SUnit's ready time. Heuristics treat 2057 /// these "soft stalls" differently than the hard stall cycles based on CPU 2058 /// resources and computed by checkHazard(). A fully in-order model 2059 /// (MicroOpBufferSize==0) will not make use of this since instructions are not 2060 /// available for scheduling until they are ready. However, a weaker in-order 2061 /// model may use this for heuristics. For example, if a processor has in-order 2062 /// behavior when reading certain resources, this may come into play. 2063 unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) { 2064 if (!SU->isUnbuffered) 2065 return 0; 2066 2067 unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle); 2068 if (ReadyCycle > CurrCycle) 2069 return ReadyCycle - CurrCycle; 2070 return 0; 2071 } 2072 2073 /// Compute the next cycle at which the given processor resource unit 2074 /// can be scheduled. 
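/// Example (hypothetical values): if this instance was last recorded as
/// reserved at cycle 7, a bottom-up query with Cycles == 2 returns 7 + 2 == 9,
/// a top-down query returns 7, and a never-used instance (InvalidCycle)
/// returns cycle 0.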
2075 unsigned SchedBoundary::getNextResourceCycleByInstance(unsigned InstanceIdx, 2076 unsigned Cycles) { 2077 unsigned NextUnreserved = ReservedCycles[InstanceIdx]; 2078 // If this resource has never been used, always return cycle zero. 2079 if (NextUnreserved == InvalidCycle) 2080 return 0; 2081 // For bottom-up scheduling add the cycles needed for the current operation. 2082 if (!isTop()) 2083 NextUnreserved += Cycles; 2084 return NextUnreserved; 2085 } 2086 2087 /// Compute the next cycle at which the given processor resource can be 2088 /// scheduled. Returns the next cycle and the index of the processor resource 2089 /// instance in the reserved cycles vector. 2090 std::pair<unsigned, unsigned> 2091 SchedBoundary::getNextResourceCycle(unsigned PIdx, unsigned Cycles) { 2092 unsigned MinNextUnreserved = InvalidCycle; 2093 unsigned InstanceIdx = 0; 2094 unsigned StartIndex = ReservedCyclesIndex[PIdx]; 2095 unsigned NumberOfInstances = SchedModel->getProcResource(PIdx)->NumUnits; 2096 assert(NumberOfInstances > 0 && 2097 "Cannot have zero instances of a ProcResource"); 2098 2099 for (unsigned I = StartIndex, End = StartIndex + NumberOfInstances; I < End; 2100 ++I) { 2101 unsigned NextUnreserved = getNextResourceCycleByInstance(I, Cycles); 2102 if (MinNextUnreserved > NextUnreserved) { 2103 InstanceIdx = I; 2104 MinNextUnreserved = NextUnreserved; 2105 } 2106 } 2107 return std::make_pair(MinNextUnreserved, InstanceIdx); 2108 } 2109 2110 /// Does this SU have a hazard within the current instruction group. 2111 /// 2112 /// The scheduler supports two modes of hazard recognition. The first is the 2113 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that 2114 /// supports highly complicated in-order reservation tables 2115 /// (ScoreboardHazardRecognizer) and arbitrary target-specific logic. 2116 /// 2117 /// The second is a streamlined mechanism that checks for hazards based on 2118 /// simple counters that the scheduler itself maintains. It explicitly checks 2119 /// for instruction dispatch limitations, including the number of micro-ops that 2120 /// can dispatch per cycle. 2121 /// 2122 /// TODO: Also check whether the SU must start a new group. 2123 bool SchedBoundary::checkHazard(SUnit *SU) { 2124 if (HazardRec->isEnabled() 2125 && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) { 2126 return true; 2127 } 2128 2129 unsigned uops = SchedModel->getNumMicroOps(SU->getInstr()); 2130 if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) { 2131 LLVM_DEBUG(dbgs() << " SU(" << SU->NodeNum << ") uops=" 2132 << SchedModel->getNumMicroOps(SU->getInstr()) << '\n'); 2133 return true; 2134 } 2135 2136 if (CurrMOps > 0 && 2137 ((isTop() && SchedModel->mustBeginGroup(SU->getInstr())) || 2138 (!isTop() && SchedModel->mustEndGroup(SU->getInstr())))) { 2139 LLVM_DEBUG(dbgs() << " hazard: SU(" << SU->NodeNum << ") must " 2140 << (isTop() ? 
"begin" : "end") << " group\n"); 2141 return true; 2142 } 2143 2144 if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) { 2145 const MCSchedClassDesc *SC = DAG->getSchedClass(SU); 2146 for (const MCWriteProcResEntry &PE : 2147 make_range(SchedModel->getWriteProcResBegin(SC), 2148 SchedModel->getWriteProcResEnd(SC))) { 2149 unsigned ResIdx = PE.ProcResourceIdx; 2150 unsigned Cycles = PE.Cycles; 2151 unsigned NRCycle, InstanceIdx; 2152 std::tie(NRCycle, InstanceIdx) = getNextResourceCycle(ResIdx, Cycles); 2153 if (NRCycle > CurrCycle) { 2154 #ifndef NDEBUG 2155 MaxObservedStall = std::max(Cycles, MaxObservedStall); 2156 #endif 2157 LLVM_DEBUG(dbgs() << " SU(" << SU->NodeNum << ") " 2158 << SchedModel->getResourceName(ResIdx) 2159 << '[' << InstanceIdx - ReservedCyclesIndex[ResIdx] << ']' 2160 << "=" << NRCycle << "c\n"); 2161 return true; 2162 } 2163 } 2164 } 2165 return false; 2166 } 2167 2168 // Find the unscheduled node in ReadySUs with the highest latency. 2169 unsigned SchedBoundary:: 2170 findMaxLatency(ArrayRef<SUnit*> ReadySUs) { 2171 SUnit *LateSU = nullptr; 2172 unsigned RemLatency = 0; 2173 for (SUnit *SU : ReadySUs) { 2174 unsigned L = getUnscheduledLatency(SU); 2175 if (L > RemLatency) { 2176 RemLatency = L; 2177 LateSU = SU; 2178 } 2179 } 2180 if (LateSU) { 2181 LLVM_DEBUG(dbgs() << Available.getName() << " RemLatency SU(" 2182 << LateSU->NodeNum << ") " << RemLatency << "c\n"); 2183 } 2184 return RemLatency; 2185 } 2186 2187 // Count resources in this zone and the remaining unscheduled 2188 // instruction. Return the max count, scaled. Set OtherCritIdx to the critical 2189 // resource index, or zero if the zone is issue limited. 2190 unsigned SchedBoundary:: 2191 getOtherResourceCount(unsigned &OtherCritIdx) { 2192 OtherCritIdx = 0; 2193 if (!SchedModel->hasInstrSchedModel()) 2194 return 0; 2195 2196 unsigned OtherCritCount = Rem->RemIssueCount 2197 + (RetiredMOps * SchedModel->getMicroOpFactor()); 2198 LLVM_DEBUG(dbgs() << " " << Available.getName() << " + Remain MOps: " 2199 << OtherCritCount / SchedModel->getMicroOpFactor() << '\n'); 2200 for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds(); 2201 PIdx != PEnd; ++PIdx) { 2202 unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx]; 2203 if (OtherCount > OtherCritCount) { 2204 OtherCritCount = OtherCount; 2205 OtherCritIdx = PIdx; 2206 } 2207 } 2208 if (OtherCritIdx) { 2209 LLVM_DEBUG( 2210 dbgs() << " " << Available.getName() << " + Remain CritRes: " 2211 << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx) 2212 << " " << SchedModel->getResourceName(OtherCritIdx) << "\n"); 2213 } 2214 return OtherCritCount; 2215 } 2216 2217 void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle, bool InPQueue, 2218 unsigned Idx) { 2219 assert(SU->getInstr() && "Scheduled SUnit must have instr"); 2220 2221 #ifndef NDEBUG 2222 // ReadyCycle was been bumped up to the CurrCycle when this node was 2223 // scheduled, but CurrCycle may have been eagerly advanced immediately after 2224 // scheduling, so may now be greater than ReadyCycle. 2225 if (ReadyCycle > CurrCycle) 2226 MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall); 2227 #endif 2228 2229 if (ReadyCycle < MinReadyCycle) 2230 MinReadyCycle = ReadyCycle; 2231 2232 // Check for interlocks first. For the purpose of other heuristics, an 2233 // instruction that cannot issue appears as if it's not in the ReadyQueue. 
2234 bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0; 2235 bool HazardDetected = (!IsBuffered && ReadyCycle > CurrCycle) || 2236 checkHazard(SU) || (Available.size() >= ReadyListLimit); 2237 2238 if (!HazardDetected) { 2239 Available.push(SU); 2240 2241 if (InPQueue) 2242 Pending.remove(Pending.begin() + Idx); 2243 return; 2244 } 2245 2246 if (!InPQueue) 2247 Pending.push(SU); 2248 } 2249 2250 /// Move the boundary of scheduled code by one cycle. 2251 void SchedBoundary::bumpCycle(unsigned NextCycle) { 2252 if (SchedModel->getMicroOpBufferSize() == 0) { 2253 assert(MinReadyCycle < std::numeric_limits<unsigned>::max() && 2254 "MinReadyCycle uninitialized"); 2255 if (MinReadyCycle > NextCycle) 2256 NextCycle = MinReadyCycle; 2257 } 2258 // Update the current micro-ops, which will issue in the next cycle. 2259 unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle); 2260 CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps; 2261 2262 // Decrement DependentLatency based on the next cycle. 2263 if ((NextCycle - CurrCycle) > DependentLatency) 2264 DependentLatency = 0; 2265 else 2266 DependentLatency -= (NextCycle - CurrCycle); 2267 2268 if (!HazardRec->isEnabled()) { 2269 // Bypass HazardRec virtual calls. 2270 CurrCycle = NextCycle; 2271 } else { 2272 // Bypass getHazardType calls in case of long latency. 2273 for (; CurrCycle != NextCycle; ++CurrCycle) { 2274 if (isTop()) 2275 HazardRec->AdvanceCycle(); 2276 else 2277 HazardRec->RecedeCycle(); 2278 } 2279 } 2280 CheckPending = true; 2281 IsResourceLimited = 2282 checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(), 2283 getScheduledLatency(), true); 2284 2285 LLVM_DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() 2286 << '\n'); 2287 } 2288 2289 void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) { 2290 ExecutedResCounts[PIdx] += Count; 2291 if (ExecutedResCounts[PIdx] > MaxExecutedResCount) 2292 MaxExecutedResCount = ExecutedResCounts[PIdx]; 2293 } 2294 2295 /// Add the given processor resource to this scheduled zone. 2296 /// 2297 /// \param Cycles indicates the number of consecutive (non-pipelined) cycles 2298 /// during which this resource is consumed. 2299 /// 2300 /// \return the next cycle at which the instruction may execute without 2301 /// oversubscribing resources. 2302 unsigned SchedBoundary:: 2303 countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) { 2304 unsigned Factor = SchedModel->getResourceFactor(PIdx); 2305 unsigned Count = Factor * Cycles; 2306 LLVM_DEBUG(dbgs() << " " << SchedModel->getResourceName(PIdx) << " +" 2307 << Cycles << "x" << Factor << "u\n"); 2308 2309 // Update Executed resources counts. 2310 incExecutedResources(PIdx, Count); 2311 assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted"); 2312 Rem->RemainingCounts[PIdx] -= Count; 2313 2314 // Check if this resource exceeds the current critical resource. If so, it 2315 // becomes the critical resource. 2316 if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) { 2317 ZoneCritResIdx = PIdx; 2318 LLVM_DEBUG(dbgs() << " *** Critical resource " 2319 << SchedModel->getResourceName(PIdx) << ": " 2320 << getResourceCount(PIdx) / SchedModel->getLatencyFactor() 2321 << "c\n"); 2322 } 2323 // For reserved resources, record the highest cycle using the resource. 
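  // E.g. (hypothetical model): an unpipelined divide unit with
  // BufferSize == 0 already reserved until cycle 9 while CurrCycle == 6
  // reports the conflict below and makes countResource return 9, pushing
  // this instruction's issue cycle out.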
2324 unsigned NextAvailable, InstanceIdx; 2325 std::tie(NextAvailable, InstanceIdx) = getNextResourceCycle(PIdx, Cycles); 2326 if (NextAvailable > CurrCycle) { 2327 LLVM_DEBUG(dbgs() << " Resource conflict: " 2328 << SchedModel->getResourceName(PIdx) 2329 << '[' << InstanceIdx - ReservedCyclesIndex[PIdx] << ']' 2330 << " reserved until @" << NextAvailable << "\n"); 2331 } 2332 return NextAvailable; 2333 } 2334 2335 /// Move the boundary of scheduled code by one SUnit. 2336 void SchedBoundary::bumpNode(SUnit *SU) { 2337 // Update the reservation table. 2338 if (HazardRec->isEnabled()) { 2339 if (!isTop() && SU->isCall) { 2340 // Calls are scheduled with their preceding instructions. For bottom-up 2341 // scheduling, clear the pipeline state before emitting. 2342 HazardRec->Reset(); 2343 } 2344 HazardRec->EmitInstruction(SU); 2345 // Scheduling an instruction may have made pending instructions available. 2346 CheckPending = true; 2347 } 2348 // checkHazard should prevent scheduling multiple instructions per cycle that 2349 // exceed the issue width. 2350 const MCSchedClassDesc *SC = DAG->getSchedClass(SU); 2351 unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr()); 2352 assert( 2353 (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) && 2354 "Cannot schedule this instruction's MicroOps in the current cycle."); 2355 2356 unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle); 2357 LLVM_DEBUG(dbgs() << " Ready @" << ReadyCycle << "c\n"); 2358 2359 unsigned NextCycle = CurrCycle; 2360 switch (SchedModel->getMicroOpBufferSize()) { 2361 case 0: 2362 assert(ReadyCycle <= CurrCycle && "Broken PendingQueue"); 2363 break; 2364 case 1: 2365 if (ReadyCycle > NextCycle) { 2366 NextCycle = ReadyCycle; 2367 LLVM_DEBUG(dbgs() << " *** Stall until: " << ReadyCycle << "\n"); 2368 } 2369 break; 2370 default: 2371 // We don't currently model the OOO reorder buffer, so consider all 2372 // scheduled MOps to be "retired". We do loosely model in-order resource 2373 // latency. If this instruction uses an in-order resource, account for any 2374 // likely stall cycles. 2375 if (SU->isUnbuffered && ReadyCycle > NextCycle) 2376 NextCycle = ReadyCycle; 2377 break; 2378 } 2379 RetiredMOps += IncMOps; 2380 2381 // Update resource counts and critical resource. 2382 if (SchedModel->hasInstrSchedModel()) { 2383 unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor(); 2384 assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted"); 2385 Rem->RemIssueCount -= DecRemIssue; 2386 if (ZoneCritResIdx) { 2387 // Scale scheduled micro-ops for comparing with the critical resource. 2388 unsigned ScaledMOps = 2389 RetiredMOps * SchedModel->getMicroOpFactor(); 2390 2391 // If scaled micro-ops are now more than the previous critical resource by 2392 // a full cycle, then micro-ops issue becomes critical. 
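      // Numeric sketch (assumed factors): with MicroOpFactor == 1,
      // LatencyFactor == 2, RetiredMOps == 10 and
      // getResourceCount(ZoneCritResIdx) == 8, the difference 10 - 8 == 2
      // equals one full latency cycle, so micro-op issue takes over as the
      // critical resource below.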
      if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
          >= (int)SchedModel->getLatencyFactor()) {
        ZoneCritResIdx = 0;
        LLVM_DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
                          << ScaledMOps / SchedModel->getLatencyFactor()
                          << "c\n");
      }
    }
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned RCycle =
        countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
      if (RCycle > NextCycle)
        NextCycle = RCycle;
    }
    if (SU->hasReservedResource) {
      // For reserved resources, record the highest cycle using the resource.
      // For top-down scheduling, this is the cycle in which we schedule this
      // instruction plus the number of cycles the operation reserves the
      // resource. For bottom-up scheduling, it is simply the instruction's
      // cycle.
      for (TargetSchedModel::ProcResIter
             PI = SchedModel->getWriteProcResBegin(SC),
             PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
        unsigned PIdx = PI->ProcResourceIdx;
        if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
          unsigned ReservedUntil, InstanceIdx;
          std::tie(ReservedUntil, InstanceIdx) = getNextResourceCycle(PIdx, 0);
          if (isTop()) {
            ReservedCycles[InstanceIdx] =
                std::max(ReservedUntil, NextCycle + PI->Cycles);
          } else
            ReservedCycles[InstanceIdx] = NextCycle;
        }
      }
    }
  }
  // Update ExpectedLatency and DependentLatency.
  unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
  unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
  if (SU->getDepth() > TopLatency) {
    TopLatency = SU->getDepth();
    LLVM_DEBUG(dbgs() << "  " << Available.getName() << " TopLatency SU("
                      << SU->NodeNum << ") " << TopLatency << "c\n");
  }
  if (SU->getHeight() > BotLatency) {
    BotLatency = SU->getHeight();
    LLVM_DEBUG(dbgs() << "  " << Available.getName() << " BotLatency SU("
                      << SU->NodeNum << ") " << BotLatency << "c\n");
  }
  // If we stall for any reason, bump the cycle.
  if (NextCycle > CurrCycle)
    bumpCycle(NextCycle);
  else
    // After updating ZoneCritResIdx and ExpectedLatency, check if we're
    // resource limited. If a stall occurred, bumpCycle does this.
    IsResourceLimited =
        checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(),
                           getScheduledLatency(), true);

  // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
  // resets CurrMOps. Loop to handle instructions with more MOps than issue in
  // one cycle. Since we commonly reach the max MOps here, opportunistically
  // bump the cycle to avoid uselessly checking everything in the readyQ.
  CurrMOps += IncMOps;

  // Bump the cycle count for issue group constraints.
  // This must be done after NextCycle has been adjusted for all other stalls.
  // Calling bumpCycle(X) will reduce CurrMOps by one issue group and set
  // CurrCycle to X.
  if ((isTop() && SchedModel->mustEndGroup(SU->getInstr())) ||
      (!isTop() && SchedModel->mustBeginGroup(SU->getInstr()))) {
    LLVM_DEBUG(dbgs() << "  Bump cycle to "
"end" : "begin") 2466 << " group\n"); 2467 bumpCycle(++NextCycle); 2468 } 2469 2470 while (CurrMOps >= SchedModel->getIssueWidth()) { 2471 LLVM_DEBUG(dbgs() << " *** Max MOps " << CurrMOps << " at cycle " 2472 << CurrCycle << '\n'); 2473 bumpCycle(++NextCycle); 2474 } 2475 LLVM_DEBUG(dumpScheduledState()); 2476 } 2477 2478 /// Release pending ready nodes in to the available queue. This makes them 2479 /// visible to heuristics. 2480 void SchedBoundary::releasePending() { 2481 // If the available queue is empty, it is safe to reset MinReadyCycle. 2482 if (Available.empty()) 2483 MinReadyCycle = std::numeric_limits<unsigned>::max(); 2484 2485 // Check to see if any of the pending instructions are ready to issue. If 2486 // so, add them to the available queue. 2487 for (unsigned I = 0, E = Pending.size(); I < E; ++I) { 2488 SUnit *SU = *(Pending.begin() + I); 2489 unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle; 2490 2491 if (ReadyCycle < MinReadyCycle) 2492 MinReadyCycle = ReadyCycle; 2493 2494 if (Available.size() >= ReadyListLimit) 2495 break; 2496 2497 releaseNode(SU, ReadyCycle, true, I); 2498 if (E != Pending.size()) { 2499 --I; 2500 --E; 2501 } 2502 } 2503 CheckPending = false; 2504 } 2505 2506 /// Remove SU from the ready set for this boundary. 2507 void SchedBoundary::removeReady(SUnit *SU) { 2508 if (Available.isInQueue(SU)) 2509 Available.remove(Available.find(SU)); 2510 else { 2511 assert(Pending.isInQueue(SU) && "bad ready count"); 2512 Pending.remove(Pending.find(SU)); 2513 } 2514 } 2515 2516 /// If this queue only has one ready candidate, return it. As a side effect, 2517 /// defer any nodes that now hit a hazard, and advance the cycle until at least 2518 /// one node is ready. If multiple instructions are ready, return NULL. 2519 SUnit *SchedBoundary::pickOnlyChoice() { 2520 if (CheckPending) 2521 releasePending(); 2522 2523 // Defer any ready instrs that now have a hazard. 2524 for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) { 2525 if (checkHazard(*I)) { 2526 Pending.push(*I); 2527 I = Available.remove(I); 2528 continue; 2529 } 2530 ++I; 2531 } 2532 for (unsigned i = 0; Available.empty(); ++i) { 2533 // FIXME: Re-enable assert once PR20057 is resolved. 2534 // assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) && 2535 // "permanent hazard"); 2536 (void)i; 2537 bumpCycle(CurrCycle + 1); 2538 releasePending(); 2539 } 2540 2541 LLVM_DEBUG(Pending.dump()); 2542 LLVM_DEBUG(Available.dump()); 2543 2544 if (Available.size() == 1) 2545 return *Available.begin(); 2546 return nullptr; 2547 } 2548 2549 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 2550 // This is useful information to dump after bumpNode. 2551 // Note that the Queue contents are more useful before pickNodeFromQueue. 
LLVM_DUMP_METHOD void SchedBoundary::dumpScheduledState() const {
  unsigned ResFactor;
  unsigned ResCount;
  if (ZoneCritResIdx) {
    ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
    ResCount = getResourceCount(ZoneCritResIdx);
  } else {
    ResFactor = SchedModel->getMicroOpFactor();
    ResCount = RetiredMOps * ResFactor;
  }
  unsigned LFactor = SchedModel->getLatencyFactor();
  dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
         << "  Retired: " << RetiredMOps;
  dbgs() << "\n  Executed: " << getExecutedCount() / LFactor << "c";
  dbgs() << "\n  Critical: " << ResCount / LFactor << "c, "
         << ResCount / ResFactor << " "
         << SchedModel->getResourceName(ZoneCritResIdx)
         << "\n  ExpectedLatency: " << ExpectedLatency << "c\n"
         << (IsResourceLimited ? "  - Resource" : "  - Latency")
         << " limited.\n";
}
#endif

//===----------------------------------------------------------------------===//
// GenericScheduler - Generic implementation of MachineSchedStrategy.
//===----------------------------------------------------------------------===//

void GenericSchedulerBase::SchedCandidate::
initResourceDelta(const ScheduleDAGMI *DAG,
                  const TargetSchedModel *SchedModel) {
  if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
    return;

  const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
  for (TargetSchedModel::ProcResIter
         PI = SchedModel->getWriteProcResBegin(SC),
         PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
    if (PI->ProcResourceIdx == Policy.ReduceResIdx)
      ResDelta.CritResources += PI->Cycles;
    if (PI->ProcResourceIdx == Policy.DemandResIdx)
      ResDelta.DemandedResources += PI->Cycles;
  }
}

/// Compute remaining latency. We need this both to determine whether the
/// overall schedule has become latency-limited and whether the instructions
/// outside this zone are resource or latency limited.
///
/// The "dependent" latency is updated incrementally during scheduling as the
/// max height/depth of scheduled nodes minus the cycles since it was
/// scheduled:
///   DLat = max (N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
///
/// The "independent" latency is the max ready queue depth:
///   ILat = max N.depth for N in Available|Pending
///
/// RemainingLatency is the greater of independent and dependent latency.
///
/// These computations are expensive, especially in DAGs with many edges, so
/// only do them if necessary.
static unsigned computeRemLatency(SchedBoundary &CurrZone) {
  unsigned RemLatency = CurrZone.getDependentLatency();
  RemLatency = std::max(RemLatency,
                        CurrZone.findMaxLatency(CurrZone.Available.elements()));
  RemLatency = std::max(RemLatency,
                        CurrZone.findMaxLatency(CurrZone.Pending.elements()));
  return RemLatency;
}

/// Returns true if the current cycle plus remaining latency is greater than
/// the critical path in the scheduling region.
bool GenericSchedulerBase::shouldReduceLatency(const CandPolicy &Policy,
                                               SchedBoundary &CurrZone,
                                               bool ComputeRemLatency,
                                               unsigned &RemLatency) const {
  // The current cycle is already greater than the critical path, so we are
  // already latency limited and don't need to compute the remaining latency.
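  // E.g. (illustrative): with CriticalPath == 20 and CurrCycle == 15, the
  // early-exit below does not fire, and we only report latency limited if
  // the longest remaining chain adds more than 5 cycles
  // (RemLatency + 15 > 20).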
  if (CurrZone.getCurrCycle() > Rem.CriticalPath)
    return true;

  // If we haven't scheduled anything yet, then we aren't latency limited.
  if (CurrZone.getCurrCycle() == 0)
    return false;

  if (ComputeRemLatency)
    RemLatency = computeRemLatency(CurrZone);

  return RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath;
}

/// Set the CandPolicy for a scheduling zone, given the current resources and
/// latencies inside and outside the zone.
void GenericSchedulerBase::setPolicy(CandPolicy &Policy, bool IsPostRA,
                                     SchedBoundary &CurrZone,
                                     SchedBoundary *OtherZone) {
  // Apply preemptive heuristics based on the total latency and resources
  // inside and outside this zone. Potential stalls should be considered
  // before following this policy.

  // Compute the critical resource outside the zone.
  unsigned OtherCritIdx = 0;
  unsigned OtherCount =
      OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;

  bool OtherResLimited = false;
  unsigned RemLatency = 0;
  bool RemLatencyComputed = false;
  if (SchedModel->hasInstrSchedModel() && OtherCount != 0) {
    RemLatency = computeRemLatency(CurrZone);
    RemLatencyComputed = true;
    OtherResLimited = checkResourceLimit(SchedModel->getLatencyFactor(),
                                         OtherCount, RemLatency, false);
  }

  // Schedule aggressively for latency in PostRA mode. We don't check for
  // acyclic latency during PostRA, and highly out-of-order processors will
  // skip PostRA scheduling.
  if (!OtherResLimited &&
      (IsPostRA || shouldReduceLatency(Policy, CurrZone, !RemLatencyComputed,
                                       RemLatency))) {
    Policy.ReduceLatency |= true;
    LLVM_DEBUG(dbgs() << "  " << CurrZone.Available.getName()
                      << " RemainingLatency " << RemLatency << " + "
                      << CurrZone.getCurrCycle() << "c > CritPath "
                      << Rem.CriticalPath << "\n");
  }
  // If the same resource is limiting inside and outside the zone, do nothing.
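  // E.g. (schematic, hypothetical resource): if FPDiv is the critical
  // resource both inside the zone and in the remainder, we return below
  // without setting ReduceResIdx or DemandResIdx, since shifting FPDiv work
  // across the boundary cannot help.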
2679 if (CurrZone.getZoneCritResIdx() == OtherCritIdx) 2680 return; 2681 2682 LLVM_DEBUG(if (CurrZone.isResourceLimited()) { 2683 dbgs() << " " << CurrZone.Available.getName() << " ResourceLimited: " 2684 << SchedModel->getResourceName(CurrZone.getZoneCritResIdx()) << "\n"; 2685 } if (OtherResLimited) dbgs() 2686 << " RemainingLimit: " 2687 << SchedModel->getResourceName(OtherCritIdx) << "\n"; 2688 if (!CurrZone.isResourceLimited() && !OtherResLimited) dbgs() 2689 << " Latency limited both directions.\n"); 2690 2691 if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx) 2692 Policy.ReduceResIdx = CurrZone.getZoneCritResIdx(); 2693 2694 if (OtherResLimited) 2695 Policy.DemandResIdx = OtherCritIdx; 2696 } 2697 2698 #ifndef NDEBUG 2699 const char *GenericSchedulerBase::getReasonStr( 2700 GenericSchedulerBase::CandReason Reason) { 2701 switch (Reason) { 2702 case NoCand: return "NOCAND "; 2703 case Only1: return "ONLY1 "; 2704 case PhysReg: return "PHYS-REG "; 2705 case RegExcess: return "REG-EXCESS"; 2706 case RegCritical: return "REG-CRIT "; 2707 case Stall: return "STALL "; 2708 case Cluster: return "CLUSTER "; 2709 case Weak: return "WEAK "; 2710 case RegMax: return "REG-MAX "; 2711 case ResourceReduce: return "RES-REDUCE"; 2712 case ResourceDemand: return "RES-DEMAND"; 2713 case TopDepthReduce: return "TOP-DEPTH "; 2714 case TopPathReduce: return "TOP-PATH "; 2715 case BotHeightReduce:return "BOT-HEIGHT"; 2716 case BotPathReduce: return "BOT-PATH "; 2717 case NextDefUse: return "DEF-USE "; 2718 case NodeOrder: return "ORDER "; 2719 }; 2720 llvm_unreachable("Unknown reason!"); 2721 } 2722 2723 void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) { 2724 PressureChange P; 2725 unsigned ResIdx = 0; 2726 unsigned Latency = 0; 2727 switch (Cand.Reason) { 2728 default: 2729 break; 2730 case RegExcess: 2731 P = Cand.RPDelta.Excess; 2732 break; 2733 case RegCritical: 2734 P = Cand.RPDelta.CriticalMax; 2735 break; 2736 case RegMax: 2737 P = Cand.RPDelta.CurrentMax; 2738 break; 2739 case ResourceReduce: 2740 ResIdx = Cand.Policy.ReduceResIdx; 2741 break; 2742 case ResourceDemand: 2743 ResIdx = Cand.Policy.DemandResIdx; 2744 break; 2745 case TopDepthReduce: 2746 Latency = Cand.SU->getDepth(); 2747 break; 2748 case TopPathReduce: 2749 Latency = Cand.SU->getHeight(); 2750 break; 2751 case BotHeightReduce: 2752 Latency = Cand.SU->getHeight(); 2753 break; 2754 case BotPathReduce: 2755 Latency = Cand.SU->getDepth(); 2756 break; 2757 } 2758 dbgs() << " Cand SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason); 2759 if (P.isValid()) 2760 dbgs() << " " << TRI->getRegPressureSetName(P.getPSet()) 2761 << ":" << P.getUnitInc() << " "; 2762 else 2763 dbgs() << " "; 2764 if (ResIdx) 2765 dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " "; 2766 else 2767 dbgs() << " "; 2768 if (Latency) 2769 dbgs() << " " << Latency << " cycles "; 2770 else 2771 dbgs() << " "; 2772 dbgs() << '\n'; 2773 } 2774 #endif 2775 2776 namespace llvm { 2777 /// Return true if this heuristic determines order. 
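/// Sketch of the protocol shared by tryLess/tryGreater below: returning true
/// with TryCand.Reason set means TryCand wins; returning true without
/// touching TryCand.Reason means Cand stands (its Reason may be improved to
/// the stronger one); returning false defers to the next heuristic.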
2778 bool tryLess(int TryVal, int CandVal, 2779 GenericSchedulerBase::SchedCandidate &TryCand, 2780 GenericSchedulerBase::SchedCandidate &Cand, 2781 GenericSchedulerBase::CandReason Reason) { 2782 if (TryVal < CandVal) { 2783 TryCand.Reason = Reason; 2784 return true; 2785 } 2786 if (TryVal > CandVal) { 2787 if (Cand.Reason > Reason) 2788 Cand.Reason = Reason; 2789 return true; 2790 } 2791 return false; 2792 } 2793 2794 bool tryGreater(int TryVal, int CandVal, 2795 GenericSchedulerBase::SchedCandidate &TryCand, 2796 GenericSchedulerBase::SchedCandidate &Cand, 2797 GenericSchedulerBase::CandReason Reason) { 2798 if (TryVal > CandVal) { 2799 TryCand.Reason = Reason; 2800 return true; 2801 } 2802 if (TryVal < CandVal) { 2803 if (Cand.Reason > Reason) 2804 Cand.Reason = Reason; 2805 return true; 2806 } 2807 return false; 2808 } 2809 2810 bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand, 2811 GenericSchedulerBase::SchedCandidate &Cand, 2812 SchedBoundary &Zone) { 2813 if (Zone.isTop()) { 2814 // Prefer the candidate with the lesser depth, but only if one of them has 2815 // depth greater than the total latency scheduled so far, otherwise either 2816 // of them could be scheduled now with no stall. 2817 if (std::max(TryCand.SU->getDepth(), Cand.SU->getDepth()) > 2818 Zone.getScheduledLatency()) { 2819 if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(), 2820 TryCand, Cand, GenericSchedulerBase::TopDepthReduce)) 2821 return true; 2822 } 2823 if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(), 2824 TryCand, Cand, GenericSchedulerBase::TopPathReduce)) 2825 return true; 2826 } else { 2827 // Prefer the candidate with the lesser height, but only if one of them has 2828 // height greater than the total latency scheduled so far, otherwise either 2829 // of them could be scheduled now with no stall. 2830 if (std::max(TryCand.SU->getHeight(), Cand.SU->getHeight()) > 2831 Zone.getScheduledLatency()) { 2832 if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(), 2833 TryCand, Cand, GenericSchedulerBase::BotHeightReduce)) 2834 return true; 2835 } 2836 if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(), 2837 TryCand, Cand, GenericSchedulerBase::BotPathReduce)) 2838 return true; 2839 } 2840 return false; 2841 } 2842 } // end namespace llvm 2843 2844 static void tracePick(GenericSchedulerBase::CandReason Reason, bool IsTop) { 2845 LLVM_DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ") 2846 << GenericSchedulerBase::getReasonStr(Reason) << '\n'); 2847 } 2848 2849 static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand) { 2850 tracePick(Cand.Reason, Cand.AtTop); 2851 } 2852 2853 void GenericScheduler::initialize(ScheduleDAGMI *dag) { 2854 assert(dag->hasVRegLiveness() && 2855 "(PreRA)GenericScheduler needs vreg liveness"); 2856 DAG = static_cast<ScheduleDAGMILive*>(dag); 2857 SchedModel = DAG->getSchedModel(); 2858 TRI = DAG->TRI; 2859 2860 if (RegionPolicy.ComputeDFSResult) 2861 DAG->computeDFSResult(); 2862 2863 Rem.init(DAG, SchedModel); 2864 Top.init(DAG, SchedModel, &Rem); 2865 Bot.init(DAG, SchedModel, &Rem); 2866 2867 // Initialize resource counts. 2868 2869 // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or 2870 // are disabled, then these HazardRecs will be disabled. 
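  // Note: each recognizer is created at most once per boundary and reused
  // across regions; SchedBoundary::reset() only deletes an enabled one, so a
  // disabled placeholder persists cheaply between regions.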
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  if (!Top.HazardRec) {
    Top.HazardRec =
        DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
            Itin, DAG);
  }
  if (!Bot.HazardRec) {
    Bot.HazardRec =
        DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
            Itin, DAG);
  }
  TopCand.SU = nullptr;
  BotCand.SU = nullptr;
}

/// Initialize the per-region scheduling policy.
void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
                                  MachineBasicBlock::iterator End,
                                  unsigned NumRegionInstrs) {
  const MachineFunction &MF = *Begin->getMF();
  const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();

  // Avoid setting up the register pressure tracker for small regions to save
  // compile time. As a rough heuristic, only track pressure when the number
  // of schedulable instructions exceeds half the allocatable integer
  // register file.
  RegionPolicy.ShouldTrackPressure = true;
  for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
    MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
    if (TLI->isTypeLegal(LegalIntVT)) {
      unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
          TLI->getRegClassFor(LegalIntVT));
      RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
    }
  }

  // For generic targets, we default to bottom-up, because it's simpler and
  // more compile-time optimizations have been implemented in that direction.
  RegionPolicy.OnlyBottomUp = true;

  // Allow the subtarget to override the default policy.
  MF.getSubtarget().overrideSchedPolicy(RegionPolicy, NumRegionInstrs);

  // After subtarget overrides, apply command line options.
  if (!EnableRegPressure) {
    RegionPolicy.ShouldTrackPressure = false;
    RegionPolicy.ShouldTrackLaneMasks = false;
  }

  // Check whether -misched-topdown/bottomup can force or unforce the
  // scheduling direction. e.g. -misched-bottomup=false allows scheduling in
  // both directions.
  assert((!ForceTopDown || !ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
  if (ForceBottomUp.getNumOccurrences() > 0) {
    RegionPolicy.OnlyBottomUp = ForceBottomUp;
    if (RegionPolicy.OnlyBottomUp)
      RegionPolicy.OnlyTopDown = false;
  }
  if (ForceTopDown.getNumOccurrences() > 0) {
    RegionPolicy.OnlyTopDown = ForceTopDown;
    if (RegionPolicy.OnlyTopDown)
      RegionPolicy.OnlyBottomUp = false;
  }
}

void GenericScheduler::dumpPolicy() const {
  // Cannot completely remove virtual function even in release mode.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  dbgs() << "GenericScheduler RegionPolicy: "
         << " ShouldTrackPressure=" << RegionPolicy.ShouldTrackPressure
         << " OnlyTopDown=" << RegionPolicy.OnlyTopDown
         << " OnlyBottomUp=" << RegionPolicy.OnlyBottomUp
         << "\n";
#endif
}

/// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
/// critical path by more cycles than it takes to drain the instruction
/// buffer. We estimate an upper bound on in-flight instructions as:
///
///   CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
///   InFlightIterations = AcyclicPath / CyclesPerIteration
///   InFlightResources  = InFlightIterations * LoopResources
///
/// TODO: Check execution resources in addition to IssueCount.
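///
/// Worked example (hypothetical values): with CyclicCritPath == 4c,
/// CriticalPath == 12c, LatencyFactor == 1 and RemIssueCount == 8 micro-op
/// units, IterCount == max(4, 8) == 8, AcyclicCount == 12, and
/// InFlightCount == ceil(12 * 8 / 8) == 12; against a 10-entry micro-op
/// buffer (MicroOpFactor == 1), 12 > 10 flags the loop as acyclic-latency
/// limited.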
2955 void GenericScheduler::checkAcyclicLatency() { 2956 if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath) 2957 return; 2958 2959 // Scaled number of cycles per loop iteration. 2960 unsigned IterCount = 2961 std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(), 2962 Rem.RemIssueCount); 2963 // Scaled acyclic critical path. 2964 unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor(); 2965 // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop 2966 unsigned InFlightCount = 2967 (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount; 2968 unsigned BufferLimit = 2969 SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor(); 2970 2971 Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit; 2972 2973 LLVM_DEBUG( 2974 dbgs() << "IssueCycles=" 2975 << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c " 2976 << "IterCycles=" << IterCount / SchedModel->getLatencyFactor() 2977 << "c NumIters=" << (AcyclicCount + IterCount - 1) / IterCount 2978 << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor() 2979 << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n"; 2980 if (Rem.IsAcyclicLatencyLimited) dbgs() << " ACYCLIC LATENCY LIMIT\n"); 2981 } 2982 2983 void GenericScheduler::registerRoots() { 2984 Rem.CriticalPath = DAG->ExitSU.getDepth(); 2985 2986 // Some roots may not feed into ExitSU. Check all of them in case. 2987 for (const SUnit *SU : Bot.Available) { 2988 if (SU->getDepth() > Rem.CriticalPath) 2989 Rem.CriticalPath = SU->getDepth(); 2990 } 2991 LLVM_DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n'); 2992 if (DumpCriticalPathLength) { 2993 errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n"; 2994 } 2995 2996 if (EnableCyclicPath && SchedModel->getMicroOpBufferSize() > 0) { 2997 Rem.CyclicCritPath = DAG->computeCyclicCriticalPath(); 2998 checkAcyclicLatency(); 2999 } 3000 } 3001 3002 namespace llvm { 3003 bool tryPressure(const PressureChange &TryP, 3004 const PressureChange &CandP, 3005 GenericSchedulerBase::SchedCandidate &TryCand, 3006 GenericSchedulerBase::SchedCandidate &Cand, 3007 GenericSchedulerBase::CandReason Reason, 3008 const TargetRegisterInfo *TRI, 3009 const MachineFunction &MF) { 3010 // If one candidate decreases and the other increases, go with it. 3011 // Invalid candidates have UnitInc==0. 3012 if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand, 3013 Reason)) { 3014 return true; 3015 } 3016 // Do not compare the magnitude of pressure changes between top and bottom 3017 // boundary. 3018 if (Cand.AtTop != TryCand.AtTop) 3019 return false; 3020 3021 // If both candidates affect the same set in the same boundary, go with the 3022 // smallest increase. 3023 unsigned TryPSet = TryP.getPSetOrMax(); 3024 unsigned CandPSet = CandP.getPSetOrMax(); 3025 if (TryPSet == CandPSet) { 3026 return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand, 3027 Reason); 3028 } 3029 3030 int TryRank = TryP.isValid() ? TRI->getRegPressureSetScore(MF, TryPSet) : 3031 std::numeric_limits<int>::max(); 3032 3033 int CandRank = CandP.isValid() ? TRI->getRegPressureSetScore(MF, CandPSet) : 3034 std::numeric_limits<int>::max(); 3035 3036 // If the candidates are decreasing pressure, reverse priority. 3037 if (TryP.getUnitInc() < 0) 3038 std::swap(TryRank, CandRank); 3039 return tryGreater(TryRank, CandRank, TryCand, Cand, Reason); 3040 } 3041 3042 unsigned getWeakLeft(const SUnit *SU, bool isTop) { 3043 return (isTop) ? 
SU->WeakPredsLeft : SU->WeakSuccsLeft;
3044 }
3045 
3046 /// Minimize physical register live ranges. Regalloc wants them adjacent to
3047 /// their physreg def/use.
3048 ///
3049 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
3050 /// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
3051 /// with the operation that produces or consumes the physreg. We'll do this when
3052 /// regalloc has support for parallel copies.
3053 int biasPhysReg(const SUnit *SU, bool isTop) {
3054 const MachineInstr *MI = SU->getInstr();
3055 
3056 if (MI->isCopy()) {
3057 unsigned ScheduledOper = isTop ? 1 : 0;
3058 unsigned UnscheduledOper = isTop ? 0 : 1;
3059 // If we have already scheduled the physreg producer/consumer, immediately
3060 // schedule the copy.
3061 if (Register::isPhysicalRegister(MI->getOperand(ScheduledOper).getReg()))
3062 return 1;
3063 // If the physreg is at the boundary, defer it. Otherwise schedule it
3064 // immediately to free the dependent. We can hoist the copy later.
3065 bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
3066 if (Register::isPhysicalRegister(MI->getOperand(UnscheduledOper).getReg()))
3067 return AtBoundary ? -1 : 1;
3068 }
3069 
3070 if (MI->isMoveImmediate()) {
3071 // If we have a move immediate and all successors have been assigned, bias
3072 // towards scheduling this later. Make sure all register defs are to
3073 // physical registers.
3074 bool DoBias = true;
3075 for (const MachineOperand &Op : MI->defs()) {
3076 if (Op.isReg() && !Register::isPhysicalRegister(Op.getReg())) {
3077 DoBias = false;
3078 break;
3079 }
3080 }
3081 
3082 if (DoBias)
3083 return isTop ? -1 : 1;
3084 }
3085 
3086 return 0;
3087 }
3088 } // end namespace llvm
3089 
3090 void GenericScheduler::initCandidate(SchedCandidate &Cand, SUnit *SU,
3091 bool AtTop,
3092 const RegPressureTracker &RPTracker,
3093 RegPressureTracker &TempTracker) {
3094 Cand.SU = SU;
3095 Cand.AtTop = AtTop;
3096 if (DAG->isTrackingPressure()) {
3097 if (AtTop) {
3098 TempTracker.getMaxDownwardPressureDelta(
3099 Cand.SU->getInstr(),
3100 Cand.RPDelta,
3101 DAG->getRegionCriticalPSets(),
3102 DAG->getRegPressure().MaxSetPressure);
3103 } else {
3104 if (VerifyScheduling) {
3105 TempTracker.getMaxUpwardPressureDelta(
3106 Cand.SU->getInstr(),
3107 &DAG->getPressureDiff(Cand.SU),
3108 Cand.RPDelta,
3109 DAG->getRegionCriticalPSets(),
3110 DAG->getRegPressure().MaxSetPressure);
3111 } else {
3112 RPTracker.getUpwardPressureDelta(
3113 Cand.SU->getInstr(),
3114 DAG->getPressureDiff(Cand.SU),
3115 Cand.RPDelta,
3116 DAG->getRegionCriticalPSets(),
3117 DAG->getRegPressure().MaxSetPressure);
3118 }
3119 }
3120 }
3121 LLVM_DEBUG(if (Cand.RPDelta.Excess.isValid()) dbgs()
3122 << " Try SU(" << Cand.SU->NodeNum << ") "
3123 << TRI->getRegPressureSetName(Cand.RPDelta.Excess.getPSet()) << ":"
3124 << Cand.RPDelta.Excess.getUnitInc() << "\n");
3125 }
3126 
3127 /// Apply a set of heuristics to a new candidate. Heuristics are currently
3128 /// hierarchical. This may be more efficient than a graduated cost model because
3129 /// we don't need to evaluate all aspects of the model for each node in the
3130 /// queue. But it's really done to make the heuristics easier to debug and
3131 /// statistically analyze.
3132 ///
3133 /// \param Cand provides the policy and current best candidate.
3134 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
3135 /// \param Zone describes the scheduled zone that we are extending, or nullptr
3136 /// if Cand is from a different zone than TryCand.
3137 void GenericScheduler::tryCandidate(SchedCandidate &Cand,
3138 SchedCandidate &TryCand,
3139 SchedBoundary *Zone) const {
3140 // Initialize the candidate if needed.
3141 if (!Cand.isValid()) {
3142 TryCand.Reason = NodeOrder;
3143 return;
3144 }
3145 
3146 // Bias PhysReg Defs and copies to their uses and definitions, respectively.
3147 if (tryGreater(biasPhysReg(TryCand.SU, TryCand.AtTop),
3148 biasPhysReg(Cand.SU, Cand.AtTop), TryCand, Cand, PhysReg))
3149 return;
3150 
3151 // Avoid exceeding the target's limit.
3152 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
3153 Cand.RPDelta.Excess,
3154 TryCand, Cand, RegExcess, TRI,
3155 DAG->MF))
3156 return;
3157 
3158 // Avoid increasing the max critical pressure in the scheduled region.
3159 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
3160 Cand.RPDelta.CriticalMax,
3161 TryCand, Cand, RegCritical, TRI,
3162 DAG->MF))
3163 return;
3164 
3165 // We only compare a subset of features when comparing nodes between
3166 // Top and Bottom boundary. Some properties are simply incomparable; in many
3167 // other instances we should only override the other boundary if something
3168 // is a clear good pick on one boundary. Skip heuristics that are more
3169 // "tie-breaking" in nature.
3170 bool SameBoundary = Zone != nullptr;
3171 if (SameBoundary) {
3172 // For loops that are acyclic path limited, aggressively schedule for
3173 // latency. Within a single cycle, whenever CurrMOps > 0, allow normal
3174 // heuristics to take precedence.
3175 if (Rem.IsAcyclicLatencyLimited && !Zone->getCurrMOps() &&
3176 tryLatency(TryCand, Cand, *Zone))
3177 return;
3178 
3179 // Prioritize instructions that read unbuffered resources by stall cycles.
3180 if (tryLess(Zone->getLatencyStallCycles(TryCand.SU),
3181 Zone->getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
3182 return;
3183 }
3184 
3185 // Keep clustered nodes together to encourage downstream peephole
3186 // optimizations which may reduce resource requirements.
3187 //
3188 // This is a best effort to set things up for a post-RA pass. Optimizations
3189 // like generating loads of multiple registers should ideally be done within
3190 // the scheduler pass by combining the loads during DAG postprocessing.
3191 const SUnit *CandNextClusterSU =
3192 Cand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
3193 const SUnit *TryCandNextClusterSU =
3194 TryCand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
3195 if (tryGreater(TryCand.SU == TryCandNextClusterSU,
3196 Cand.SU == CandNextClusterSU,
3197 TryCand, Cand, Cluster))
3198 return;
3199 
3200 if (SameBoundary) {
3201 // Weak edges are for clustering and other constraints.
3202 if (tryLess(getWeakLeft(TryCand.SU, TryCand.AtTop),
3203 getWeakLeft(Cand.SU, Cand.AtTop),
3204 TryCand, Cand, Weak))
3205 return;
3206 }
3207 
3208 // Avoid increasing the max pressure of the entire region.
3209 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
3210 Cand.RPDelta.CurrentMax,
3211 TryCand, Cand, RegMax, TRI,
3212 DAG->MF))
3213 return;
3214 
3215 if (SameBoundary) {
3216 // Avoid critical resource consumption and balance the schedule.
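// Illustrative reading of the checks below (hypothetical counts): if TryCand
// would occupy the critical resource for 3 cycles and Cand for only 2,
// ResourceReduce keeps Cand; on a tie, ResourceDemand prefers the candidate
// that makes more use of the resource this boundary is trying to keep busy.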
3217 TryCand.initResourceDelta(DAG, SchedModel); 3218 if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources, 3219 TryCand, Cand, ResourceReduce)) 3220 return; 3221 if (tryGreater(TryCand.ResDelta.DemandedResources, 3222 Cand.ResDelta.DemandedResources, 3223 TryCand, Cand, ResourceDemand)) 3224 return; 3225 3226 // Avoid serializing long latency dependence chains. 3227 // For acyclic path limited loops, latency was already checked above. 3228 if (!RegionPolicy.DisableLatencyHeuristic && TryCand.Policy.ReduceLatency && 3229 !Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, *Zone)) 3230 return; 3231 3232 // Fall through to original instruction order. 3233 if ((Zone->isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum) 3234 || (!Zone->isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) { 3235 TryCand.Reason = NodeOrder; 3236 } 3237 } 3238 } 3239 3240 /// Pick the best candidate from the queue. 3241 /// 3242 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during 3243 /// DAG building. To adjust for the current scheduling location we need to 3244 /// maintain the number of vreg uses remaining to be top-scheduled. 3245 void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone, 3246 const CandPolicy &ZonePolicy, 3247 const RegPressureTracker &RPTracker, 3248 SchedCandidate &Cand) { 3249 // getMaxPressureDelta temporarily modifies the tracker. 3250 RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker); 3251 3252 ReadyQueue &Q = Zone.Available; 3253 for (SUnit *SU : Q) { 3254 3255 SchedCandidate TryCand(ZonePolicy); 3256 initCandidate(TryCand, SU, Zone.isTop(), RPTracker, TempTracker); 3257 // Pass SchedBoundary only when comparing nodes from the same boundary. 3258 SchedBoundary *ZoneArg = Cand.AtTop == TryCand.AtTop ? &Zone : nullptr; 3259 tryCandidate(Cand, TryCand, ZoneArg); 3260 if (TryCand.Reason != NoCand) { 3261 // Initialize resource delta if needed in case future heuristics query it. 3262 if (TryCand.ResDelta == SchedResourceDelta()) 3263 TryCand.initResourceDelta(DAG, SchedModel); 3264 Cand.setBest(TryCand); 3265 LLVM_DEBUG(traceCandidate(Cand)); 3266 } 3267 } 3268 } 3269 3270 /// Pick the best candidate node from either the top or bottom queue. 3271 SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) { 3272 // Schedule as far as possible in the direction of no choice. This is most 3273 // efficient, but also provides the best heuristics for CriticalPSets. 3274 if (SUnit *SU = Bot.pickOnlyChoice()) { 3275 IsTopNode = false; 3276 tracePick(Only1, false); 3277 return SU; 3278 } 3279 if (SUnit *SU = Top.pickOnlyChoice()) { 3280 IsTopNode = true; 3281 tracePick(Only1, true); 3282 return SU; 3283 } 3284 // Set the bottom-up policy based on the state of the current bottom zone and 3285 // the instructions outside the zone, including the top zone. 3286 CandPolicy BotPolicy; 3287 setPolicy(BotPolicy, /*IsPostRA=*/false, Bot, &Top); 3288 // Set the top-down policy based on the state of the current top zone and 3289 // the instructions outside the zone, including the bottom zone. 3290 CandPolicy TopPolicy; 3291 setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot); 3292 3293 // See if BotCand is still valid (because we previously scheduled from Top). 
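// Note that BotCand and TopCand are cached across calls: a previous pick is
// reused below unless it has since been scheduled or was computed under a
// different policy, which avoids rescanning the whole ready queue.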
3294 LLVM_DEBUG(dbgs() << "Picking from Bot:\n"); 3295 if (!BotCand.isValid() || BotCand.SU->isScheduled || 3296 BotCand.Policy != BotPolicy) { 3297 BotCand.reset(CandPolicy()); 3298 pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand); 3299 assert(BotCand.Reason != NoCand && "failed to find the first candidate"); 3300 } else { 3301 LLVM_DEBUG(traceCandidate(BotCand)); 3302 #ifndef NDEBUG 3303 if (VerifyScheduling) { 3304 SchedCandidate TCand; 3305 TCand.reset(CandPolicy()); 3306 pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), TCand); 3307 assert(TCand.SU == BotCand.SU && 3308 "Last pick result should correspond to re-picking right now"); 3309 } 3310 #endif 3311 } 3312 3313 // Check if the top Q has a better candidate. 3314 LLVM_DEBUG(dbgs() << "Picking from Top:\n"); 3315 if (!TopCand.isValid() || TopCand.SU->isScheduled || 3316 TopCand.Policy != TopPolicy) { 3317 TopCand.reset(CandPolicy()); 3318 pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand); 3319 assert(TopCand.Reason != NoCand && "failed to find the first candidate"); 3320 } else { 3321 LLVM_DEBUG(traceCandidate(TopCand)); 3322 #ifndef NDEBUG 3323 if (VerifyScheduling) { 3324 SchedCandidate TCand; 3325 TCand.reset(CandPolicy()); 3326 pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TCand); 3327 assert(TCand.SU == TopCand.SU && 3328 "Last pick result should correspond to re-picking right now"); 3329 } 3330 #endif 3331 } 3332 3333 // Pick best from BotCand and TopCand. 3334 assert(BotCand.isValid()); 3335 assert(TopCand.isValid()); 3336 SchedCandidate Cand = BotCand; 3337 TopCand.Reason = NoCand; 3338 tryCandidate(Cand, TopCand, nullptr); 3339 if (TopCand.Reason != NoCand) { 3340 Cand.setBest(TopCand); 3341 LLVM_DEBUG(traceCandidate(Cand)); 3342 } 3343 3344 IsTopNode = Cand.AtTop; 3345 tracePick(Cand); 3346 return Cand.SU; 3347 } 3348 3349 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy. 3350 SUnit *GenericScheduler::pickNode(bool &IsTopNode) { 3351 if (DAG->top() == DAG->bottom()) { 3352 assert(Top.Available.empty() && Top.Pending.empty() && 3353 Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage"); 3354 return nullptr; 3355 } 3356 SUnit *SU; 3357 do { 3358 if (RegionPolicy.OnlyTopDown) { 3359 SU = Top.pickOnlyChoice(); 3360 if (!SU) { 3361 CandPolicy NoPolicy; 3362 TopCand.reset(NoPolicy); 3363 pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand); 3364 assert(TopCand.Reason != NoCand && "failed to find a candidate"); 3365 tracePick(TopCand); 3366 SU = TopCand.SU; 3367 } 3368 IsTopNode = true; 3369 } else if (RegionPolicy.OnlyBottomUp) { 3370 SU = Bot.pickOnlyChoice(); 3371 if (!SU) { 3372 CandPolicy NoPolicy; 3373 BotCand.reset(NoPolicy); 3374 pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand); 3375 assert(BotCand.Reason != NoCand && "failed to find a candidate"); 3376 tracePick(BotCand); 3377 SU = BotCand.SU; 3378 } 3379 IsTopNode = false; 3380 } else { 3381 SU = pickNodeBidirectional(IsTopNode); 3382 } 3383 } while (SU->isScheduled); 3384 3385 if (SU->isTopReady()) 3386 Top.removeReady(SU); 3387 if (SU->isBottomReady()) 3388 Bot.removeReady(SU); 3389 3390 LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " 3391 << *SU->getInstr()); 3392 return SU; 3393 } 3394 3395 void GenericScheduler::reschedulePhysReg(SUnit *SU, bool isTop) { 3396 MachineBasicBlock::iterator InsertPos = SU->getInstr(); 3397 if (!isTop) 3398 ++InsertPos; 3399 SmallVectorImpl<SDep> &Deps = isTop ? 
SU->Preds : SU->Succs;
3400 
3401 // Find already scheduled copies with a single physreg dependence and move
3402 // them just above the scheduled instruction.
3403 for (SDep &Dep : Deps) {
3404 if (Dep.getKind() != SDep::Data ||
3405 !Register::isPhysicalRegister(Dep.getReg()))
3406 continue;
3407 SUnit *DepSU = Dep.getSUnit();
3408 if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
3409 continue;
3410 MachineInstr *Copy = DepSU->getInstr();
3411 if (!Copy->isCopy() && !Copy->isMoveImmediate())
3412 continue;
3413 LLVM_DEBUG(dbgs() << " Rescheduling physreg copy ";
3414 DAG->dumpNode(*Dep.getSUnit()));
3415 DAG->moveInstruction(Copy, InsertPos);
3416 }
3417 }
3418 
3419 /// Update the scheduler's state after scheduling a node. This is the same node
3420 /// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
3421 /// update its state based on the current cycle before MachineSchedStrategy
3422 /// does.
3423 ///
3424 /// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
3425 /// them here. See comments in biasPhysReg.
3426 void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3427 if (IsTopNode) {
3428 SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3429 Top.bumpNode(SU);
3430 if (SU->hasPhysRegUses)
3431 reschedulePhysReg(SU, true);
3432 } else {
3433 SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
3434 Bot.bumpNode(SU);
3435 if (SU->hasPhysRegDefs)
3436 reschedulePhysReg(SU, false);
3437 }
3438 }
3439 
3440 /// Create the standard converging machine scheduler. This will be used as the
3441 /// default scheduler if the target does not set a default.
3442 ScheduleDAGMILive *llvm::createGenericSchedLive(MachineSchedContext *C) {
3443 ScheduleDAGMILive *DAG =
3444 new ScheduleDAGMILive(C, std::make_unique<GenericScheduler>(C));
3445 // Register DAG post-processors.
3446 //
3447 // FIXME: extend the mutation API to allow earlier mutations to instantiate
3448 // data and pass it to later mutations. Have a single mutation that gathers
3449 // the interesting nodes in one pass.
3450 DAG->addMutation(createCopyConstrainDAGMutation(DAG->TII, DAG->TRI));
3451 return DAG;
3452 }
3453 
3454 static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
3455 return createGenericSchedLive(C);
3456 }
3457 
3458 static MachineSchedRegistry
3459 GenericSchedRegistry("converge", "Standard converging scheduler.",
3460 createConvergingSched);
3461 
3462 //===----------------------------------------------------------------------===//
3463 // PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
3464 //===----------------------------------------------------------------------===//
3465 
3466 void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
3467 DAG = Dag;
3468 SchedModel = DAG->getSchedModel();
3469 TRI = DAG->TRI;
3470 
3471 Rem.init(DAG, SchedModel);
3472 Top.init(DAG, SchedModel, &Rem);
3473 BotRoots.clear();
3474 
3475 // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
3476 // or are disabled, then these HazardRecs will be disabled.
3477 const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
3478 if (!Top.HazardRec) {
3479 Top.HazardRec =
3480 DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
3481 Itin, DAG);
3482 }
3483 }
3484 
3485 void PostGenericScheduler::registerRoots() {
3486 Rem.CriticalPath = DAG->ExitSU.getDepth();
3487 
3488 // Some roots may not feed into ExitSU. Check all of them, just in case.
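// For example, in a hypothetical DAG, a bottom root ending in a store may have
// no data edge to ExitSU, yet its depth can still exceed ExitSU.getDepth() and
// thereby determine the critical path.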
3489 for (const SUnit *SU : BotRoots) { 3490 if (SU->getDepth() > Rem.CriticalPath) 3491 Rem.CriticalPath = SU->getDepth(); 3492 } 3493 LLVM_DEBUG(dbgs() << "Critical Path: (PGS-RR) " << Rem.CriticalPath << '\n'); 3494 if (DumpCriticalPathLength) { 3495 errs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << " \n"; 3496 } 3497 } 3498 3499 /// Apply a set of heuristics to a new candidate for PostRA scheduling. 3500 /// 3501 /// \param Cand provides the policy and current best candidate. 3502 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized. 3503 void PostGenericScheduler::tryCandidate(SchedCandidate &Cand, 3504 SchedCandidate &TryCand) { 3505 // Initialize the candidate if needed. 3506 if (!Cand.isValid()) { 3507 TryCand.Reason = NodeOrder; 3508 return; 3509 } 3510 3511 // Prioritize instructions that read unbuffered resources by stall cycles. 3512 if (tryLess(Top.getLatencyStallCycles(TryCand.SU), 3513 Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall)) 3514 return; 3515 3516 // Keep clustered nodes together. 3517 if (tryGreater(TryCand.SU == DAG->getNextClusterSucc(), 3518 Cand.SU == DAG->getNextClusterSucc(), 3519 TryCand, Cand, Cluster)) 3520 return; 3521 3522 // Avoid critical resource consumption and balance the schedule. 3523 if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources, 3524 TryCand, Cand, ResourceReduce)) 3525 return; 3526 if (tryGreater(TryCand.ResDelta.DemandedResources, 3527 Cand.ResDelta.DemandedResources, 3528 TryCand, Cand, ResourceDemand)) 3529 return; 3530 3531 // Avoid serializing long latency dependence chains. 3532 if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) { 3533 return; 3534 } 3535 3536 // Fall through to original instruction order. 3537 if (TryCand.SU->NodeNum < Cand.SU->NodeNum) 3538 TryCand.Reason = NodeOrder; 3539 } 3540 3541 void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) { 3542 ReadyQueue &Q = Top.Available; 3543 for (SUnit *SU : Q) { 3544 SchedCandidate TryCand(Cand.Policy); 3545 TryCand.SU = SU; 3546 TryCand.AtTop = true; 3547 TryCand.initResourceDelta(DAG, SchedModel); 3548 tryCandidate(Cand, TryCand); 3549 if (TryCand.Reason != NoCand) { 3550 Cand.setBest(TryCand); 3551 LLVM_DEBUG(traceCandidate(Cand)); 3552 } 3553 } 3554 } 3555 3556 /// Pick the next node to schedule. 3557 SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) { 3558 if (DAG->top() == DAG->bottom()) { 3559 assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage"); 3560 return nullptr; 3561 } 3562 SUnit *SU; 3563 do { 3564 SU = Top.pickOnlyChoice(); 3565 if (SU) { 3566 tracePick(Only1, true); 3567 } else { 3568 CandPolicy NoPolicy; 3569 SchedCandidate TopCand(NoPolicy); 3570 // Set the top-down policy based on the state of the current top zone and 3571 // the instructions outside the zone, including the bottom zone. 3572 setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr); 3573 pickNodeFromQueue(TopCand); 3574 assert(TopCand.Reason != NoCand && "failed to find a candidate"); 3575 tracePick(TopCand); 3576 SU = TopCand.SU; 3577 } 3578 } while (SU->isScheduled); 3579 3580 IsTopNode = true; 3581 Top.removeReady(SU); 3582 3583 LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " 3584 << *SU->getInstr()); 3585 return SU; 3586 } 3587 3588 /// Called after ScheduleDAGMI has scheduled an instruction and updated 3589 /// scheduled/remaining flags in the DAG nodes. 
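/// (For illustration: if SU became ready at cycle 3 but the top zone has
/// already advanced to cycle 5, the std::max below raises TopReadyCycle to 5
/// before the node is bumped into the schedule.)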
3590 void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3591 SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3592 Top.bumpNode(SU);
3593 }
3594 
3595 ScheduleDAGMI *llvm::createGenericSchedPostRA(MachineSchedContext *C) {
3596 return new ScheduleDAGMI(C, std::make_unique<PostGenericScheduler>(C),
3597 /*RemoveKillFlags=*/true);
3598 }
3599 
3600 //===----------------------------------------------------------------------===//
3601 // ILP Scheduler. Currently for experimental analysis of heuristics.
3602 //===----------------------------------------------------------------------===//
3603 
3604 namespace {
3605 
3606 /// Order nodes by the ILP metric.
3607 struct ILPOrder {
3608 const SchedDFSResult *DFSResult = nullptr;
3609 const BitVector *ScheduledTrees = nullptr;
3610 bool MaximizeILP;
3611 
3612 ILPOrder(bool MaxILP) : MaximizeILP(MaxILP) {}
3613 
3614 /// Apply a less-than relation on node priority.
3615 ///
3616 /// (Return true if A comes after B in the Q.)
3617 bool operator()(const SUnit *A, const SUnit *B) const {
3618 unsigned SchedTreeA = DFSResult->getSubtreeID(A);
3619 unsigned SchedTreeB = DFSResult->getSubtreeID(B);
3620 if (SchedTreeA != SchedTreeB) {
3621 // Unscheduled trees have lower priority.
3622 if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
3623 return ScheduledTrees->test(SchedTreeB);
3624 
3625 // Trees with shallower connections have lower priority.
3626 if (DFSResult->getSubtreeLevel(SchedTreeA)
3627 != DFSResult->getSubtreeLevel(SchedTreeB)) {
3628 return DFSResult->getSubtreeLevel(SchedTreeA)
3629 < DFSResult->getSubtreeLevel(SchedTreeB);
3630 }
3631 }
3632 if (MaximizeILP)
3633 return DFSResult->getILP(A) < DFSResult->getILP(B);
3634 else
3635 return DFSResult->getILP(A) > DFSResult->getILP(B);
3636 }
3637 };
3638 
3639 /// Schedule based on the ILP metric.
3640 class ILPScheduler : public MachineSchedStrategy {
3641 ScheduleDAGMILive *DAG = nullptr;
3642 ILPOrder Cmp;
3643 
3644 std::vector<SUnit*> ReadyQ;
3645 
3646 public:
3647 ILPScheduler(bool MaximizeILP) : Cmp(MaximizeILP) {}
3648 
3649 void initialize(ScheduleDAGMI *dag) override {
3650 assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
3651 DAG = static_cast<ScheduleDAGMILive*>(dag);
3652 DAG->computeDFSResult();
3653 Cmp.DFSResult = DAG->getDFSResult();
3654 Cmp.ScheduledTrees = &DAG->getScheduledTrees();
3655 ReadyQ.clear();
3656 }
3657 
3658 void registerRoots() override {
3659 // Restore the heap in ReadyQ with the updated DFS results.
3660 std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3661 }
3662 
3663 /// Implement MachineSchedStrategy interface.
3664 /// -----------------------------------------
3665 
3666 /// Callback to select the highest priority node from the ready Q.
3667 SUnit *pickNode(bool &IsTopNode) override {
3668 if (ReadyQ.empty()) return nullptr;
3669 std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3670 SUnit *SU = ReadyQ.back();
3671 ReadyQ.pop_back();
3672 IsTopNode = false;
3673 LLVM_DEBUG(dbgs() << "Pick node "
3674 << "SU(" << SU->NodeNum << ") "
3675 << " ILP: " << DAG->getDFSResult()->getILP(SU)
3676 << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU)
3677 << " @"
3678 << DAG->getDFSResult()->getSubtreeLevel(
3679 DAG->getDFSResult()->getSubtreeID(SU))
3680 << '\n'
3681 << "Scheduling " << *SU->getInstr());
3682 return SU;
3683 }
3684 
3685 /// Scheduler callback to notify that a new subtree is scheduled.
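/// The heap is rebuilt wholesale rather than resorted incrementally: marking
/// a subtree as scheduled changes ILPOrder's relative priorities for every
/// node still in ReadyQ, invalidating the old heap order.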
3686 void scheduleTree(unsigned SubtreeID) override { 3687 std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 3688 } 3689 3690 /// Callback after a node is scheduled. Mark a newly scheduled tree, notify 3691 /// DFSResults, and resort the priority Q. 3692 void schedNode(SUnit *SU, bool IsTopNode) override { 3693 assert(!IsTopNode && "SchedDFSResult needs bottom-up"); 3694 } 3695 3696 void releaseTopNode(SUnit *) override { /*only called for top roots*/ } 3697 3698 void releaseBottomNode(SUnit *SU) override { 3699 ReadyQ.push_back(SU); 3700 std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 3701 } 3702 }; 3703 3704 } // end anonymous namespace 3705 3706 static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) { 3707 return new ScheduleDAGMILive(C, std::make_unique<ILPScheduler>(true)); 3708 } 3709 static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) { 3710 return new ScheduleDAGMILive(C, std::make_unique<ILPScheduler>(false)); 3711 } 3712 3713 static MachineSchedRegistry ILPMaxRegistry( 3714 "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler); 3715 static MachineSchedRegistry ILPMinRegistry( 3716 "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler); 3717 3718 //===----------------------------------------------------------------------===// 3719 // Machine Instruction Shuffler for Correctness Testing 3720 //===----------------------------------------------------------------------===// 3721 3722 #ifndef NDEBUG 3723 namespace { 3724 3725 /// Apply a less-than relation on the node order, which corresponds to the 3726 /// instruction order prior to scheduling. IsReverse implements greater-than. 3727 template<bool IsReverse> 3728 struct SUnitOrder { 3729 bool operator()(SUnit *A, SUnit *B) const { 3730 if (IsReverse) 3731 return A->NodeNum > B->NodeNum; 3732 else 3733 return A->NodeNum < B->NodeNum; 3734 } 3735 }; 3736 3737 /// Reorder instructions as much as possible. 3738 class InstructionShuffler : public MachineSchedStrategy { 3739 bool IsAlternating; 3740 bool IsTopDown; 3741 3742 // Using a less-than relation (SUnitOrder<false>) for the TopQ priority 3743 // gives nodes with a higher number higher priority causing the latest 3744 // instructions to be scheduled first. 3745 PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false>> 3746 TopQ; 3747 3748 // When scheduling bottom-up, use greater-than as the queue priority. 3749 PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true>> 3750 BottomQ; 3751 3752 public: 3753 InstructionShuffler(bool alternate, bool topdown) 3754 : IsAlternating(alternate), IsTopDown(topdown) {} 3755 3756 void initialize(ScheduleDAGMI*) override { 3757 TopQ.clear(); 3758 BottomQ.clear(); 3759 } 3760 3761 /// Implement MachineSchedStrategy interface. 
3762 /// ----------------------------------------- 3763 3764 SUnit *pickNode(bool &IsTopNode) override { 3765 SUnit *SU; 3766 if (IsTopDown) { 3767 do { 3768 if (TopQ.empty()) return nullptr; 3769 SU = TopQ.top(); 3770 TopQ.pop(); 3771 } while (SU->isScheduled); 3772 IsTopNode = true; 3773 } else { 3774 do { 3775 if (BottomQ.empty()) return nullptr; 3776 SU = BottomQ.top(); 3777 BottomQ.pop(); 3778 } while (SU->isScheduled); 3779 IsTopNode = false; 3780 } 3781 if (IsAlternating) 3782 IsTopDown = !IsTopDown; 3783 return SU; 3784 } 3785 3786 void schedNode(SUnit *SU, bool IsTopNode) override {} 3787 3788 void releaseTopNode(SUnit *SU) override { 3789 TopQ.push(SU); 3790 } 3791 void releaseBottomNode(SUnit *SU) override { 3792 BottomQ.push(SU); 3793 } 3794 }; 3795 3796 } // end anonymous namespace 3797 3798 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) { 3799 bool Alternate = !ForceTopDown && !ForceBottomUp; 3800 bool TopDown = !ForceBottomUp; 3801 assert((TopDown || !ForceTopDown) && 3802 "-misched-topdown incompatible with -misched-bottomup"); 3803 return new ScheduleDAGMILive( 3804 C, std::make_unique<InstructionShuffler>(Alternate, TopDown)); 3805 } 3806 3807 static MachineSchedRegistry ShufflerRegistry( 3808 "shuffle", "Shuffle machine instructions alternating directions", 3809 createInstructionShuffler); 3810 #endif // !NDEBUG 3811 3812 //===----------------------------------------------------------------------===// 3813 // GraphWriter support for ScheduleDAGMILive. 3814 //===----------------------------------------------------------------------===// 3815 3816 #ifndef NDEBUG 3817 namespace llvm { 3818 3819 template<> struct GraphTraits< 3820 ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {}; 3821 3822 template<> 3823 struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits { 3824 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {} 3825 3826 static std::string getGraphName(const ScheduleDAG *G) { 3827 return std::string(G->MF.getName()); 3828 } 3829 3830 static bool renderGraphFromBottomUp() { 3831 return true; 3832 } 3833 3834 static bool isNodeHidden(const SUnit *Node) { 3835 if (ViewMISchedCutoff == 0) 3836 return false; 3837 return (Node->Preds.size() > ViewMISchedCutoff 3838 || Node->Succs.size() > ViewMISchedCutoff); 3839 } 3840 3841 /// If you want to override the dot attributes printed for a particular 3842 /// edge, override this method. 3843 static std::string getEdgeAttributes(const SUnit *Node, 3844 SUnitIterator EI, 3845 const ScheduleDAG *Graph) { 3846 if (EI.isArtificialDep()) 3847 return "color=cyan,style=dashed"; 3848 if (EI.isCtrlDep()) 3849 return "color=blue,style=dashed"; 3850 return ""; 3851 } 3852 3853 static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) { 3854 std::string Str; 3855 raw_string_ostream SS(Str); 3856 const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G); 3857 const SchedDFSResult *DFS = DAG->hasVRegLiveness() ? 
3858 static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr; 3859 SS << "SU:" << SU->NodeNum; 3860 if (DFS) 3861 SS << " I:" << DFS->getNumInstrs(SU); 3862 return SS.str(); 3863 } 3864 3865 static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) { 3866 return G->getGraphNodeLabel(SU); 3867 } 3868 3869 static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) { 3870 std::string Str("shape=Mrecord"); 3871 const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G); 3872 const SchedDFSResult *DFS = DAG->hasVRegLiveness() ? 3873 static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr; 3874 if (DFS) { 3875 Str += ",style=filled,fillcolor=\"#"; 3876 Str += DOT::getColorString(DFS->getSubtreeID(N)); 3877 Str += '"'; 3878 } 3879 return Str; 3880 } 3881 }; 3882 3883 } // end namespace llvm 3884 #endif // NDEBUG 3885 3886 /// viewGraph - Pop up a ghostview window with the reachable parts of the DAG 3887 /// rendered using 'dot'. 3888 void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) { 3889 #ifndef NDEBUG 3890 ViewGraph(this, Name, false, Title); 3891 #else 3892 errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on " 3893 << "systems with Graphviz or gv!\n"; 3894 #endif // NDEBUG 3895 } 3896 3897 /// Out-of-line implementation with no arguments is handy for gdb. 3898 void ScheduleDAGMI::viewGraph() { 3899 viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName()); 3900 } 3901
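// For reference: strategies registered with MachineSchedRegistry (here
// "converge", "ilpmax", "ilpmin", and, in builds with assertions enabled,
// "shuffle") are typically selected through llc's -misched=<name> option,
// e.g. "llc -misched=ilpmax".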