//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachinePassRegistry.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "machine-scheduler"

namespace llvm {

cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
                       cl::desc("Print critical path length to stdout"));

cl::opt<bool> VerifyScheduling(
    "verify-misched", cl::Hidden,
    cl::desc("Verify machine instrs before and after machine scheduling"));

} // end namespace llvm

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs(
    "view-misched-dags", cl::Hidden,
    cl::desc("Pop up a window to show MISched dags after they are processed"));
/// In some situations a few uninteresting nodes depend on nearly all other
/// nodes in the graph, provide a cutoff to hide them.
static cl::opt<unsigned> ViewMISchedCutoff(
    "view-misched-cutoff", cl::Hidden,
    cl::desc("Hide nodes with more predecessors/successors than cutoff"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
    cl::desc("Stop scheduling after N instructions"), cl::init(~0U));

static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
    cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
    cl::desc("Only schedule this MBB#"));
static cl::opt<bool> PrintDAGs("misched-print-dags", cl::Hidden,
    cl::desc("Print schedule DAGs"));
#else
static const bool ViewMISchedDAGs = false;
static const bool PrintDAGs = false;
#endif // NDEBUG

/// Avoid quadratic complexity in unusually large basic blocks by limiting the
/// size of the ready lists.
static cl::opt<unsigned> ReadyListLimit("misched-limit", cl::Hidden,
    cl::desc("Limit ready list to N instructions"), cl::init(256));

static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
    cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
    cl::desc("Enable cyclic critical path analysis."), cl::init(true));

static cl::opt<bool> EnableMemOpCluster("misched-cluster", cl::Hidden,
                                        cl::desc("Enable memop clustering."),
                                        cl::init(true));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}

void ScheduleDAGMutation::anchor() {}

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext() {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {

/// Base class for a machine scheduler class that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}

  void print(raw_ostream &O, const Module* = nullptr) const override;

protected:
  void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags);
};

/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createMachineScheduler();
};

/// PostMachineScheduler runs shortly before code emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
  PostMachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createPostMachineScheduler();
};

} // end anonymous namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, DEBUG_TYPE,
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, DEBUG_TYPE,
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler() : MachineSchedulerBase(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

char PostMachineScheduler::ID = 0;

char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;

INITIALIZE_PASS(PostMachineScheduler, "postmisched",
                "PostRA Machine Instruction Scheduler", false, false)

PostMachineScheduler::PostMachineScheduler() : MachineSchedulerBase(ID) {
  initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry<MachineSchedRegistry::ScheduleDAGCtor>
    MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return nullptr;
}
/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry>>
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);

static cl::opt<bool> EnableMachineSched(
    "enable-misched",
    cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePostRAMachineSched(
    "enable-post-misched",
    cl::desc("Enable the post-ra machine instruction scheduling pass."),
    cl::init(true), cl::Hidden);

/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugInstr())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return priorNonDebug(MachineBasicBlock::const_iterator(I), Beg)
      .getNonConstIterator();
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugInstr())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  return nextIfDebug(MachineBasicBlock::const_iterator(I), End)
      .getNonConstIterator();
}

/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor != useDefaultMachineSched)
    return Ctor(this);

  // Get the default scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedLive(this);
}

/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}
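// Illustrative sketch, not part of this file: a target can supply its own
// scheduler either by overriding TargetPassConfig::createMachineScheduler()
// (tried above) or by registering a factory with MachineSchedRegistry, which
// makes it selectable with -misched=<name>. The strategy and factory names
// below are hypothetical; the registry is used the same way as
// DefaultSchedRegistry above.
//
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, std::make_unique<MySchedStrategy>(C));
//   }
//   static MachineSchedRegistry
//   MySchedRegistry("my-sched", "Hypothetical custom scheduler.",
//                   createMySched);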
/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(mf.getFunction()))
    return false;

  if (EnableMachineSched.getNumOccurrences()) {
    if (!EnableMachineSched)
      return false;
  } else if (!mf.getSubtarget().enableMachineScheduler())
    return false;

  LLVM_DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    LLVM_DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler, false);

  LLVM_DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(mf.getFunction()))
    return false;

  if (EnablePostRAMachineSched.getNumOccurrences()) {
    if (!EnablePostRAMachineSched)
      return false;
  } else if (!mf.getSubtarget().enablePostRAMachineScheduler()) {
    LLVM_DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
    return false;
  }
  LLVM_DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  scheduleRegions(*Scheduler, true);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}
/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need the isCall check to
/// enforce the boundary, but there would be no benefit to postRA scheduling
/// across calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
                            MachineBasicBlock *MBB,
                            MachineFunction *MF,
                            const TargetInstrInfo *TII) {
  return MI->isCall() || TII->isSchedulingBoundary(*MI, MBB, *MF);
}

/// A region of an MBB for scheduling.
namespace {
struct SchedRegion {
  /// RegionBegin is the first instruction in the scheduling region, and
  /// RegionEnd is either MBB->end() or the scheduling boundary after the
  /// last instruction in the scheduling region. These iterators cannot refer
  /// to instructions outside of the identified scheduling region because
  /// those may be reordered before scheduling this region.
  MachineBasicBlock::iterator RegionBegin;
  MachineBasicBlock::iterator RegionEnd;
  unsigned NumRegionInstrs;

  SchedRegion(MachineBasicBlock::iterator B, MachineBasicBlock::iterator E,
              unsigned N) :
    RegionBegin(B), RegionEnd(E), NumRegionInstrs(N) {}
};
} // end anonymous namespace

using MBBRegionsVector = SmallVector<SchedRegion, 16>;

static void
getSchedRegions(MachineBasicBlock *MBB,
                MBBRegionsVector &Regions,
                bool RegionsTopDown) {
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  MachineBasicBlock::iterator I = nullptr;
  for (MachineBasicBlock::iterator RegionEnd = MBB->end();
       RegionEnd != MBB->begin(); RegionEnd = I) {

    // Avoid decrementing RegionEnd for blocks with no terminator.
    if (RegionEnd != MBB->end() ||
        isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII)) {
      --RegionEnd;
    }

    // The next region starts above the previous region. Look backward in the
    // instruction stream until we find the nearest boundary.
    unsigned NumRegionInstrs = 0;
    I = RegionEnd;
    for (; I != MBB->begin(); --I) {
      MachineInstr &MI = *std::prev(I);
      if (isSchedBoundary(&MI, &*MBB, MF, TII))
        break;
      if (!MI.isDebugInstr()) {
        // MBB::size() uses instr_iterator to count. Here we need a bundle to
        // count as a single instruction.
        ++NumRegionInstrs;
      }
    }

    // It's possible we found a scheduling region that only has debug
    // instructions. Don't bother scheduling these.
    if (NumRegionInstrs != 0)
      Regions.push_back(SchedRegion(I, RegionEnd, NumRegionInstrs));
  }

  if (RegionsTopDown)
    std::reverse(Regions.begin(), Regions.end());
}
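// Worked example (illustrative): for a block containing the instruction
// sequence A; B; CALL; C; D with no other boundaries, the bottom-up walk above
// produces two regions: first [C, MBB->end()) with NumRegionInstrs == 2, then
// [A, CALL) with NumRegionInstrs == 2. The CALL is a scheduling boundary, so
// it terminates the upper region and is not part of any region's DAG. With
// RegionsTopDown the vector is reversed, so [A, CALL) is scheduled first.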
/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler,
                                           bool FixKillFlags) {
  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(&*MBB);

#ifndef NDEBUG
    if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
      continue;
    if (SchedOnlyBlock.getNumOccurrences()
        && (int)SchedOnlyBlock != MBB->getNumber())
      continue;
#endif

    // Break the block into scheduling regions [I, RegionEnd). RegionEnd
    // points to the scheduling boundary at the bottom of the region. The DAG
    // does not include RegionEnd, but the region does (i.e. the next
    // RegionEnd is above the previous RegionBegin). If the current block has
    // no terminator then RegionEnd == MBB->end() for the bottom region.
    //
    // All the regions of MBB are first found and stored in MBBRegions, which
    // will be processed (MBB) top-down if initialized with true.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls. Instructions must not be
    // added to other regions than the current one without updating MBBRegions.

    MBBRegionsVector MBBRegions;
    getSchedRegions(&*MBB, MBBRegions, Scheduler.doMBBSchedRegionsTopDown());
    for (MBBRegionsVector::iterator R = MBBRegions.begin();
         R != MBBRegions.end(); ++R) {
      MachineBasicBlock::iterator I = R->RegionBegin;
      MachineBasicBlock::iterator RegionEnd = R->RegionEnd;
      unsigned NumRegionInstrs = R->NumRegionInstrs;

      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == std::prev(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      LLVM_DEBUG(dbgs() << "********** MI Scheduling **********\n");
      LLVM_DEBUG(dbgs() << MF->getName() << ":" << printMBBReference(*MBB)
                        << " " << MBB->getName() << "\n From: " << *I
                        << " To: ";
                 if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
                 else dbgs() << "End";
                 dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
      if (DumpCriticalPathLength) {
        errs() << MF->getName();
        errs() << ":%bb. " << MBB->getNumber();
        errs() << " " << MBB->getName() << " \n";
      }

      // Schedule a region: possibly reorder instructions.
      // This invalidates the original region iterators.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();
    }
    Scheduler.finishBlock();
    // FIXME: Ideally, no further passes should rely on kill flags. However,
    // thumb2 size reduction is currently an exception, so the PostMIScheduler
    // needs to do this.
    if (FixKillFlags)
      Scheduler.fixupKills(*MBB);
  }
  Scheduler.finalizeSchedule();
}

void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ReadyQueue::dump() const {
  dbgs() << "Queue " << Name << ": ";
  for (const SUnit *SU : Queue)
    dbgs() << SU->NodeNum << " ";
  dbgs() << "\n";
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
// virtual registers.
//===----------------------------------------------------------------------===//

// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() = default;
/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    dumpNode(*SuccSU);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
    SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();

  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SDep &Succ : SU->Succs)
    releaseSucc(SU, &Succ);
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    dumpNode(*PredSU);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
    PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();

  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SDep &Pred : SU->Preds)
    releasePred(SU, &Pred);
}

void ScheduleDAGMI::startBlock(MachineBasicBlock *bb) {
  ScheduleDAGInstrs::startBlock(bb);
  SchedImpl->enterMBB(bb);
}

void ScheduleDAGMI::finishBlock() {
  SchedImpl->leaveMBB();
  ScheduleDAGInstrs::finishBlock();
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  SchedImpl->initPolicy(begin, end, regioninstrs);
}
/// This is normally called from the main scheduler loop but may also be
/// invoked by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
    MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals.
  if (LIS)
    LIS->handleMove(*MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}

/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  LLVM_DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
  LLVM_DEBUG(SchedImpl->dumpPolicy());

  // Build the DAG.
  buildSchedGraph(AA);

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  LLVM_DEBUG(dump());
  if (PrintDAGs) dump();
  if (ViewMISchedDAGs) viewGraph();

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    LLVM_DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    } else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
          priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
    }
    // Notify the scheduling strategy before updating the DAG.
    // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
    // runs, it can then use the accurate ReadyCycle time to determine whether
    // newly released nodes can move to the readyQ.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  LLVM_DEBUG({
    dbgs() << "*** Final schedule for "
           << printMBBReference(*begin()->getParent()) << " ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
}
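// Minimal sketch of the mutation hook driven by postprocessDAG() below. The
// class name here is hypothetical; ScheduleDAGMI::addMutation() is the real
// entry point targets use to register one. A mutation runs after the DAG is
// built and before the strategy is initialized, and typically only adds
// edges or annotates nodes:
//
//   struct HypotheticalMutation : ScheduleDAGMutation {
//     void apply(ScheduleDAGInstrs *DAG) override {
//       for (SUnit &SU : DAG->SUnits) {
//         // Inspect SU and, e.g., DAG->addEdge(&SU, SDep(...)).
//       }
//     }
//   };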
/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (auto &m : Mutations)
    m->apply(this);
}

void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                      SmallVectorImpl<SUnit*> &BotRoots) {
  for (SUnit &SU : SUnits) {
    assert(!SU.isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU.biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!SU.NumPredsLeft)
      TopRoots.push_back(&SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!SU.NumSuccsLeft)
      BotRoots.push_back(&SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = nullptr;
  NextClusterPred = nullptr;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SUnit *SU : TopRoots)
    SchedImpl->releaseTopNode(SU);

  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *>>::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == std::prev(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      dumpNode(*SU);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif
//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}

void ScheduleDAGMILive::collectVRegUses(SUnit &SU) {
  const MachineInstr &MI = *SU.getInstr();
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (!MO.readsReg())
      continue;
    if (TrackLaneMasks && !MO.isUse())
      continue;

    Register Reg = MO.getReg();
    if (!Register::isVirtualRegister(Reg))
      continue;

    // Ignore re-defs.
    if (TrackLaneMasks) {
      bool FoundDef = false;
      for (const MachineOperand &MO2 : MI.operands()) {
        if (MO2.isReg() && MO2.isDef() && MO2.getReg() == Reg &&
            !MO2.isDead()) {
          FoundDef = true;
          break;
        }
      }
      if (FoundDef)
        continue;
    }

    // Record this local VReg use.
    VReg2SUnitMultiMap::iterator UI = VRegUses.find(Reg);
    for (; UI != VRegUses.end(); ++UI) {
      if (UI->SU == &SU)
        break;
    }
    if (UI == VRegUses.end())
      VRegUses.insert(VReg2SUnit(Reg, LaneBitmask::getNone(), &SU));
  }
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);

  SUPressureDiffs.clear();

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
  ShouldTrackLaneMasks = SchedImpl->shouldTrackLaneMasks();

  assert((!ShouldTrackLaneMasks || ShouldTrackPressure) &&
         "ShouldTrackLaneMasks requires ShouldTrackPressure");
}

// Setup the register pressure trackers for the top scheduled and bottom
// scheduled regions.
void ScheduleDAGMILive::initRegPressure() {
  VRegUses.clear();
  VRegUses.setUniverse(MRI.getNumVirtRegs());
  for (SUnit &SU : SUnits)
    collectVRegUses(SU);

  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin,
                    ShouldTrackLaneMasks, false);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                    ShouldTrackLaneMasks, false);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  LLVM_DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    LLVM_DEBUG(dbgs() << "Live Thru: ";
               dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // For each live out vreg reduce the pressure change associated with other
  // uses of the same vreg below the live-out reaching def.
  updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd) {
    SmallVector<RegisterMaskPair, 8> LiveUses;
    BotRPTracker.recede(&LiveUses);
    updatePressureDiffs(LiveUses);
  }

  LLVM_DEBUG(dbgs() << "Top Pressure:\n";
             dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
             dbgs() << "Bottom Pressure:\n";
             dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI););

  assert((BotRPTracker.getPos() == RegionEnd ||
          (RegionEnd->isDebugInstr() &&
           BotRPTracker.getPos() == priorNonDebug(RegionEnd, RegionBegin))) &&
         "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also
  // track the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
      RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      LLVM_DEBUG(dbgs() << TRI->getRegPressureSetName(i) << " Limit " << Limit
                        << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureChange(i));
    }
  }
  LLVM_DEBUG(dbgs() << "Excess PSets: ";
             for (const PressureChange &RCPS
                  : RegionCriticalPSets) dbgs()
             << TRI->getRegPressureSetName(RCPS.getPSet()) << " ";
             dbgs() << "\n");
}

void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
                        const std::vector<unsigned> &NewMaxPressure) {
  const PressureDiff &PDiff = getPressureDiff(SU);
  unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
  for (const PressureChange &PC : PDiff) {
    if (!PC.isValid())
      break;
    unsigned ID = PC.getPSet();
    while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
      ++CritIdx;
    if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
      if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
          && NewMaxPressure[ID] <= (unsigned)std::numeric_limits<int16_t>::max())
        RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
    }
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
    if (NewMaxPressure[ID] >= Limit - 2) {
      LLVM_DEBUG(dbgs() << " " << TRI->getRegPressureSetName(ID) << ": "
                        << NewMaxPressure[ID]
                        << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ")
                        << Limit << "(+ " << BotRPTracker.getLiveThru()[ID]
                        << " livethru)\n");
    }
  }
}

/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(
    ArrayRef<RegisterMaskPair> LiveUses) {
  for (const RegisterMaskPair &P : LiveUses) {
    unsigned Reg = P.RegUnit;
    /// FIXME: Currently assuming single-use physregs.
    if (!Register::isVirtualRegister(Reg))
      continue;

    if (ShouldTrackLaneMasks) {
      // If the register has just become live then other uses won't change
      // this fact anymore => decrement pressure.
      // If the register has just become dead then other uses make it come
      // back to life => increment pressure.
      bool Decrement = P.LaneMask.any();

      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit &SU = *V2SU.SU;
        if (SU.isScheduled || &SU == &ExitSU)
          continue;

        PressureDiff &PDiff = getPressureDiff(&SU);
        PDiff.addPressureChange(Reg, Decrement, &MRI);
        LLVM_DEBUG(dbgs() << " UpdateRegP: SU(" << SU.NodeNum << ") "
                          << printReg(Reg, TRI) << ':'
                          << PrintLaneMask(P.LaneMask) << ' ' << *SU.getInstr();
                   dbgs() << " to "; PDiff.dump(*TRI););
      }
    } else {
      assert(P.LaneMask.any());
      LLVM_DEBUG(dbgs() << " LiveReg: " << printVRegOrUnit(Reg, TRI) << "\n");
      // This may be called before CurrentBottom has been initialized.
      // However, BotRPTracker must have a valid position. We want the value
      // live into the instruction or live out of the block, so ask for the
      // previous instruction's live-out.
      const LiveInterval &LI = LIS->getInterval(Reg);
      VNInfo *VNI;
      MachineBasicBlock::const_iterator I =
          nextIfDebug(BotRPTracker.getPos(), BB->end());
      if (I == BB->end())
        VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
      else {
        LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*I));
        VNI = LRQ.valueIn();
      }
      // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
      assert(VNI && "No live value at use.");
      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit *SU = V2SU.SU;
        // If this use comes before the reaching def, it cannot be a last use,
        // so decrease its pressure change.
        if (!SU->isScheduled && SU != &ExitSU) {
          LiveQueryResult LRQ =
              LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
          if (LRQ.valueIn() == VNI) {
            PressureDiff &PDiff = getPressureDiff(SU);
            PDiff.addPressureChange(Reg, true, &MRI);
            LLVM_DEBUG(dbgs() << " UpdateRegP: SU(" << SU->NodeNum << ") "
                              << *SU->getInstr();
                       dbgs() << " to "; PDiff.dump(*TRI););
          }
        }
      }
    }
  }
}

void ScheduleDAGMILive::dump() const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  if (EntrySU.getInstr() != nullptr)
    dumpNodeAll(EntrySU);
  for (const SUnit &SU : SUnits) {
    dumpNodeAll(SU);
    if (ShouldTrackPressure) {
      dbgs() << " Pressure Diff : ";
      getPressureDiff(&SU).dump(*TRI);
    }
    dbgs() << " Single Issue : ";
    if (SchedModel.mustBeginGroup(SU.getInstr()) &&
        SchedModel.mustEndGroup(SU.getInstr()))
      dbgs() << "true;";
    else
      dbgs() << "false;";
    dbgs() << '\n';
  }
  if (ExitSU.getInstr() != nullptr)
    dumpNodeAll(ExitSU);
#endif
}
/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMILive then it will want to override this virtual method in
/// order to update any specialized state.
void ScheduleDAGMILive::schedule() {
  LLVM_DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
  LLVM_DEBUG(SchedImpl->dumpPolicy());
  buildDAGWithRegPressure();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  LLVM_DEBUG(dump());
  if (PrintDAGs) dump();
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    LLVM_DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    if (DFSResult) {
      unsigned SubtreeID = DFSResult->getSubtreeID(SU);
      if (!ScheduledTrees.test(SubtreeID)) {
        ScheduledTrees.set(SubtreeID);
        DFSResult->scheduleTree(SubtreeID);
        SchedImpl->scheduleTree(SubtreeID);
      }
    }

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  LLVM_DEBUG({
    dbgs() << "*** Final schedule for "
           << printMBBReference(*begin()->getParent()) << " ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
}

/// Build the DAG and setup three register pressure trackers.
void ScheduleDAGMILive::buildDAGWithRegPressure() {
  if (!ShouldTrackPressure) {
    RPTracker.reset();
    RegionCriticalPSets.clear();
    buildSchedGraph(AA);
    return;
  }

  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 ShouldTrackLaneMasks, /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker, &SUPressureDiffs, LIS, ShouldTrackLaneMasks);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

void ScheduleDAGMILive::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomUp=*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}
/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single block loops. To handle loops
/// that span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes. For example, consider
/// the following instruction sequence where each instruction has unit latency
/// and defines an eponymous virtual register:
///
///   a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
///
/// This could be relevant to PostRA scheduling, but is currently implemented
/// assuming LiveIntervals.
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
  // This only applies to single block loops.
  if (!BB->isSuccessor(BB))
    return 0;

  unsigned MaxCyclicLatency = 0;
  // Visit each live out vreg def to find def/use pairs that cross iterations.
  for (const RegisterMaskPair &P : RPTracker.getPressure().LiveOutRegs) {
    unsigned Reg = P.RegUnit;
    if (!Register::isVirtualRegister(Reg))
      continue;
    const LiveInterval &LI = LIS->getInterval(Reg);
    const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    if (!DefVNI)
      continue;

    MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
    const SUnit *DefSU = getSUnit(DefMI);
    if (!DefSU)
      continue;

    unsigned LiveOutHeight = DefSU->getHeight();
    unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
    // Visit all local users of the vreg def.
    for (const VReg2SUnit &V2SU
         : make_range(VRegUses.find(Reg), VRegUses.end())) {
      SUnit *SU = V2SU.SU;
      if (SU == &ExitSU)
        continue;

      // Only consider uses of the phi.
      LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
      if (!LRQ.valueIn()->isPHIDef())
        continue;

      // Assume that a path spanning two iterations is a cycle, which could
      // overestimate in strange cases. This allows cyclic latency to be
      // estimated as the minimum slack of the vreg's depth or height.
      unsigned CyclicLatency = 0;
      if (LiveOutDepth > SU->getDepth())
        CyclicLatency = LiveOutDepth - SU->getDepth();

      unsigned LiveInHeight = SU->getHeight() + DefSU->Latency;
      if (LiveInHeight > LiveOutHeight) {
        if (LiveInHeight - LiveOutHeight < CyclicLatency)
          CyclicLatency = LiveInHeight - LiveOutHeight;
      } else
        CyclicLatency = 0;

      LLVM_DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
                        << SU->NodeNum << ") = " << CyclicLatency << "c\n");
      if (CyclicLatency > MaxCyclicLatency)
        MaxCyclicLatency = CyclicLatency;
    }
  }
  LLVM_DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
  return MaxCyclicLatency;
}
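// In the notation of the worked example above, the inner loop computes, per
// live-out def/use pair,
//
//   CyclicLatency = min(max(0, LiveOutDepth - UseDepth),
//                       max(0, LiveInHeight - LiveOutHeight))
//
// and the function returns the maximum of this estimate over all such pairs.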
/// Release ExitSU predecessors and setup scheduler queues. Re-position
/// the Top RP tracker in case the region beginning has changed.
void ScheduleDAGMILive::initQueues(ArrayRef<SUnit*> TopRoots,
                                   ArrayRef<SUnit*> BotRoots) {
  ScheduleDAGMI::initQueues(TopRoots, BotRoots);
  if (ShouldTrackPressure) {
    assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
    TopRPTracker.setPos(CurrentTop);
  }
}

/// Move an instruction and update register pressure.
void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    if (ShouldTrackPressure) {
      // Update top scheduled pressure.
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      TopRPTracker.advance(RegOpers);
      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
      LLVM_DEBUG(dbgs() << "Top Pressure:\n"; dumpRegSetPressure(
                     TopRPTracker.getRegSetPressureAtPos(), TRI););

      updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
    }
  } else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
      BotRPTracker.setPos(CurrentBottom);
    }
    if (ShouldTrackPressure) {
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      if (BotRPTracker.getPos() != CurrentBottom)
        BotRPTracker.recedeSkipDebugValues();
      SmallVector<RegisterMaskPair, 8> LiveUses;
      BotRPTracker.recede(RegOpers, &LiveUses);
      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
      LLVM_DEBUG(dbgs() << "Bottom Pressure:\n"; dumpRegSetPressure(
                     BotRPTracker.getRegSetPressureAtPos(), TRI););

      updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
      updatePressureDiffs(LiveUses);
    }
  }
}

//===----------------------------------------------------------------------===//
// BaseMemOpClusterMutation - DAG post-processing to cluster loads or stores.
//===----------------------------------------------------------------------===//

namespace {

/// Post-process the DAG to create cluster edges between neighboring
/// loads or between neighboring stores.
class BaseMemOpClusterMutation : public ScheduleDAGMutation {
  struct MemOpInfo {
    SUnit *SU;
    const MachineOperand *BaseOp;
    int64_t Offset;

    MemOpInfo(SUnit *su, const MachineOperand *Op, int64_t ofs)
        : SU(su), BaseOp(Op), Offset(ofs) {}

    bool operator<(const MemOpInfo &RHS) const {
      if (BaseOp->getType() != RHS.BaseOp->getType())
        return BaseOp->getType() < RHS.BaseOp->getType();

      if (BaseOp->isReg())
        return std::make_tuple(BaseOp->getReg(), Offset, SU->NodeNum) <
               std::make_tuple(RHS.BaseOp->getReg(), RHS.Offset,
                               RHS.SU->NodeNum);
      if (BaseOp->isFI()) {
        const MachineFunction &MF =
            *BaseOp->getParent()->getParent()->getParent();
        const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
        bool StackGrowsDown = TFI.getStackGrowthDirection() ==
                              TargetFrameLowering::StackGrowsDown;
        // Can't use tuple comparison here since we might need to use a
        // different order when the stack grows down.
        if (BaseOp->getIndex() != RHS.BaseOp->getIndex())
          return StackGrowsDown ? BaseOp->getIndex() > RHS.BaseOp->getIndex()
                                : BaseOp->getIndex() < RHS.BaseOp->getIndex();

        if (Offset != RHS.Offset)
          return StackGrowsDown ? Offset > RHS.Offset : Offset < RHS.Offset;

        return SU->NodeNum < RHS.SU->NodeNum;
      }

      llvm_unreachable("MemOpClusterMutation only supports register or frame "
                       "index bases.");
    }
  };

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  bool IsLoad;

public:
  BaseMemOpClusterMutation(const TargetInstrInfo *tii,
                           const TargetRegisterInfo *tri, bool IsLoad)
      : TII(tii), TRI(tri), IsLoad(IsLoad) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;

protected:
  void clusterNeighboringMemOps(ArrayRef<SUnit *> MemOps,
                                ScheduleDAGInstrs *DAG);
};

class StoreClusterMutation : public BaseMemOpClusterMutation {
public:
  StoreClusterMutation(const TargetInstrInfo *tii,
                       const TargetRegisterInfo *tri)
      : BaseMemOpClusterMutation(tii, tri, false) {}
};

class LoadClusterMutation : public BaseMemOpClusterMutation {
public:
  LoadClusterMutation(const TargetInstrInfo *tii, const TargetRegisterInfo *tri)
      : BaseMemOpClusterMutation(tii, tri, true) {}
};

} // end anonymous namespace

namespace llvm {

std::unique_ptr<ScheduleDAGMutation>
createLoadClusterDAGMutation(const TargetInstrInfo *TII,
                             const TargetRegisterInfo *TRI) {
  return EnableMemOpCluster ? std::make_unique<LoadClusterMutation>(TII, TRI)
                            : nullptr;
}

std::unique_ptr<ScheduleDAGMutation>
createStoreClusterDAGMutation(const TargetInstrInfo *TII,
                              const TargetRegisterInfo *TRI) {
  return EnableMemOpCluster ? std::make_unique<StoreClusterMutation>(TII, TRI)
                            : nullptr;
}

} // end namespace llvm
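// Typical use (a sketch following the pattern of in-tree targets, not a
// definitive recipe): a target attaches these mutations while constructing
// its scheduler, e.g. from its createMachineScheduler() hook. TII and TRI
// below are the public members inherited from ScheduleDAG:
//
//   ScheduleDAGMILive *DAG = createGenericSchedLive(C);
//   DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
//   DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));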
             std::make_unique<StoreClusterMutation>(TII, TRI)
                            : nullptr;
}

} // end namespace llvm

void BaseMemOpClusterMutation::clusterNeighboringMemOps(
    ArrayRef<SUnit *> MemOps, ScheduleDAGInstrs *DAG) {
  SmallVector<MemOpInfo, 32> MemOpRecords;
  for (SUnit *SU : MemOps) {
    const MachineOperand *BaseOp;
    int64_t Offset;
    if (TII->getMemOperandWithOffset(*SU->getInstr(), BaseOp, Offset, TRI))
      MemOpRecords.push_back(MemOpInfo(SU, BaseOp, Offset));
  }
  if (MemOpRecords.size() < 2)
    return;

  llvm::sort(MemOpRecords);
  unsigned ClusterLength = 1;
  for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
    SUnit *SUa = MemOpRecords[Idx].SU;
    SUnit *SUb = MemOpRecords[Idx+1].SU;
    if (TII->shouldClusterMemOps(*MemOpRecords[Idx].BaseOp,
                                 *MemOpRecords[Idx + 1].BaseOp,
                                 ClusterLength) &&
        DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
      LLVM_DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
                        << SUb->NodeNum << ")\n");
      // Copy successor edges from SUa to SUb. Interleaving computation
      // dependent on SUa can prevent load combining due to register reuse.
      // Predecessor edges do not need to be copied from SUb to SUa since
      // nearby loads should have effectively the same inputs.
      for (const SDep &Succ : SUa->Succs) {
        if (Succ.getSUnit() == SUb)
          continue;
        LLVM_DEBUG(dbgs() << "  Copy Succ SU(" << Succ.getSUnit()->NodeNum
                          << ")\n");
        DAG->addEdge(Succ.getSUnit(), SDep(SUb, SDep::Artificial));
      }
      ++ClusterLength;
    } else
      ClusterLength = 1;
  }
}

/// Callback from DAG postProcessing to create cluster edges for loads or
/// stores, depending on IsLoad.
void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAG) {
  // Map DAG NodeNum to store chain ID.
  DenseMap<unsigned, unsigned> StoreChainIDs;
  // Map each store chain to a set of dependent MemOps.
  SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
  for (SUnit &SU : DAG->SUnits) {
    if ((IsLoad && !SU.getInstr()->mayLoad()) ||
        (!IsLoad && !SU.getInstr()->mayStore()))
      continue;

    unsigned ChainPredID = DAG->SUnits.size();
    for (const SDep &Pred : SU.Preds) {
      if (Pred.isCtrl()) {
        ChainPredID = Pred.getSUnit()->NodeNum;
        break;
      }
    }
    // Check if this chain-like pred has been seen before. ChainPredID is
    // DAG->SUnits.size() for nodes at the top of the schedule that have no
    // control predecessor.
    unsigned NumChains = StoreChainDependents.size();
    std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
        StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
    if (Result.second)
      StoreChainDependents.resize(NumChains + 1);
    StoreChainDependents[Result.first->second].push_back(&SU);
  }

  // Iterate over the store chains.
  for (auto &SCD : StoreChainDependents)
    clusterNeighboringMemOps(SCD, DAG);
}

//===----------------------------------------------------------------------===//
// CopyConstrain - DAG post-processing to encourage copy elimination.
//===----------------------------------------------------------------------===//

namespace {

/// Post-process the DAG to create weak edges from all uses of a copy to
/// the one use that defines the copy's source vreg, most likely an induction
/// variable increment.
class CopyConstrain : public ScheduleDAGMutation {
  // Transient state.
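  // RegionBeginIdx and RegionEndIdx are computed once per scheduling region
  // in apply() and then reused by constrainLocalCopy() for every COPY in
  // that region.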
  SlotIndex RegionBeginIdx;

  // RegionEndIdx is the slot index of the last non-debug instruction in the
  // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
  SlotIndex RegionEndIdx;

public:
  CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;

protected:
  void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
};

} // end anonymous namespace

namespace llvm {

std::unique_ptr<ScheduleDAGMutation>
createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
                               const TargetRegisterInfo *TRI) {
  return std::make_unique<CopyConstrain>(TII, TRI);
}

} // end namespace llvm

/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
///  I0:     = dst
///  I1: src = ...
///  I2:     = dst
///  I3: dst = src (copy)
/// (create pred->succ edges I0->I1, I2->I1)
///
/// 2) Local copy:
///  I0: dst = src (copy)
///  I1:     = dst
///  I2: src = ...
///  I3:     = dst
/// (create pred->succ edges I1->I2, I3->I2)
///
/// Although the MachineScheduler is currently constrained to single blocks,
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
  LiveIntervals *LIS = DAG->getLIS();
  MachineInstr *Copy = CopySU->getInstr();

  // Check for pure vreg copies.
  const MachineOperand &SrcOp = Copy->getOperand(1);
  Register SrcReg = SrcOp.getReg();
  if (!Register::isVirtualRegister(SrcReg) || !SrcOp.readsReg())
    return;

  const MachineOperand &DstOp = Copy->getOperand(0);
  Register DstReg = DstOp.getReg();
  if (!Register::isVirtualRegister(DstReg) || DstOp.isDead())
    return;

  // Check if either the dest or source is local. If it's live across a back
  // edge, it's not local. Note that if both vregs are live across the back
  // edge, we cannot successfully constrain the copy without cyclic scheduling.
  // If both the copy's source and dest are local live intervals, then we
  // should treat the dest as the global for the purpose of adding
  // constraints. This adds edges from the source's other uses to the copy.
  unsigned LocalReg = SrcReg;
  unsigned GlobalReg = DstReg;
  LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
  if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
    LocalReg = DstReg;
    GlobalReg = SrcReg;
    LocalLI = &LIS->getInterval(LocalReg);
    if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
      return;
  }
  LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);

  // Find the global segment after the start of the local LI.
  LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
  // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
  // local live range. We could create edges from other global uses to the
  // local start, but the coalescer should have already eliminated these
  // cases, so don't bother dealing with it.
  if (GlobalSegment == GlobalLI->end())
    return;

  // If GlobalSegment is killed at the LocalLI->start, the call to find()
  // returned the next global segment.
  // But if GlobalSegment overlaps with LocalLI->start, then advance to the
  // next segment. If a hole in GlobalLI exists in LocalLI's vicinity,
  // GlobalSegment will be the end of the hole.
  if (GlobalSegment->contains(LocalLI->beginIndex()))
    ++GlobalSegment;

  if (GlobalSegment == GlobalLI->end())
    return;

  // Check if GlobalLI contains a hole in the vicinity of LocalLI.
  if (GlobalSegment != GlobalLI->begin()) {
    // Two address defs have no hole.
    if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
                               GlobalSegment->start)) {
      return;
    }
    // If the prior global segment may be defined by the same two-address
    // instruction that also defines LocalLI, then we can't make a hole here.
    if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
                               LocalLI->beginIndex())) {
      return;
    }
    // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
    // it would be a disconnected component in the live range.
    assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
           "Disconnected live range within the scheduling region.");
  }
  MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
  if (!GlobalDef)
    return;

  SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
  if (!GlobalSU)
    return;

  // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
  // constraining the uses of the last local def to precede GlobalDef.
  SmallVector<SUnit*,8> LocalUses;
  const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
  MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
  SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
  for (const SDep &Succ : LastLocalSU->Succs) {
    if (Succ.getKind() != SDep::Data || Succ.getReg() != LocalReg)
      continue;
    if (Succ.getSUnit() == GlobalSU)
      continue;
    if (!DAG->canAddEdge(GlobalSU, Succ.getSUnit()))
      return;
    LocalUses.push_back(Succ.getSUnit());
  }
  // Open the top of the GlobalLI hole by constraining any earlier global uses
  // to precede the start of LocalLI.
  SmallVector<SUnit*,8> GlobalUses;
  MachineInstr *FirstLocalDef =
      LIS->getInstructionFromIndex(LocalLI->beginIndex());
  SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
  for (const SDep &Pred : GlobalSU->Preds) {
    if (Pred.getKind() != SDep::Anti || Pred.getReg() != GlobalReg)
      continue;
    if (Pred.getSUnit() == FirstLocalSU)
      continue;
    if (!DAG->canAddEdge(FirstLocalSU, Pred.getSUnit()))
      return;
    GlobalUses.push_back(Pred.getSUnit());
  }
  LLVM_DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
  // Add the weak edges.
  for (SUnit *LU : LocalUses) {
    LLVM_DEBUG(dbgs() << "  Local use SU(" << LU->NodeNum << ") -> SU("
                      << GlobalSU->NodeNum << ")\n");
    DAG->addEdge(GlobalSU, SDep(LU, SDep::Weak));
  }
  for (SUnit *GU : GlobalUses) {
    LLVM_DEBUG(dbgs() << "  Global use SU(" << GU->NodeNum << ") -> SU("
                      << FirstLocalSU->NodeNum << ")\n");
    DAG->addEdge(FirstLocalSU, SDep(GU, SDep::Weak));
  }
}

/// Callback from DAG postProcessing to create weak edges to encourage
/// copy elimination.
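/// Weak edges (SDep::Weak) do not impose a hard ordering: GenericScheduler
/// merely biases candidate selection toward honoring them (the Weak reason in
/// tryCandidate), so this mutation can nudge the schedule toward a
/// coalescable copy without making the region unschedulable. The traversal
/// itself is simple; as a sketch of the function below:
///
///   for (SUnit &SU : DAG->SUnits)      // every node in the region
///     if (SU.getInstr()->isCopy())     // only COPY instructions
///       constrainLocalCopy(&SU, DAG);  // add weak edges around this copy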
1814 void CopyConstrain::apply(ScheduleDAGInstrs *DAGInstrs) { 1815 ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs); 1816 assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals"); 1817 1818 MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end()); 1819 if (FirstPos == DAG->end()) 1820 return; 1821 RegionBeginIdx = DAG->getLIS()->getInstructionIndex(*FirstPos); 1822 RegionEndIdx = DAG->getLIS()->getInstructionIndex( 1823 *priorNonDebug(DAG->end(), DAG->begin())); 1824 1825 for (SUnit &SU : DAG->SUnits) { 1826 if (!SU.getInstr()->isCopy()) 1827 continue; 1828 1829 constrainLocalCopy(&SU, static_cast<ScheduleDAGMILive*>(DAG)); 1830 } 1831 } 1832 1833 //===----------------------------------------------------------------------===// 1834 // MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler 1835 // and possibly other custom schedulers. 1836 //===----------------------------------------------------------------------===// 1837 1838 static const unsigned InvalidCycle = ~0U; 1839 1840 SchedBoundary::~SchedBoundary() { delete HazardRec; } 1841 1842 /// Given a Count of resource usage and a Latency value, return true if a 1843 /// SchedBoundary becomes resource limited. 1844 /// If we are checking after scheduling a node, we should return true when 1845 /// we just reach the resource limit. 1846 static bool checkResourceLimit(unsigned LFactor, unsigned Count, 1847 unsigned Latency, bool AfterSchedNode) { 1848 int ResCntFactor = (int)(Count - (Latency * LFactor)); 1849 if (AfterSchedNode) 1850 return ResCntFactor >= (int)LFactor; 1851 else 1852 return ResCntFactor > (int)LFactor; 1853 } 1854 1855 void SchedBoundary::reset() { 1856 // A new HazardRec is created for each DAG and owned by SchedBoundary. 1857 // Destroying and reconstructing it is very expensive though. So keep 1858 // invalid, placeholder HazardRecs. 1859 if (HazardRec && HazardRec->isEnabled()) { 1860 delete HazardRec; 1861 HazardRec = nullptr; 1862 } 1863 Available.clear(); 1864 Pending.clear(); 1865 CheckPending = false; 1866 CurrCycle = 0; 1867 CurrMOps = 0; 1868 MinReadyCycle = std::numeric_limits<unsigned>::max(); 1869 ExpectedLatency = 0; 1870 DependentLatency = 0; 1871 RetiredMOps = 0; 1872 MaxExecutedResCount = 0; 1873 ZoneCritResIdx = 0; 1874 IsResourceLimited = false; 1875 ReservedCycles.clear(); 1876 ReservedCyclesIndex.clear(); 1877 #ifndef NDEBUG 1878 // Track the maximum number of stall cycles that could arise either from the 1879 // latency of a DAG edge or the number of cycles that a processor resource is 1880 // reserved (SchedBoundary::ReservedCycles). 1881 MaxObservedStall = 0; 1882 #endif 1883 // Reserve a zero-count for invalid CritResIdx. 
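  // ZoneCritResIdx == 0 means "issue limited" rather than naming a real
  // resource, so slot 0 must exist and must stay zero; the assert below
  // checks that invariant.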
1884 ExecutedResCounts.resize(1); 1885 assert(!ExecutedResCounts[0] && "nonzero count for bad resource"); 1886 } 1887 1888 void SchedRemainder:: 1889 init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) { 1890 reset(); 1891 if (!SchedModel->hasInstrSchedModel()) 1892 return; 1893 RemainingCounts.resize(SchedModel->getNumProcResourceKinds()); 1894 for (SUnit &SU : DAG->SUnits) { 1895 const MCSchedClassDesc *SC = DAG->getSchedClass(&SU); 1896 RemIssueCount += SchedModel->getNumMicroOps(SU.getInstr(), SC) 1897 * SchedModel->getMicroOpFactor(); 1898 for (TargetSchedModel::ProcResIter 1899 PI = SchedModel->getWriteProcResBegin(SC), 1900 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) { 1901 unsigned PIdx = PI->ProcResourceIdx; 1902 unsigned Factor = SchedModel->getResourceFactor(PIdx); 1903 RemainingCounts[PIdx] += (Factor * PI->Cycles); 1904 } 1905 } 1906 } 1907 1908 void SchedBoundary:: 1909 init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) { 1910 reset(); 1911 DAG = dag; 1912 SchedModel = smodel; 1913 Rem = rem; 1914 if (SchedModel->hasInstrSchedModel()) { 1915 unsigned ResourceCount = SchedModel->getNumProcResourceKinds(); 1916 ReservedCyclesIndex.resize(ResourceCount); 1917 ExecutedResCounts.resize(ResourceCount); 1918 unsigned NumUnits = 0; 1919 1920 for (unsigned i = 0; i < ResourceCount; ++i) { 1921 ReservedCyclesIndex[i] = NumUnits; 1922 NumUnits += SchedModel->getProcResource(i)->NumUnits; 1923 } 1924 1925 ReservedCycles.resize(NumUnits, InvalidCycle); 1926 } 1927 } 1928 1929 /// Compute the stall cycles based on this SUnit's ready time. Heuristics treat 1930 /// these "soft stalls" differently than the hard stall cycles based on CPU 1931 /// resources and computed by checkHazard(). A fully in-order model 1932 /// (MicroOpBufferSize==0) will not make use of this since instructions are not 1933 /// available for scheduling until they are ready. However, a weaker in-order 1934 /// model may use this for heuristics. For example, if a processor has in-order 1935 /// behavior when reading certain resources, this may come into play. 1936 unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) { 1937 if (!SU->isUnbuffered) 1938 return 0; 1939 1940 unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle); 1941 if (ReadyCycle > CurrCycle) 1942 return ReadyCycle - CurrCycle; 1943 return 0; 1944 } 1945 1946 /// Compute the next cycle at which the given processor resource unit 1947 /// can be scheduled. 1948 unsigned SchedBoundary::getNextResourceCycleByInstance(unsigned InstanceIdx, 1949 unsigned Cycles) { 1950 unsigned NextUnreserved = ReservedCycles[InstanceIdx]; 1951 // If this resource has never been used, always return cycle zero. 1952 if (NextUnreserved == InvalidCycle) 1953 return 0; 1954 // For bottom-up scheduling add the cycles needed for the current operation. 1955 if (!isTop()) 1956 NextUnreserved += Cycles; 1957 return NextUnreserved; 1958 } 1959 1960 /// Compute the next cycle at which the given processor resource can be 1961 /// scheduled. Returns the next cycle and the index of the processor resource 1962 /// instance in the reserved cycles vector. 
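/// For example, given a hypothetical resource with two units whose
/// ReservedCycles entries are {7, 4}, a top-down query returns {4, I} with I
/// naming the second unit: among equivalent units, the instance that frees
/// up earliest wins.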
1963 std::pair<unsigned, unsigned> 1964 SchedBoundary::getNextResourceCycle(unsigned PIdx, unsigned Cycles) { 1965 unsigned MinNextUnreserved = InvalidCycle; 1966 unsigned InstanceIdx = 0; 1967 unsigned StartIndex = ReservedCyclesIndex[PIdx]; 1968 unsigned NumberOfInstances = SchedModel->getProcResource(PIdx)->NumUnits; 1969 assert(NumberOfInstances > 0 && 1970 "Cannot have zero instances of a ProcResource"); 1971 1972 for (unsigned I = StartIndex, End = StartIndex + NumberOfInstances; I < End; 1973 ++I) { 1974 unsigned NextUnreserved = getNextResourceCycleByInstance(I, Cycles); 1975 if (MinNextUnreserved > NextUnreserved) { 1976 InstanceIdx = I; 1977 MinNextUnreserved = NextUnreserved; 1978 } 1979 } 1980 return std::make_pair(MinNextUnreserved, InstanceIdx); 1981 } 1982 1983 /// Does this SU have a hazard within the current instruction group. 1984 /// 1985 /// The scheduler supports two modes of hazard recognition. The first is the 1986 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that 1987 /// supports highly complicated in-order reservation tables 1988 /// (ScoreboardHazardRecognizer) and arbitrary target-specific logic. 1989 /// 1990 /// The second is a streamlined mechanism that checks for hazards based on 1991 /// simple counters that the scheduler itself maintains. It explicitly checks 1992 /// for instruction dispatch limitations, including the number of micro-ops that 1993 /// can dispatch per cycle. 1994 /// 1995 /// TODO: Also check whether the SU must start a new group. 1996 bool SchedBoundary::checkHazard(SUnit *SU) { 1997 if (HazardRec->isEnabled() 1998 && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) { 1999 return true; 2000 } 2001 2002 unsigned uops = SchedModel->getNumMicroOps(SU->getInstr()); 2003 if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) { 2004 LLVM_DEBUG(dbgs() << " SU(" << SU->NodeNum << ") uops=" 2005 << SchedModel->getNumMicroOps(SU->getInstr()) << '\n'); 2006 return true; 2007 } 2008 2009 if (CurrMOps > 0 && 2010 ((isTop() && SchedModel->mustBeginGroup(SU->getInstr())) || 2011 (!isTop() && SchedModel->mustEndGroup(SU->getInstr())))) { 2012 LLVM_DEBUG(dbgs() << " hazard: SU(" << SU->NodeNum << ") must " 2013 << (isTop() ? "begin" : "end") << " group\n"); 2014 return true; 2015 } 2016 2017 if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) { 2018 const MCSchedClassDesc *SC = DAG->getSchedClass(SU); 2019 for (const MCWriteProcResEntry &PE : 2020 make_range(SchedModel->getWriteProcResBegin(SC), 2021 SchedModel->getWriteProcResEnd(SC))) { 2022 unsigned ResIdx = PE.ProcResourceIdx; 2023 unsigned Cycles = PE.Cycles; 2024 unsigned NRCycle, InstanceIdx; 2025 std::tie(NRCycle, InstanceIdx) = getNextResourceCycle(ResIdx, Cycles); 2026 if (NRCycle > CurrCycle) { 2027 #ifndef NDEBUG 2028 MaxObservedStall = std::max(Cycles, MaxObservedStall); 2029 #endif 2030 LLVM_DEBUG(dbgs() << " SU(" << SU->NodeNum << ") " 2031 << SchedModel->getResourceName(ResIdx) 2032 << '[' << InstanceIdx - ReservedCyclesIndex[ResIdx] << ']' 2033 << "=" << NRCycle << "c\n"); 2034 return true; 2035 } 2036 } 2037 } 2038 return false; 2039 } 2040 2041 // Find the unscheduled node in ReadySUs with the highest latency. 
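// (For a top-down zone, a node's unscheduled latency is its height, i.e. its
// critical path to ExitSU; for a bottom-up zone it is its depth. The maximum
// over a ready list therefore bounds the cycles still needed to drain the
// region from this boundary.)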
unsigned SchedBoundary::
findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
  SUnit *LateSU = nullptr;
  unsigned RemLatency = 0;
  for (SUnit *SU : ReadySUs) {
    unsigned L = getUnscheduledLatency(SU);
    if (L > RemLatency) {
      RemLatency = L;
      LateSU = SU;
    }
  }
  if (LateSU) {
    LLVM_DEBUG(dbgs() << Available.getName() << " RemLatency SU("
                      << LateSU->NodeNum << ") " << RemLatency << "c\n");
  }
  return RemLatency;
}

// Count resources in this zone and the remaining unscheduled
// instructions. Return the max count, scaled. Set OtherCritIdx to the
// critical resource index, or zero if the zone is issue limited.
unsigned SchedBoundary::
getOtherResourceCount(unsigned &OtherCritIdx) {
  OtherCritIdx = 0;
  if (!SchedModel->hasInstrSchedModel())
    return 0;

  unsigned OtherCritCount = Rem->RemIssueCount
    + (RetiredMOps * SchedModel->getMicroOpFactor());
  LLVM_DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
                    << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
  for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
       PIdx != PEnd; ++PIdx) {
    unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
    if (OtherCount > OtherCritCount) {
      OtherCritCount = OtherCount;
      OtherCritIdx = PIdx;
    }
  }
  if (OtherCritIdx) {
    LLVM_DEBUG(
        dbgs() << "  " << Available.getName() << " + Remain CritRes: "
               << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
               << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
  }
  return OtherCritCount;
}

void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) {
  assert(SU->getInstr() && "Scheduled SUnit must have instr");

#ifndef NDEBUG
  // ReadyCycle had been bumped up to the CurrCycle when this node was
  // scheduled, but CurrCycle may have been eagerly advanced immediately after
  // scheduling, so may now be greater than ReadyCycle.
  if (ReadyCycle > CurrCycle)
    MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
#endif

  if (ReadyCycle < MinReadyCycle)
    MinReadyCycle = ReadyCycle;

  // Check for interlocks first. For the purpose of other heuristics, an
  // instruction that cannot issue appears as if it's not in the ReadyQueue.
  bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
  if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU) ||
      Available.size() >= ReadyListLimit)
    Pending.push(SU);
  else
    Available.push(SU);
}

/// Move the boundary of scheduled code by one cycle.
void SchedBoundary::bumpCycle(unsigned NextCycle) {
  if (SchedModel->getMicroOpBufferSize() == 0) {
    assert(MinReadyCycle < std::numeric_limits<unsigned>::max() &&
           "MinReadyCycle uninitialized");
    if (MinReadyCycle > NextCycle)
      NextCycle = MinReadyCycle;
  }
  // Update the current micro-ops, which will issue in the next cycle.
  unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
  CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;

  // Decrement DependentLatency based on the next cycle.
  if ((NextCycle - CurrCycle) > DependentLatency)
    DependentLatency = 0;
  else
    DependentLatency -= (NextCycle - CurrCycle);

  if (!HazardRec->isEnabled()) {
    // Bypass HazardRec virtual calls.
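    // With the recognizer disabled there is no pipeline state to advance or
    // recede cycle by cycle, so jump straight to NextCycle.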
2134 CurrCycle = NextCycle; 2135 } else { 2136 // Bypass getHazardType calls in case of long latency. 2137 for (; CurrCycle != NextCycle; ++CurrCycle) { 2138 if (isTop()) 2139 HazardRec->AdvanceCycle(); 2140 else 2141 HazardRec->RecedeCycle(); 2142 } 2143 } 2144 CheckPending = true; 2145 IsResourceLimited = 2146 checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(), 2147 getScheduledLatency(), true); 2148 2149 LLVM_DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() 2150 << '\n'); 2151 } 2152 2153 void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) { 2154 ExecutedResCounts[PIdx] += Count; 2155 if (ExecutedResCounts[PIdx] > MaxExecutedResCount) 2156 MaxExecutedResCount = ExecutedResCounts[PIdx]; 2157 } 2158 2159 /// Add the given processor resource to this scheduled zone. 2160 /// 2161 /// \param Cycles indicates the number of consecutive (non-pipelined) cycles 2162 /// during which this resource is consumed. 2163 /// 2164 /// \return the next cycle at which the instruction may execute without 2165 /// oversubscribing resources. 2166 unsigned SchedBoundary:: 2167 countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) { 2168 unsigned Factor = SchedModel->getResourceFactor(PIdx); 2169 unsigned Count = Factor * Cycles; 2170 LLVM_DEBUG(dbgs() << " " << SchedModel->getResourceName(PIdx) << " +" 2171 << Cycles << "x" << Factor << "u\n"); 2172 2173 // Update Executed resources counts. 2174 incExecutedResources(PIdx, Count); 2175 assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted"); 2176 Rem->RemainingCounts[PIdx] -= Count; 2177 2178 // Check if this resource exceeds the current critical resource. If so, it 2179 // becomes the critical resource. 2180 if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) { 2181 ZoneCritResIdx = PIdx; 2182 LLVM_DEBUG(dbgs() << " *** Critical resource " 2183 << SchedModel->getResourceName(PIdx) << ": " 2184 << getResourceCount(PIdx) / SchedModel->getLatencyFactor() 2185 << "c\n"); 2186 } 2187 // For reserved resources, record the highest cycle using the resource. 2188 unsigned NextAvailable, InstanceIdx; 2189 std::tie(NextAvailable, InstanceIdx) = getNextResourceCycle(PIdx, Cycles); 2190 if (NextAvailable > CurrCycle) { 2191 LLVM_DEBUG(dbgs() << " Resource conflict: " 2192 << SchedModel->getResourceName(PIdx) 2193 << '[' << InstanceIdx - ReservedCyclesIndex[PIdx] << ']' 2194 << " reserved until @" << NextAvailable << "\n"); 2195 } 2196 return NextAvailable; 2197 } 2198 2199 /// Move the boundary of scheduled code by one SUnit. 2200 void SchedBoundary::bumpNode(SUnit *SU) { 2201 // Update the reservation table. 2202 if (HazardRec->isEnabled()) { 2203 if (!isTop() && SU->isCall) { 2204 // Calls are scheduled with their preceding instructions. For bottom-up 2205 // scheduling, clear the pipeline state before emitting. 2206 HazardRec->Reset(); 2207 } 2208 HazardRec->EmitInstruction(SU); 2209 // Scheduling an instruction may have made pending instructions available. 2210 CheckPending = true; 2211 } 2212 // checkHazard should prevent scheduling multiple instructions per cycle that 2213 // exceed the issue width. 2214 const MCSchedClassDesc *SC = DAG->getSchedClass(SU); 2215 unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr()); 2216 assert( 2217 (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) && 2218 "Cannot schedule this instruction's MicroOps in the current cycle."); 2219 2220 unsigned ReadyCycle = (isTop() ? 
SU->TopReadyCycle : SU->BotReadyCycle);
  LLVM_DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");

  unsigned NextCycle = CurrCycle;
  switch (SchedModel->getMicroOpBufferSize()) {
  case 0:
    assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
    break;
  case 1:
    if (ReadyCycle > NextCycle) {
      NextCycle = ReadyCycle;
      LLVM_DEBUG(dbgs() << "  *** Stall until: " << ReadyCycle << "\n");
    }
    break;
  default:
    // We don't currently model the OOO reorder buffer, so consider all
    // scheduled MOps to be "retired". We do loosely model in-order resource
    // latency. If this instruction uses an in-order resource, account for any
    // likely stall cycles.
    if (SU->isUnbuffered && ReadyCycle > NextCycle)
      NextCycle = ReadyCycle;
    break;
  }
  RetiredMOps += IncMOps;

  // Update resource counts and critical resource.
  if (SchedModel->hasInstrSchedModel()) {
    unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
    assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
    Rem->RemIssueCount -= DecRemIssue;
    if (ZoneCritResIdx) {
      // Scale scheduled micro-ops for comparing with the critical resource.
      unsigned ScaledMOps =
          RetiredMOps * SchedModel->getMicroOpFactor();

      // If scaled micro-ops are now more than the previous critical resource
      // by a full cycle, then micro-ops issue becomes critical.
      if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
          >= (int)SchedModel->getLatencyFactor()) {
        ZoneCritResIdx = 0;
        LLVM_DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
                          << ScaledMOps / SchedModel->getLatencyFactor()
                          << "c\n");
      }
    }
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned RCycle =
        countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
      if (RCycle > NextCycle)
        NextCycle = RCycle;
    }
    if (SU->hasReservedResource) {
      // For reserved resources, record the highest cycle using the resource.
      // For top-down scheduling, this is the cycle in which we schedule this
      // instruction plus the number of cycles the operation reserves the
      // resource. For bottom-up scheduling, it is simply the instruction's
      // cycle.
      for (TargetSchedModel::ProcResIter
             PI = SchedModel->getWriteProcResBegin(SC),
             PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
        unsigned PIdx = PI->ProcResourceIdx;
        if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
          unsigned ReservedUntil, InstanceIdx;
          std::tie(ReservedUntil, InstanceIdx) = getNextResourceCycle(PIdx, 0);
          if (isTop()) {
            ReservedCycles[InstanceIdx] =
                std::max(ReservedUntil, NextCycle + PI->Cycles);
          } else
            ReservedCycles[InstanceIdx] = NextCycle;
        }
      }
    }
  }
  // Update ExpectedLatency and DependentLatency.
  unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
  unsigned &BotLatency = isTop() ?
DependentLatency : ExpectedLatency;
  if (SU->getDepth() > TopLatency) {
    TopLatency = SU->getDepth();
    LLVM_DEBUG(dbgs() << "  " << Available.getName() << " TopLatency SU("
                      << SU->NodeNum << ") " << TopLatency << "c\n");
  }
  if (SU->getHeight() > BotLatency) {
    BotLatency = SU->getHeight();
    LLVM_DEBUG(dbgs() << "  " << Available.getName() << " BotLatency SU("
                      << SU->NodeNum << ") " << BotLatency << "c\n");
  }
  // If we stall for any reason, bump the cycle.
  if (NextCycle > CurrCycle)
    bumpCycle(NextCycle);
  else
    // After updating ZoneCritResIdx and ExpectedLatency, check if we're
    // resource limited. If a stall occurred, bumpCycle does this.
    IsResourceLimited =
        checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(),
                           getScheduledLatency(), true);

  // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
  // resets CurrMOps. Loop to handle instructions with more MOps than issue in
  // one cycle. Since we commonly reach the max MOps here, opportunistically
  // bump the cycle to avoid uselessly checking everything in the readyQ.
  CurrMOps += IncMOps;

  // Bump the cycle count for issue group constraints.
  // This must be done after NextCycle has been adjusted for all other stalls.
  // Calling bumpCycle(X) will reduce CurrMOps by one issue group and set
  // CurrCycle to X.
  if ((isTop() && SchedModel->mustEndGroup(SU->getInstr())) ||
      (!isTop() && SchedModel->mustBeginGroup(SU->getInstr()))) {
    LLVM_DEBUG(dbgs() << "  Bump cycle to " << (isTop() ? "end" : "begin")
                      << " group\n");
    bumpCycle(++NextCycle);
  }

  while (CurrMOps >= SchedModel->getIssueWidth()) {
    LLVM_DEBUG(dbgs() << "  *** Max MOps " << CurrMOps << " at cycle "
                      << CurrCycle << '\n');
    bumpCycle(++NextCycle);
  }
  LLVM_DEBUG(dumpScheduledState());
}

/// Release pending ready nodes into the available queue. This makes them
/// visible to heuristics.
void SchedBoundary::releasePending() {
  // If the available queue is empty, it is safe to reset MinReadyCycle.
  if (Available.empty())
    MinReadyCycle = std::numeric_limits<unsigned>::max();

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
  for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
    SUnit *SU = *(Pending.begin()+i);
    unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;

    if (ReadyCycle < MinReadyCycle)
      MinReadyCycle = ReadyCycle;

    if (!IsBuffered && ReadyCycle > CurrCycle)
      continue;

    if (checkHazard(SU))
      continue;

    if (Available.size() >= ReadyListLimit)
      break;

    Available.push(SU);
    Pending.remove(Pending.begin()+i);
    --i; --e;
  }
  CheckPending = false;
}

/// Remove SU from the ready set for this boundary.
void SchedBoundary::removeReady(SUnit *SU) {
  if (Available.isInQueue(SU))
    Available.remove(Available.find(SU));
  else {
    assert(Pending.isInQueue(SU) && "bad ready count");
    Pending.remove(Pending.find(SU));
  }
}

/// If this queue only has one ready candidate, return it. As a side effect,
/// defer any nodes that now hit a hazard, and advance the cycle until at least
/// one node is ready.
If multiple instructions are ready, return NULL. 2388 SUnit *SchedBoundary::pickOnlyChoice() { 2389 if (CheckPending) 2390 releasePending(); 2391 2392 if (CurrMOps > 0) { 2393 // Defer any ready instrs that now have a hazard. 2394 for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) { 2395 if (checkHazard(*I)) { 2396 Pending.push(*I); 2397 I = Available.remove(I); 2398 continue; 2399 } 2400 ++I; 2401 } 2402 } 2403 for (unsigned i = 0; Available.empty(); ++i) { 2404 // FIXME: Re-enable assert once PR20057 is resolved. 2405 // assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) && 2406 // "permanent hazard"); 2407 (void)i; 2408 bumpCycle(CurrCycle + 1); 2409 releasePending(); 2410 } 2411 2412 LLVM_DEBUG(Pending.dump()); 2413 LLVM_DEBUG(Available.dump()); 2414 2415 if (Available.size() == 1) 2416 return *Available.begin(); 2417 return nullptr; 2418 } 2419 2420 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 2421 // This is useful information to dump after bumpNode. 2422 // Note that the Queue contents are more useful before pickNodeFromQueue. 2423 LLVM_DUMP_METHOD void SchedBoundary::dumpScheduledState() const { 2424 unsigned ResFactor; 2425 unsigned ResCount; 2426 if (ZoneCritResIdx) { 2427 ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx); 2428 ResCount = getResourceCount(ZoneCritResIdx); 2429 } else { 2430 ResFactor = SchedModel->getMicroOpFactor(); 2431 ResCount = RetiredMOps * ResFactor; 2432 } 2433 unsigned LFactor = SchedModel->getLatencyFactor(); 2434 dbgs() << Available.getName() << " @" << CurrCycle << "c\n" 2435 << " Retired: " << RetiredMOps; 2436 dbgs() << "\n Executed: " << getExecutedCount() / LFactor << "c"; 2437 dbgs() << "\n Critical: " << ResCount / LFactor << "c, " 2438 << ResCount / ResFactor << " " 2439 << SchedModel->getResourceName(ZoneCritResIdx) 2440 << "\n ExpectedLatency: " << ExpectedLatency << "c\n" 2441 << (IsResourceLimited ? " - Resource" : " - Latency") 2442 << " limited.\n"; 2443 } 2444 #endif 2445 2446 //===----------------------------------------------------------------------===// 2447 // GenericScheduler - Generic implementation of MachineSchedStrategy. 2448 //===----------------------------------------------------------------------===// 2449 2450 void GenericSchedulerBase::SchedCandidate:: 2451 initResourceDelta(const ScheduleDAGMI *DAG, 2452 const TargetSchedModel *SchedModel) { 2453 if (!Policy.ReduceResIdx && !Policy.DemandResIdx) 2454 return; 2455 2456 const MCSchedClassDesc *SC = DAG->getSchedClass(SU); 2457 for (TargetSchedModel::ProcResIter 2458 PI = SchedModel->getWriteProcResBegin(SC), 2459 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) { 2460 if (PI->ProcResourceIdx == Policy.ReduceResIdx) 2461 ResDelta.CritResources += PI->Cycles; 2462 if (PI->ProcResourceIdx == Policy.DemandResIdx) 2463 ResDelta.DemandedResources += PI->Cycles; 2464 } 2465 } 2466 2467 /// Compute remaining latency. We need this both to determine whether the 2468 /// overall schedule has become latency-limited and whether the instructions 2469 /// outside this zone are resource or latency limited. 
///
/// The "dependent" latency is updated incrementally during scheduling as the
/// max height/depth of scheduled nodes minus the cycles since it was
/// scheduled:
///   DLat = max(N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
///
/// The "independent" latency is the max ready queue depth:
///   ILat = max N.depth for N in Available|Pending
///
/// RemainingLatency is the greater of independent and dependent latency.
///
/// These computations are expensive, especially in DAGs with many edges, so
/// only do them if necessary.
static unsigned computeRemLatency(SchedBoundary &CurrZone) {
  unsigned RemLatency = CurrZone.getDependentLatency();
  RemLatency = std::max(RemLatency,
                        CurrZone.findMaxLatency(CurrZone.Available.elements()));
  RemLatency = std::max(RemLatency,
                        CurrZone.findMaxLatency(CurrZone.Pending.elements()));
  return RemLatency;
}

/// Returns true if the current cycle plus remaining latency is greater than
/// the critical path in the scheduling region.
bool GenericSchedulerBase::shouldReduceLatency(const CandPolicy &Policy,
                                               SchedBoundary &CurrZone,
                                               bool ComputeRemLatency,
                                               unsigned &RemLatency) const {
  // If the current cycle is already greater than the critical path, we are
  // already latency limited and don't need to compute the remaining latency.
  if (CurrZone.getCurrCycle() > Rem.CriticalPath)
    return true;

  // If we haven't scheduled anything yet, then we aren't latency limited.
  if (CurrZone.getCurrCycle() == 0)
    return false;

  if (ComputeRemLatency)
    RemLatency = computeRemLatency(CurrZone);

  return RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath;
}

/// Set the CandPolicy for a scheduling zone given the current resources and
/// latencies inside and outside the zone.
void GenericSchedulerBase::setPolicy(CandPolicy &Policy, bool IsPostRA,
                                     SchedBoundary &CurrZone,
                                     SchedBoundary *OtherZone) {
  // Apply preemptive heuristics based on the total latency and resources
  // inside and outside this zone. Potential stalls should be considered before
  // following this policy.

  // Compute the critical resource outside the zone.
  unsigned OtherCritIdx = 0;
  unsigned OtherCount =
      OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;

  bool OtherResLimited = false;
  unsigned RemLatency = 0;
  bool RemLatencyComputed = false;
  if (SchedModel->hasInstrSchedModel() && OtherCount != 0) {
    RemLatency = computeRemLatency(CurrZone);
    RemLatencyComputed = true;
    OtherResLimited = checkResourceLimit(SchedModel->getLatencyFactor(),
                                         OtherCount, RemLatency, false);
  }

  // Schedule aggressively for latency in PostRA mode. We don't check for
  // acyclic latency during PostRA, and highly out-of-order processors will
  // skip PostRA scheduling.
  if (!OtherResLimited &&
      (IsPostRA || shouldReduceLatency(Policy, CurrZone, !RemLatencyComputed,
                                       RemLatency))) {
    Policy.ReduceLatency |= true;
    LLVM_DEBUG(dbgs() << "  " << CurrZone.Available.getName()
                      << " RemainingLatency " << RemLatency << " + "
                      << CurrZone.getCurrCycle() << "c > CritPath "
                      << Rem.CriticalPath << "\n");
  }
  // If the same resource is limiting inside and outside the zone, do nothing.
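  // Reducing the zone's critical resource would only shift demand for that
  // same resource to the other side of the region, so neither ReduceResIdx
  // nor DemandResIdx is set in that case.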
  if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
    return;

  LLVM_DEBUG(if (CurrZone.isResourceLimited()) {
    dbgs() << "  " << CurrZone.Available.getName() << " ResourceLimited: "
           << SchedModel->getResourceName(CurrZone.getZoneCritResIdx()) << "\n";
  } if (OtherResLimited) dbgs()
      << "  RemainingLimit: "
      << SchedModel->getResourceName(OtherCritIdx) << "\n";
  if (!CurrZone.isResourceLimited() && !OtherResLimited) dbgs()
      << "  Latency limited both directions.\n");

  if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
    Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();

  if (OtherResLimited)
    Policy.DemandResIdx = OtherCritIdx;
}

#ifndef NDEBUG
const char *GenericSchedulerBase::getReasonStr(
    GenericSchedulerBase::CandReason Reason) {
  switch (Reason) {
  case NoCand:         return "NOCAND    ";
  case Only1:          return "ONLY1     ";
  case PhysReg:        return "PHYS-REG  ";
  case RegExcess:      return "REG-EXCESS";
  case RegCritical:    return "REG-CRIT  ";
  case Stall:          return "STALL     ";
  case Cluster:        return "CLUSTER   ";
  case Weak:           return "WEAK      ";
  case RegMax:         return "REG-MAX   ";
  case ResourceReduce: return "RES-REDUCE";
  case ResourceDemand: return "RES-DEMAND";
  case TopDepthReduce: return "TOP-DEPTH ";
  case TopPathReduce:  return "TOP-PATH  ";
  case BotHeightReduce:return "BOT-HEIGHT";
  case BotPathReduce:  return "BOT-PATH  ";
  case NextDefUse:     return "DEF-USE   ";
  case NodeOrder:      return "ORDER     ";
  }
  llvm_unreachable("Unknown reason!");
}

void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
  PressureChange P;
  unsigned ResIdx = 0;
  unsigned Latency = 0;
  switch (Cand.Reason) {
  default:
    break;
  case RegExcess:
    P = Cand.RPDelta.Excess;
    break;
  case RegCritical:
    P = Cand.RPDelta.CriticalMax;
    break;
  case RegMax:
    P = Cand.RPDelta.CurrentMax;
    break;
  case ResourceReduce:
    ResIdx = Cand.Policy.ReduceResIdx;
    break;
  case ResourceDemand:
    ResIdx = Cand.Policy.DemandResIdx;
    break;
  case TopDepthReduce:
    Latency = Cand.SU->getDepth();
    break;
  case TopPathReduce:
    Latency = Cand.SU->getHeight();
    break;
  case BotHeightReduce:
    Latency = Cand.SU->getHeight();
    break;
  case BotPathReduce:
    Latency = Cand.SU->getDepth();
    break;
  }
  dbgs() << "  Cand SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
  if (P.isValid())
    dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
           << ":" << P.getUnitInc() << " ";
  else
    dbgs() << "      ";
  if (ResIdx)
    dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
  else
    dbgs() << "         ";
  if (Latency)
    dbgs() << " " << Latency << " cycles ";
  else
    dbgs() << "          ";
  dbgs() << '\n';
}
#endif

namespace llvm {
/// Return true if this heuristic determines order.
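/// tryLess and tryGreater implement a three-way protocol: when the two values
/// differ the comparison is decisive and they return true, recording Reason
/// on the winning candidate (a numerically smaller CandReason is a
/// higher-priority reason); when the values tie they return false and the
/// caller falls through to the next, lower-priority heuristic. For example,
/// tryLess(0, 2, TryCand, Cand, Stall) marks TryCand as winning on Stall and
/// stops the comparison.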
2649 bool tryLess(int TryVal, int CandVal, 2650 GenericSchedulerBase::SchedCandidate &TryCand, 2651 GenericSchedulerBase::SchedCandidate &Cand, 2652 GenericSchedulerBase::CandReason Reason) { 2653 if (TryVal < CandVal) { 2654 TryCand.Reason = Reason; 2655 return true; 2656 } 2657 if (TryVal > CandVal) { 2658 if (Cand.Reason > Reason) 2659 Cand.Reason = Reason; 2660 return true; 2661 } 2662 return false; 2663 } 2664 2665 bool tryGreater(int TryVal, int CandVal, 2666 GenericSchedulerBase::SchedCandidate &TryCand, 2667 GenericSchedulerBase::SchedCandidate &Cand, 2668 GenericSchedulerBase::CandReason Reason) { 2669 if (TryVal > CandVal) { 2670 TryCand.Reason = Reason; 2671 return true; 2672 } 2673 if (TryVal < CandVal) { 2674 if (Cand.Reason > Reason) 2675 Cand.Reason = Reason; 2676 return true; 2677 } 2678 return false; 2679 } 2680 2681 bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand, 2682 GenericSchedulerBase::SchedCandidate &Cand, 2683 SchedBoundary &Zone) { 2684 if (Zone.isTop()) { 2685 if (Cand.SU->getDepth() > Zone.getScheduledLatency()) { 2686 if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(), 2687 TryCand, Cand, GenericSchedulerBase::TopDepthReduce)) 2688 return true; 2689 } 2690 if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(), 2691 TryCand, Cand, GenericSchedulerBase::TopPathReduce)) 2692 return true; 2693 } else { 2694 if (Cand.SU->getHeight() > Zone.getScheduledLatency()) { 2695 if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(), 2696 TryCand, Cand, GenericSchedulerBase::BotHeightReduce)) 2697 return true; 2698 } 2699 if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(), 2700 TryCand, Cand, GenericSchedulerBase::BotPathReduce)) 2701 return true; 2702 } 2703 return false; 2704 } 2705 } // end namespace llvm 2706 2707 static void tracePick(GenericSchedulerBase::CandReason Reason, bool IsTop) { 2708 LLVM_DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ") 2709 << GenericSchedulerBase::getReasonStr(Reason) << '\n'); 2710 } 2711 2712 static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand) { 2713 tracePick(Cand.Reason, Cand.AtTop); 2714 } 2715 2716 void GenericScheduler::initialize(ScheduleDAGMI *dag) { 2717 assert(dag->hasVRegLiveness() && 2718 "(PreRA)GenericScheduler needs vreg liveness"); 2719 DAG = static_cast<ScheduleDAGMILive*>(dag); 2720 SchedModel = DAG->getSchedModel(); 2721 TRI = DAG->TRI; 2722 2723 Rem.init(DAG, SchedModel); 2724 Top.init(DAG, SchedModel, &Rem); 2725 Bot.init(DAG, SchedModel, &Rem); 2726 2727 // Initialize resource counts. 2728 2729 // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or 2730 // are disabled, then these HazardRecs will be disabled. 2731 const InstrItineraryData *Itin = SchedModel->getInstrItineraries(); 2732 if (!Top.HazardRec) { 2733 Top.HazardRec = 2734 DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer( 2735 Itin, DAG); 2736 } 2737 if (!Bot.HazardRec) { 2738 Bot.HazardRec = 2739 DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer( 2740 Itin, DAG); 2741 } 2742 TopCand.SU = nullptr; 2743 BotCand.SU = nullptr; 2744 } 2745 2746 /// Initialize the per-region scheduling policy. 
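/// For example, on a hypothetical target whose widest legal integer type has
/// 32 allocatable registers, the heuristic below enables pressure tracking
/// only for regions with more than 16 instructions (NumRegionInstrs >
/// NIntRegs / 2), unless the subtarget or command line overrides it.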
void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
                                  MachineBasicBlock::iterator End,
                                  unsigned NumRegionInstrs) {
  const MachineFunction &MF = *Begin->getMF();
  const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();

  // Avoid setting up the register pressure tracker for small regions to save
  // compile time. As a rough heuristic, only track pressure when the number of
  // schedulable instructions exceeds half the integer register file.
  RegionPolicy.ShouldTrackPressure = true;
  for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
    MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
    if (TLI->isTypeLegal(LegalIntVT)) {
      unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
        TLI->getRegClassFor(LegalIntVT));
      RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
    }
  }

  // For generic targets, we default to bottom-up, because it's simpler and
  // more compile-time optimizations have been implemented in that direction.
  RegionPolicy.OnlyBottomUp = true;

  // Allow the subtarget to override default policy.
  MF.getSubtarget().overrideSchedPolicy(RegionPolicy, NumRegionInstrs);

  // After subtarget overrides, apply command line options.
  if (!EnableRegPressure) {
    RegionPolicy.ShouldTrackPressure = false;
    RegionPolicy.ShouldTrackLaneMasks = false;
  }

  // The -misched-topdown/-misched-bottomup flags can force or unforce the
  // scheduling direction; e.g. -misched-bottomup=false allows scheduling in
  // both directions.
  assert((!ForceTopDown || !ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
  if (ForceBottomUp.getNumOccurrences() > 0) {
    RegionPolicy.OnlyBottomUp = ForceBottomUp;
    if (RegionPolicy.OnlyBottomUp)
      RegionPolicy.OnlyTopDown = false;
  }
  if (ForceTopDown.getNumOccurrences() > 0) {
    RegionPolicy.OnlyTopDown = ForceTopDown;
    if (RegionPolicy.OnlyTopDown)
      RegionPolicy.OnlyBottomUp = false;
  }
}

void GenericScheduler::dumpPolicy() const {
  // Cannot completely remove virtual function even in release mode.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  dbgs() << "GenericScheduler RegionPolicy: "
         << " ShouldTrackPressure=" << RegionPolicy.ShouldTrackPressure
         << " OnlyTopDown=" << RegionPolicy.OnlyTopDown
         << " OnlyBottomUp=" << RegionPolicy.OnlyBottomUp
         << "\n";
#endif
}

/// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
/// critical path by more cycles than it takes to drain the instruction buffer.
/// We estimate an upper bound on in-flight instructions as:
///
///   CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
///   InFlightIterations = AcyclicPath / CyclesPerIteration
///   InFlightResources = InFlightIterations * LoopResources
///
/// TODO: Check execution resources in addition to IssueCount.
void GenericScheduler::checkAcyclicLatency() {
  if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
    return;

  // Scaled number of cycles per loop iteration.
  unsigned IterCount =
      std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
               Rem.RemIssueCount);
  // Scaled acyclic critical path.
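  // (CriticalPath is in cycles; scaling by the latency factor converts it to
  // the same units as RemIssueCount so the two quantities are comparable.)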
2824 unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor(); 2825 // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop 2826 unsigned InFlightCount = 2827 (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount; 2828 unsigned BufferLimit = 2829 SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor(); 2830 2831 Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit; 2832 2833 LLVM_DEBUG( 2834 dbgs() << "IssueCycles=" 2835 << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c " 2836 << "IterCycles=" << IterCount / SchedModel->getLatencyFactor() 2837 << "c NumIters=" << (AcyclicCount + IterCount - 1) / IterCount 2838 << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor() 2839 << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n"; 2840 if (Rem.IsAcyclicLatencyLimited) dbgs() << " ACYCLIC LATENCY LIMIT\n"); 2841 } 2842 2843 void GenericScheduler::registerRoots() { 2844 Rem.CriticalPath = DAG->ExitSU.getDepth(); 2845 2846 // Some roots may not feed into ExitSU. Check all of them in case. 2847 for (const SUnit *SU : Bot.Available) { 2848 if (SU->getDepth() > Rem.CriticalPath) 2849 Rem.CriticalPath = SU->getDepth(); 2850 } 2851 LLVM_DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n'); 2852 if (DumpCriticalPathLength) { 2853 errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n"; 2854 } 2855 2856 if (EnableCyclicPath && SchedModel->getMicroOpBufferSize() > 0) { 2857 Rem.CyclicCritPath = DAG->computeCyclicCriticalPath(); 2858 checkAcyclicLatency(); 2859 } 2860 } 2861 2862 namespace llvm { 2863 bool tryPressure(const PressureChange &TryP, 2864 const PressureChange &CandP, 2865 GenericSchedulerBase::SchedCandidate &TryCand, 2866 GenericSchedulerBase::SchedCandidate &Cand, 2867 GenericSchedulerBase::CandReason Reason, 2868 const TargetRegisterInfo *TRI, 2869 const MachineFunction &MF) { 2870 // If one candidate decreases and the other increases, go with it. 2871 // Invalid candidates have UnitInc==0. 2872 if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand, 2873 Reason)) { 2874 return true; 2875 } 2876 // Do not compare the magnitude of pressure changes between top and bottom 2877 // boundary. 2878 if (Cand.AtTop != TryCand.AtTop) 2879 return false; 2880 2881 // If both candidates affect the same set in the same boundary, go with the 2882 // smallest increase. 2883 unsigned TryPSet = TryP.getPSetOrMax(); 2884 unsigned CandPSet = CandP.getPSetOrMax(); 2885 if (TryPSet == CandPSet) { 2886 return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand, 2887 Reason); 2888 } 2889 2890 int TryRank = TryP.isValid() ? TRI->getRegPressureSetScore(MF, TryPSet) : 2891 std::numeric_limits<int>::max(); 2892 2893 int CandRank = CandP.isValid() ? TRI->getRegPressureSetScore(MF, CandPSet) : 2894 std::numeric_limits<int>::max(); 2895 2896 // If the candidates are decreasing pressure, reverse priority. 2897 if (TryP.getUnitInc() < 0) 2898 std::swap(TryRank, CandRank); 2899 return tryGreater(TryRank, CandRank, TryCand, Cand, Reason); 2900 } 2901 2902 unsigned getWeakLeft(const SUnit *SU, bool isTop) { 2903 return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft; 2904 } 2905 2906 /// Minimize physical register live ranges. Regalloc wants them adjacent to 2907 /// their physreg def/use. 2908 /// 2909 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf 2910 /// copies which can be prescheduled. The rest (e.g. 
x86 MUL) could be bundled
/// with the operation that produces or consumes the physreg. We'll do this
/// when regalloc has support for parallel copies.
int biasPhysReg(const SUnit *SU, bool isTop) {
  const MachineInstr *MI = SU->getInstr();

  if (MI->isCopy()) {
    unsigned ScheduledOper = isTop ? 1 : 0;
    unsigned UnscheduledOper = isTop ? 0 : 1;
    // If we have already scheduled the physreg producer/consumer, immediately
    // schedule the copy.
    if (Register::isPhysicalRegister(MI->getOperand(ScheduledOper).getReg()))
      return 1;
    // If the physreg is at the boundary, defer it. Otherwise schedule it
    // immediately to free the dependent. We can hoist the copy later.
    bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
    if (Register::isPhysicalRegister(MI->getOperand(UnscheduledOper).getReg()))
      return AtBoundary ? -1 : 1;
  }

  if (MI->isMoveImmediate()) {
    // If we have a move immediate and all successors have been assigned, bias
    // towards scheduling this later. Make sure all register defs are to
    // physical registers.
    bool DoBias = true;
    for (const MachineOperand &Op : MI->defs()) {
      if (Op.isReg() && !Register::isPhysicalRegister(Op.getReg())) {
        DoBias = false;
        break;
      }
    }

    if (DoBias)
      return isTop ? -1 : 1;
  }

  return 0;
}
} // end namespace llvm

void GenericScheduler::initCandidate(SchedCandidate &Cand, SUnit *SU,
                                     bool AtTop,
                                     const RegPressureTracker &RPTracker,
                                     RegPressureTracker &TempTracker) {
  Cand.SU = SU;
  Cand.AtTop = AtTop;
  if (DAG->isTrackingPressure()) {
    if (AtTop) {
      TempTracker.getMaxDownwardPressureDelta(
          Cand.SU->getInstr(),
          Cand.RPDelta,
          DAG->getRegionCriticalPSets(),
          DAG->getRegPressure().MaxSetPressure);
    } else {
      if (VerifyScheduling) {
        TempTracker.getMaxUpwardPressureDelta(
            Cand.SU->getInstr(),
            &DAG->getPressureDiff(Cand.SU),
            Cand.RPDelta,
            DAG->getRegionCriticalPSets(),
            DAG->getRegPressure().MaxSetPressure);
      } else {
        RPTracker.getUpwardPressureDelta(
            Cand.SU->getInstr(),
            DAG->getPressureDiff(Cand.SU),
            Cand.RPDelta,
            DAG->getRegionCriticalPSets(),
            DAG->getRegPressure().MaxSetPressure);
      }
    }
  }
  LLVM_DEBUG(if (Cand.RPDelta.Excess.isValid()) dbgs()
             << "  Try  SU(" << Cand.SU->NodeNum << ") "
             << TRI->getRegPressureSetName(Cand.RPDelta.Excess.getPSet()) << ":"
             << Cand.RPDelta.Excess.getUnitInc() << "\n");
}

/// Apply a set of heuristics to a new candidate. Heuristics are currently
/// hierarchical. This may be more efficient than a graduated cost model
/// because we don't need to evaluate all aspects of the model for each node
/// in the queue. But it's really done to make the heuristics easier to debug
/// and statistically analyze.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
/// \param Zone describes the scheduled zone that we are extending, or nullptr
///        if Cand is from a different zone than TryCand.
void GenericScheduler::tryCandidate(SchedCandidate &Cand,
                                    SchedCandidate &TryCand,
                                    SchedBoundary *Zone) const {
  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return;
  }

  // Bias physreg defs and copies towards their uses and definitions,
  // respectively.
  if (tryGreater(biasPhysReg(TryCand.SU, TryCand.AtTop),
                 biasPhysReg(Cand.SU, Cand.AtTop), TryCand, Cand, PhysReg))
    return;

  // Avoid exceeding the target's limit.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
                                               Cand.RPDelta.Excess,
                                               TryCand, Cand, RegExcess, TRI,
                                               DAG->MF))
    return;

  // Avoid increasing the max critical pressure in the scheduled region.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
                                               Cand.RPDelta.CriticalMax,
                                               TryCand, Cand, RegCritical, TRI,
                                               DAG->MF))
    return;

  // We only compare a subset of features when comparing nodes between the
  // Top and Bottom boundary. Some properties are simply incomparable; in many
  // other instances we should only override the other boundary if something
  // is a clear good pick on one boundary. Skip heuristics that are more
  // "tie-breaking" in nature.
  bool SameBoundary = Zone != nullptr;
  if (SameBoundary) {
    // For loops that are acyclic path limited, aggressively schedule for
    // latency. Within a single cycle, whenever CurrMOps > 0, allow normal
    // heuristics to take precedence.
    if (Rem.IsAcyclicLatencyLimited && !Zone->getCurrMOps() &&
        tryLatency(TryCand, Cand, *Zone))
      return;

    // Prioritize instructions that read unbuffered resources by stall cycles.
    if (tryLess(Zone->getLatencyStallCycles(TryCand.SU),
                Zone->getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
      return;
  }

  // Keep clustered nodes together to encourage downstream peephole
  // optimizations which may reduce resource requirements.
  //
  // This is a best effort to set things up for a post-RA pass. Optimizations
  // like generating loads of multiple registers should ideally be done within
  // the scheduler pass by combining the loads during DAG postprocessing.
  const SUnit *CandNextClusterSU =
    Cand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
  const SUnit *TryCandNextClusterSU =
    TryCand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
  if (tryGreater(TryCand.SU == TryCandNextClusterSU,
                 Cand.SU == CandNextClusterSU,
                 TryCand, Cand, Cluster))
    return;

  if (SameBoundary) {
    // Weak edges are for clustering and other constraints.
    if (tryLess(getWeakLeft(TryCand.SU, TryCand.AtTop),
                getWeakLeft(Cand.SU, Cand.AtTop),
                TryCand, Cand, Weak))
      return;
  }

  // Avoid increasing the max pressure of the entire region.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
                                               Cand.RPDelta.CurrentMax,
                                               TryCand, Cand, RegMax, TRI,
                                               DAG->MF))
    return;

  if (SameBoundary) {
    // Avoid critical resource consumption and balance the schedule.
    TryCand.initResourceDelta(DAG, SchedModel);
    if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
                TryCand, Cand, ResourceReduce))
      return;
    if (tryGreater(TryCand.ResDelta.DemandedResources,
                   Cand.ResDelta.DemandedResources,
                   TryCand, Cand, ResourceDemand))
      return;

    // Avoid serializing long latency dependence chains.
    // For acyclic path limited loops, latency was already checked above.
    if (!RegionPolicy.DisableLatencyHeuristic && TryCand.Policy.ReduceLatency &&
        !Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, *Zone))
      return;

    // Fall through to original instruction order.
    if ((Zone->isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
        || (!Zone->isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
      TryCand.Reason = NodeOrder;
    }
  }
}

/// Pick the best candidate from the queue.
///
/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
/// DAG building. To adjust for the current scheduling location we need to
/// maintain the number of vreg uses remaining to be top-scheduled.
void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
                                         const CandPolicy &ZonePolicy,
                                         const RegPressureTracker &RPTracker,
                                         SchedCandidate &Cand) {
  // getMaxPressureDelta temporarily modifies the tracker.
  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);

  ReadyQueue &Q = Zone.Available;
  for (SUnit *SU : Q) {
    SchedCandidate TryCand(ZonePolicy);
    initCandidate(TryCand, SU, Zone.isTop(), RPTracker, TempTracker);
    // Pass SchedBoundary only when comparing nodes from the same boundary.
    SchedBoundary *ZoneArg = Cand.AtTop == TryCand.AtTop ? &Zone : nullptr;
    tryCandidate(Cand, TryCand, ZoneArg);
    if (TryCand.Reason != NoCand) {
      // Initialize resource delta if needed in case future heuristics query it.
      if (TryCand.ResDelta == SchedResourceDelta())
        TryCand.initResourceDelta(DAG, SchedModel);
      Cand.setBest(TryCand);
      LLVM_DEBUG(traceCandidate(Cand));
    }
  }
}

/// Pick the best candidate node from either the top or bottom queue.
SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (SUnit *SU = Bot.pickOnlyChoice()) {
    IsTopNode = false;
    tracePick(Only1, false);
    return SU;
  }
  if (SUnit *SU = Top.pickOnlyChoice()) {
    IsTopNode = true;
    tracePick(Only1, true);
    return SU;
  }
  // Set the bottom-up policy based on the state of the current bottom zone and
  // the instructions outside the zone, including the top zone.
  CandPolicy BotPolicy;
  setPolicy(BotPolicy, /*IsPostRA=*/false, Bot, &Top);
  // Set the top-down policy based on the state of the current top zone and
  // the instructions outside the zone, including the bottom zone.
  CandPolicy TopPolicy;
  setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot);

  // See if BotCand is still valid (because we previously scheduled from Top).
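  // (BotCand is cached across calls to pickNodeBidirectional; it must be
  // recomputed if its SU has already been scheduled or the zone's policy has
  // changed since it was picked.)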
  LLVM_DEBUG(dbgs() << "Picking from Bot:\n");
  if (!BotCand.isValid() || BotCand.SU->isScheduled ||
      BotCand.Policy != BotPolicy) {
    BotCand.reset(CandPolicy());
    pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand);
    assert(BotCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    LLVM_DEBUG(traceCandidate(BotCand));
#ifndef NDEBUG
    if (VerifyScheduling) {
      SchedCandidate TCand;
      TCand.reset(CandPolicy());
      pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), TCand);
      assert(TCand.SU == BotCand.SU &&
             "Last pick result should correspond to re-picking right now");
    }
#endif
  }

  // Check if the top Q has a better candidate.
  LLVM_DEBUG(dbgs() << "Picking from Top:\n");
  if (!TopCand.isValid() || TopCand.SU->isScheduled ||
      TopCand.Policy != TopPolicy) {
    TopCand.reset(CandPolicy());
    pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand);
    assert(TopCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    LLVM_DEBUG(traceCandidate(TopCand));
#ifndef NDEBUG
    if (VerifyScheduling) {
      SchedCandidate TCand;
      TCand.reset(CandPolicy());
      pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TCand);
      assert(TCand.SU == TopCand.SU &&
             "Last pick result should correspond to re-picking right now");
    }
#endif
  }

  // Pick best from BotCand and TopCand.
  assert(BotCand.isValid());
  assert(TopCand.isValid());
  SchedCandidate Cand = BotCand;
  TopCand.Reason = NoCand;
  tryCandidate(Cand, TopCand, nullptr);
  if (TopCand.Reason != NoCand) {
    Cand.setBest(TopCand);
    LLVM_DEBUG(traceCandidate(Cand));
  }

  IsTopNode = Cand.AtTop;
  tracePick(Cand);
  return Cand.SU;
}

/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    if (RegionPolicy.OnlyTopDown) {
      SU = Top.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        TopCand.reset(NoPolicy);
        pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand);
        assert(TopCand.Reason != NoCand && "failed to find a candidate");
        tracePick(TopCand);
        SU = TopCand.SU;
      }
      IsTopNode = true;
    } else if (RegionPolicy.OnlyBottomUp) {
      SU = Bot.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        BotCand.reset(NoPolicy);
        pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand);
        assert(BotCand.Reason != NoCand && "failed to find a candidate");
        tracePick(BotCand);
        SU = BotCand.SU;
      }
      IsTopNode = false;
    } else {
      SU = pickNodeBidirectional(IsTopNode);
    }
  } while (SU->isScheduled);

  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
                    << *SU->getInstr());
  return SU;
}

void GenericScheduler::reschedulePhysReg(SUnit *SU, bool isTop) {
  MachineBasicBlock::iterator InsertPos = SU->getInstr();
  if (!isTop)
    ++InsertPos;
  SmallVectorImpl<SDep> &Deps = isTop ?
      SU->Preds : SU->Succs;

  // Find already scheduled copies with a single physreg dependence and move
  // them just above the scheduled instruction.
  for (SDep &Dep : Deps) {
    if (Dep.getKind() != SDep::Data ||
        !Register::isPhysicalRegister(Dep.getReg()))
      continue;
    SUnit *DepSU = Dep.getSUnit();
    if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
      continue;
    MachineInstr *Copy = DepSU->getInstr();
    if (!Copy->isCopy() && !Copy->isMoveImmediate())
      continue;
    LLVM_DEBUG(dbgs() << " Rescheduling physreg copy ";
               DAG->dumpNode(*Dep.getSUnit()));
    DAG->moveInstruction(Copy, InsertPos);
  }
}

/// Update the scheduler's state after scheduling a node. This is the same node
/// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
/// update its state based on the current cycle before MachineSchedStrategy
/// does.
///
/// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
/// them here. See comments in biasPhysReg.
void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  if (IsTopNode) {
    SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
    Top.bumpNode(SU);
    if (SU->hasPhysRegUses)
      reschedulePhysReg(SU, true);
  } else {
    SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
    Bot.bumpNode(SU);
    if (SU->hasPhysRegDefs)
      reschedulePhysReg(SU, false);
  }
}

/// Create the standard converging machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
ScheduleDAGMILive *llvm::createGenericSchedLive(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
      new ScheduleDAGMILive(C, std::make_unique<GenericScheduler>(C));
  // Register DAG post-processors.
  //
  // FIXME: extend the mutation API to allow earlier mutations to instantiate
  // data and pass it to later mutations. Have a single mutation that gathers
  // the interesting nodes in one pass.
  DAG->addMutation(createCopyConstrainDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
  return createGenericSchedLive(C);
}

static MachineSchedRegistry
GenericSchedRegistry("converge", "Standard converging scheduler.",
                     createConvergingSched);

//===----------------------------------------------------------------------===//
// PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
//===----------------------------------------------------------------------===//

void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
  DAG = Dag;
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  BotRoots.clear();

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
  // or are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  if (!Top.HazardRec) {
    Top.HazardRec =
        DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
            Itin, DAG);
  }
}

void PostGenericScheduler::registerRoots() {
  Rem.CriticalPath = DAG->ExitSU.getDepth();

  // Some roots may not feed into ExitSU. Check all of them just in case.
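  // (The critical path is the maximum depth over all bottom roots; using
  // ExitSU alone would underestimate it when some roots have no path to the
  // exit node.)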
  for (const SUnit *SU : BotRoots) {
    if (SU->getDepth() > Rem.CriticalPath)
      Rem.CriticalPath = SU->getDepth();
  }
  LLVM_DEBUG(dbgs() << "Critical Path: (PGS-RR) " << Rem.CriticalPath << '\n');
  if (DumpCriticalPathLength) {
    errs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << " \n";
  }
}

/// Apply a set of heuristics to a new candidate for PostRA scheduling.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
void PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
                                        SchedCandidate &TryCand) {
  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return;
  }

  // Prioritize instructions that read unbuffered resources by stall cycles.
  if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
              Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
    return;

  // Keep clustered nodes together.
  if (tryGreater(TryCand.SU == DAG->getNextClusterSucc(),
                 Cand.SU == DAG->getNextClusterSucc(),
                 TryCand, Cand, Cluster))
    return;

  // Avoid critical resource consumption and balance the schedule.
  if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
              TryCand, Cand, ResourceReduce))
    return;
  if (tryGreater(TryCand.ResDelta.DemandedResources,
                 Cand.ResDelta.DemandedResources,
                 TryCand, Cand, ResourceDemand))
    return;

  // Avoid serializing long latency dependence chains.
  if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
    return;
  }

  // Fall through to original instruction order.
  if (TryCand.SU->NodeNum < Cand.SU->NodeNum)
    TryCand.Reason = NodeOrder;
}

void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
  ReadyQueue &Q = Top.Available;
  for (SUnit *SU : Q) {
    SchedCandidate TryCand(Cand.Policy);
    TryCand.SU = SU;
    TryCand.AtTop = true;
    TryCand.initResourceDelta(DAG, SchedModel);
    tryCandidate(Cand, TryCand);
    if (TryCand.Reason != NoCand) {
      Cand.setBest(TryCand);
      LLVM_DEBUG(traceCandidate(Cand));
    }
  }
}

/// Pick the next node to schedule.
SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    SU = Top.pickOnlyChoice();
    if (SU) {
      tracePick(Only1, true);
    } else {
      CandPolicy NoPolicy;
      SchedCandidate TopCand(NoPolicy);
      // Set the top-down policy based on the state of the current top zone and
      // the instructions outside the zone, including the bottom zone.
      setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
      pickNodeFromQueue(TopCand);
      assert(TopCand.Reason != NoCand && "failed to find a candidate");
      tracePick(TopCand);
      SU = TopCand.SU;
    }
  } while (SU->isScheduled);

  IsTopNode = true;
  Top.removeReady(SU);

  LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
                    << *SU->getInstr());
  return SU;
}

/// Called after ScheduleDAGMI has scheduled an instruction and updated
/// scheduled/remaining flags in the DAG nodes.
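/// The post-RA strategy is strictly top-down (pickNode always reports
/// IsTopNode = true), so only the top zone's cycle state advances here.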
void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
  Top.bumpNode(SU);
}

ScheduleDAGMI *llvm::createGenericSchedPostRA(MachineSchedContext *C) {
  return new ScheduleDAGMI(C, std::make_unique<PostGenericScheduler>(C),
                           /*RemoveKillFlags=*/true);
}

//===----------------------------------------------------------------------===//
// ILP Scheduler. Currently for experimental analysis of heuristics.
//===----------------------------------------------------------------------===//

namespace {

/// Order nodes by the ILP metric.
struct ILPOrder {
  const SchedDFSResult *DFSResult = nullptr;
  const BitVector *ScheduledTrees = nullptr;
  bool MaximizeILP;

  ILPOrder(bool MaxILP) : MaximizeILP(MaxILP) {}

  /// Apply a less-than relation on node priority.
  ///
  /// (Return true if A comes after B in the Q.)
  bool operator()(const SUnit *A, const SUnit *B) const {
    unsigned SchedTreeA = DFSResult->getSubtreeID(A);
    unsigned SchedTreeB = DFSResult->getSubtreeID(B);
    if (SchedTreeA != SchedTreeB) {
      // Unscheduled trees have lower priority.
      if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
        return ScheduledTrees->test(SchedTreeB);

      // Trees with shallower connections have lower priority.
      if (DFSResult->getSubtreeLevel(SchedTreeA)
          != DFSResult->getSubtreeLevel(SchedTreeB)) {
        return DFSResult->getSubtreeLevel(SchedTreeA)
               < DFSResult->getSubtreeLevel(SchedTreeB);
      }
    }
    if (MaximizeILP)
      return DFSResult->getILP(A) < DFSResult->getILP(B);
    else
      return DFSResult->getILP(A) > DFSResult->getILP(B);
  }
};

/// Schedule based on the ILP metric.
class ILPScheduler : public MachineSchedStrategy {
  ScheduleDAGMILive *DAG = nullptr;
  ILPOrder Cmp;

  std::vector<SUnit*> ReadyQ;

public:
  ILPScheduler(bool MaximizeILP) : Cmp(MaximizeILP) {}

  void initialize(ScheduleDAGMI *dag) override {
    assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
    DAG = static_cast<ScheduleDAGMILive*>(dag);
    DAG->computeDFSResult();
    Cmp.DFSResult = DAG->getDFSResult();
    Cmp.ScheduledTrees = &DAG->getScheduledTrees();
    ReadyQ.clear();
  }

  void registerRoots() override {
    // Restore the heap in ReadyQ with the updated DFS results.
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  /// Callback to select the highest priority node from the ready Q.
  SUnit *pickNode(bool &IsTopNode) override {
    if (ReadyQ.empty()) return nullptr;
    std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
    SUnit *SU = ReadyQ.back();
    ReadyQ.pop_back();
    IsTopNode = false;
    LLVM_DEBUG(dbgs() << "Pick node "
                      << "SU(" << SU->NodeNum << ") "
                      << " ILP: " << DAG->getDFSResult()->getILP(SU)
                      << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU)
                      << " @"
                      << DAG->getDFSResult()->getSubtreeLevel(
                             DAG->getDFSResult()->getSubtreeID(SU))
                      << '\n'
                      << "Scheduling " << *SU->getInstr());
    return SU;
  }

  /// Scheduler callback to notify that a new subtree is scheduled.
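  /// Re-heapify the ready queue: the comparator consults ScheduledTrees, so
  /// completing a subtree can change the priority of every queued node.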
  void scheduleTree(unsigned SubtreeID) override {
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Callback after a node is scheduled. The DAG marks newly scheduled trees
  /// and updates DFSResults itself (see scheduleTree above), so here we only
  /// assert the bottom-up invariant that SchedDFSResult requires.
  void schedNode(SUnit *SU, bool IsTopNode) override {
    assert(!IsTopNode && "SchedDFSResult needs bottom-up");
  }

  void releaseTopNode(SUnit *) override { /*only called for top roots*/ }

  void releaseBottomNode(SUnit *SU) override {
    ReadyQ.push_back(SU);
    std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }
};

} // end anonymous namespace

static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, std::make_unique<ILPScheduler>(true));
}
static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, std::make_unique<ILPScheduler>(false));
}

static MachineSchedRegistry ILPMaxRegistry(
    "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
static MachineSchedRegistry ILPMinRegistry(
    "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);

//===----------------------------------------------------------------------===//
// Machine Instruction Shuffler for Correctness Testing
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace {

/// Apply a less-than relation on the node order, which corresponds to the
/// instruction order prior to scheduling. IsReverse implements greater-than.
template<bool IsReverse>
struct SUnitOrder {
  bool operator()(SUnit *A, SUnit *B) const {
    if (IsReverse)
      return A->NodeNum > B->NodeNum;
    else
      return A->NodeNum < B->NodeNum;
  }
};

/// Reorder instructions as much as possible.
class InstructionShuffler : public MachineSchedStrategy {
  bool IsAlternating;
  bool IsTopDown;

  // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
  // gives nodes with a higher number higher priority, causing the latest
  // instructions to be scheduled first.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false>>
      TopQ;

  // When scheduling bottom-up, use greater-than as the queue priority.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true>>
      BottomQ;

public:
  InstructionShuffler(bool alternate, bool topdown)
      : IsAlternating(alternate), IsTopDown(topdown) {}

  void initialize(ScheduleDAGMI*) override {
    TopQ.clear();
    BottomQ.clear();
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  SUnit *pickNode(bool &IsTopNode) override {
    SUnit *SU;
    if (IsTopDown) {
      do {
        if (TopQ.empty()) return nullptr;
        SU = TopQ.top();
        TopQ.pop();
      } while (SU->isScheduled);
      IsTopNode = true;
    } else {
      do {
        if (BottomQ.empty()) return nullptr;
        SU = BottomQ.top();
        BottomQ.pop();
      } while (SU->isScheduled);
      IsTopNode = false;
    }
    if (IsAlternating)
      IsTopDown = !IsTopDown;
    return SU;
  }

  void schedNode(SUnit *SU, bool IsTopNode) override {}

  void releaseTopNode(SUnit *SU) override {
    TopQ.push(SU);
  }
  void releaseBottomNode(SUnit *SU) override {
    BottomQ.push(SU);
  }
};

} // end anonymous namespace

static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
  bool Alternate = !ForceTopDown && !ForceBottomUp;
  bool TopDown = !ForceBottomUp;
  assert((TopDown || !ForceTopDown) &&
         "-misched-topdown incompatible with -misched-bottomup");
  return new ScheduleDAGMILive(
      C, std::make_unique<InstructionShuffler>(Alternate, TopDown));
}

static MachineSchedRegistry ShufflerRegistry(
    "shuffle", "Shuffle machine instructions alternating directions",
    createInstructionShuffler);
#endif // !NDEBUG

//===----------------------------------------------------------------------===//
// GraphWriter support for ScheduleDAGMILive.
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace llvm {

template<> struct GraphTraits<
    ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};

template<>
struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getGraphName(const ScheduleDAG *G) {
    return G->MF.getName();
  }

  static bool renderGraphFromBottomUp() {
    return true;
  }

  static bool isNodeHidden(const SUnit *Node) {
    if (ViewMISchedCutoff == 0)
      return false;
    return (Node->Preds.size() > ViewMISchedCutoff
            || Node->Succs.size() > ViewMISchedCutoff);
  }

  /// If you want to override the dot attributes printed for a particular
  /// edge, override this method.
  static std::string getEdgeAttributes(const SUnit *Node,
                                       SUnitIterator EI,
                                       const ScheduleDAG *Graph) {
    if (EI.isArtificialDep())
      return "color=cyan,style=dashed";
    if (EI.isCtrlDep())
      return "color=blue,style=dashed";
    return "";
  }

  static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
    std::string Str;
    raw_string_ostream SS(Str);
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
        static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    SS << "SU:" << SU->NodeNum;
    if (DFS)
      SS << " I:" << DFS->getNumInstrs(SU);
    return SS.str();
  }

  static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
    return G->getGraphNodeLabel(SU);
  }

  static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
    std::string Str("shape=Mrecord");
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
        static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    if (DFS) {
      Str += ",style=filled,fillcolor=\"#";
      Str += DOT::getColorString(DFS->getSubtreeID(N));
      Str += '"';
    }
    return Str;
  }
};

} // end namespace llvm
#endif // NDEBUG

/// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
/// rendered using 'dot'.
void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
#ifndef NDEBUG
  ViewGraph(this, Name, false, Title);
#else
  errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}

/// Out-of-line implementation with no arguments is handy for gdb.
void ScheduleDAGMI::viewGraph() {
  viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
}
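// Example (illustrative sketch, not compiled here): an out-of-tree strategy
// becomes selectable with -misched=<name> via the same registry pattern used
// by the schedulers above. `MyStrategy` is a hypothetical MachineSchedStrategy
// subclass, not part of this file:
//
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, std::make_unique<MyStrategy>(C));
//   }
//   static MachineSchedRegistry
//       MySchedRegistry("my-sched", "Example custom strategy.", createMySched);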