//===- MachineScheduler.cpp - Machine Instruction Scheduler --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachinePassRegistry.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "machine-scheduler"

STATISTIC(NumClustered, "Number of load/store pairs clustered");

namespace llvm {

cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
                       cl::desc("Print critical path length to stdout"));

cl::opt<bool> VerifyScheduling(
    "verify-misched", cl::Hidden,
    cl::desc("Verify machine instrs before and after machine scheduling"));
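// Like any cl::opt, the flags above can be passed directly to llc, or
// forwarded from clang via -mllvm. Illustrative invocations (not exercised by
// this file):
//
//   llc -misched-topdown -verify-misched foo.ll
//   clang -c -mllvm -misched-dcpl foo.c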
} // end namespace llvm

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

/// In some situations a few uninteresting nodes depend on nearly all other
/// nodes in the graph; provide a cutoff to hide them.
static cl::opt<unsigned> ViewMISchedCutoff("view-misched-cutoff", cl::Hidden,
  cl::desc("Hide nodes with more predecessors/successors than the cutoff"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));

static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
  cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
  cl::desc("Only schedule this MBB#"));
static cl::opt<bool> PrintDAGs("misched-print-dags", cl::Hidden,
  cl::desc("Print schedule DAGs"));
#else
static const bool ViewMISchedDAGs = false;
static const bool PrintDAGs = false;
#endif // NDEBUG

/// Avoid quadratic complexity in unusually large basic blocks by limiting the
/// size of the ready lists.
static cl::opt<unsigned> ReadyListLimit("misched-limit", cl::Hidden,
  cl::desc("Limit ready list to N instructions"), cl::init(256));

static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
  cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(true));

static cl::opt<bool> EnableMemOpCluster("misched-cluster", cl::Hidden,
                                        cl::desc("Enable memop clustering."),
                                        cl::init(true));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}

void ScheduleDAGMutation::anchor() {}

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext() {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {

/// Base class for a machine scheduler class that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}

  void print(raw_ostream &O, const Module* = nullptr) const override;

protected:
  void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags);
};

/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createMachineScheduler();
};
/// PostMachineScheduler runs after register allocation, shortly before code
/// emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
  PostMachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createPostMachineScheduler();
};

} // end anonymous namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, DEBUG_TYPE,
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, DEBUG_TYPE,
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler() : MachineSchedulerBase(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

char PostMachineScheduler::ID = 0;

char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;

INITIALIZE_PASS(PostMachineScheduler, "postmisched",
                "PostRA Machine Instruction Scheduler", false, false)

PostMachineScheduler::PostMachineScheduler() : MachineSchedulerBase(ID) {
  initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry<MachineSchedRegistry::ScheduleDAGCtor>
    MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return nullptr;
}
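// Alternative schedulers register themselves with MachineSchedRegistry and
// become selectable via -misched=<name>. As an illustrative sketch (the names
// are hypothetical, not defined in this file):
//
// \code
//   static ScheduleDAGInstrs *createMyScheduler(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, std::make_unique<GenericScheduler>(C));
//   }
//   static MachineSchedRegistry
//       MySchedRegistry("my-sched", "Description of my scheduler.",
//                       createMyScheduler);
// \endcode
//
// After registration, `llc -misched=my-sched` would select it in place of the
// target default.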
/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry>>
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);

static cl::opt<bool> EnableMachineSched(
    "enable-misched",
    cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePostRAMachineSched(
    "enable-post-misched",
    cl::desc("Enable the post-ra machine instruction scheduling pass."),
    cl::init(true), cl::Hidden);

/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugInstr())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return priorNonDebug(MachineBasicBlock::const_iterator(I), Beg)
      .getNonConstIterator();
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for(; I != End; ++I) {
    if (!I->isDebugInstr())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  return nextIfDebug(MachineBasicBlock::const_iterator(I), End)
      .getNonConstIterator();
}

/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor != useDefaultMachineSched)
    return Ctor(this);

  // Get the default scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedLive(this);
}

/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}
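// The PassConfig hooks above are how a target customizes scheduling without
// touching this file. A hedged sketch of what such an override might look like
// in a target's TargetPassConfig subclass (MyPassConfig and MySchedStrategy
// are hypothetical):
//
// \code
//   ScheduleDAGInstrs *
//   MyPassConfig::createMachineScheduler(MachineSchedContext *C) const {
//     return new ScheduleDAGMILive(C, std::make_unique<MySchedStrategy>(C));
//   }
// \endcode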
/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(mf.getFunction()))
    return false;

  if (EnableMachineSched.getNumOccurrences()) {
    if (!EnableMachineSched)
      return false;
  } else if (!mf.getSubtarget().enableMachineScheduler())
    return false;

  LLVM_DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    LLVM_DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler, false);

  LLVM_DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(mf.getFunction()))
    return false;

  if (EnablePostRAMachineSched.getNumOccurrences()) {
    if (!EnablePostRAMachineSched)
      return false;
  } else if (!mf.getSubtarget().enablePostRAMachineScheduler()) {
    LLVM_DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
    return false;
  }
  LLVM_DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  scheduleRegions(*Scheduler, true);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}
/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need the isCall check to
/// enforce the boundary, but there would be no benefit to postRA scheduling
/// across calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
                            MachineBasicBlock *MBB,
                            MachineFunction *MF,
                            const TargetInstrInfo *TII) {
  return MI->isCall() || TII->isSchedulingBoundary(*MI, MBB, *MF);
}

/// A region of an MBB for scheduling.
namespace {
struct SchedRegion {
  /// RegionBegin is the first instruction in the scheduling region, and
  /// RegionEnd is either MBB->end() or the scheduling boundary after the
  /// last instruction in the scheduling region. These iterators cannot refer
  /// to instructions outside of the identified scheduling region because
  /// those may be reordered before scheduling this region.
  MachineBasicBlock::iterator RegionBegin;
  MachineBasicBlock::iterator RegionEnd;
  unsigned NumRegionInstrs;

  SchedRegion(MachineBasicBlock::iterator B, MachineBasicBlock::iterator E,
              unsigned N) :
    RegionBegin(B), RegionEnd(E), NumRegionInstrs(N) {}
};
} // end anonymous namespace

using MBBRegionsVector = SmallVector<SchedRegion, 16>;

static void
getSchedRegions(MachineBasicBlock *MBB,
                MBBRegionsVector &Regions,
                bool RegionsTopDown) {
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  MachineBasicBlock::iterator I = nullptr;
  for(MachineBasicBlock::iterator RegionEnd = MBB->end();
      RegionEnd != MBB->begin(); RegionEnd = I) {

    // Avoid decrementing RegionEnd for blocks with no terminator.
    if (RegionEnd != MBB->end() ||
        isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII)) {
      --RegionEnd;
    }

    // The next region starts above the previous region. Look backward in the
    // instruction stream until we find the nearest boundary.
    unsigned NumRegionInstrs = 0;
    I = RegionEnd;
    for (;I != MBB->begin(); --I) {
      MachineInstr &MI = *std::prev(I);
      if (isSchedBoundary(&MI, &*MBB, MF, TII))
        break;
      if (!MI.isDebugInstr()) {
        // MBB::size() uses instr_iterator to count. Here we need a bundle to
        // count as a single instruction.
        ++NumRegionInstrs;
      }
    }

    // It's possible we found a scheduling region that only has debug
    // instructions. Don't bother scheduling these.
    if (NumRegionInstrs != 0)
      Regions.push_back(SchedRegion(I, RegionEnd, NumRegionInstrs));
  }

  if (RegionsTopDown)
    std::reverse(Regions.begin(), Regions.end());
}

/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler,
                                           bool FixKillFlags) {
  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(&*MBB);

#ifndef NDEBUG
    if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
      continue;
    if (SchedOnlyBlock.getNumOccurrences()
        && (int)SchedOnlyBlock != MBB->getNumber())
      continue;
#endif

    // Break the block into scheduling regions [I, RegionEnd). RegionEnd
    // points to the scheduling boundary at the bottom of the region.
    // The DAG does not include RegionEnd, but the region does (i.e. the next
    // RegionEnd is above the previous RegionBegin). If the current block has
    // no terminator then RegionEnd == MBB->end() for the bottom region.
    //
    // All regions of MBB are first found and stored in MBBRegions, which are
    // then processed top-down within the MBB if the Scheduler's
    // doMBBSchedRegionsTopDown() returns true.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls. Instructions must not be
    // added to regions other than the current one without updating MBBRegions.

    MBBRegionsVector MBBRegions;
    getSchedRegions(&*MBB, MBBRegions, Scheduler.doMBBSchedRegionsTopDown());
    for (MBBRegionsVector::iterator R = MBBRegions.begin();
         R != MBBRegions.end(); ++R) {
      MachineBasicBlock::iterator I = R->RegionBegin;
      MachineBasicBlock::iterator RegionEnd = R->RegionEnd;
      unsigned NumRegionInstrs = R->NumRegionInstrs;

      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == std::prev(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      LLVM_DEBUG(dbgs() << "********** MI Scheduling **********\n");
      LLVM_DEBUG(dbgs() << MF->getName() << ":" << printMBBReference(*MBB)
                        << " " << MBB->getName() << "\n From: " << *I
                        << " To: ";
                 if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
                 else dbgs() << "End";
                 dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
      if (DumpCriticalPathLength) {
        errs() << MF->getName();
        errs() << ":%bb. " << MBB->getNumber();
        errs() << " " << MBB->getName() << " \n";
      }

      // Schedule a region: possibly reorder instructions.
      // This invalidates the original region iterators.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();
    }
    Scheduler.finishBlock();
    // FIXME: Ideally, no further passes should rely on kill flags. However,
    // thumb2 size reduction is currently an exception, so the PostMIScheduler
    // needs to do this.
    if (FixKillFlags)
      Scheduler.fixupKills(*MBB);
  }
  Scheduler.finalizeSchedule();
}

void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ReadyQueue::dump() const {
  dbgs() << "Queue " << Name << ": ";
  for (const SUnit *SU : Queue)
    dbgs() << SU->NodeNum << " ";
  dbgs() << "\n";
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping
// for virtual registers.
//===----------------------------------------------------------------------===//

// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() = default;
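// The releaseSucc/releasePred callbacks below implement standard list
// scheduling bookkeeping. For example (illustrative only): given strong data
// edges A->C and B->C, SU(C) starts with NumPredsLeft == 2; scheduling A
// top-down decrements it to 1, and scheduling B decrements it to 0, at which
// point C is handed to the strategy via releaseTopNode() and becomes
// available for picking.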
/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    dumpNode(*SuccSU);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
    SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();

  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SDep &Succ : SU->Succs)
    releaseSucc(SU, &Succ);
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    dumpNode(*PredSU);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
    PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();

  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SDep &Pred : SU->Preds)
    releasePred(SU, &Pred);
}

void ScheduleDAGMI::startBlock(MachineBasicBlock *bb) {
  ScheduleDAGInstrs::startBlock(bb);
  SchedImpl->enterMBB(bb);
}

void ScheduleDAGMI::finishBlock() {
  SchedImpl->leaveMBB();
  ScheduleDAGInstrs::finishBlock();
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  SchedImpl->initPolicy(begin, end, regioninstrs);
}

/// This is normally called from the main scheduler loop but may also be
/// invoked by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
  MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals.
  if (LIS)
    LIS->handleMove(*MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}

/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  LLVM_DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
  LLVM_DEBUG(SchedImpl->dumpPolicy());

  // Build the DAG.
  buildSchedGraph(AA);

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  LLVM_DEBUG(dump());
  if (PrintDAGs) dump();
  if (ViewMISchedDAGs) viewGraph();

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    LLVM_DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    } else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
    }
    // Notify the scheduling strategy before updating the DAG.
    // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
    // runs, it can then use the accurate ReadyCycle time to determine whether
    // newly released nodes can move to the readyQ.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  LLVM_DEBUG({
    dbgs() << "*** Final schedule for "
           << printMBBReference(*begin()->getParent()) << " ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
}
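// postprocessDAG() below runs whatever mutations were attached to this DAG.
// Targets typically attach them when constructing the scheduler; a plausible
// sketch (inside a hypothetical target's createMachineScheduler override):
//
// \code
//   ScheduleDAGMILive *DAG = createGenericSchedLive(C);
//   DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
//   DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
//   return DAG;
// \endcode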
/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (auto &m : Mutations)
    m->apply(this);
}

void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                      SmallVectorImpl<SUnit*> &BotRoots) {
  for (SUnit &SU : SUnits) {
    assert(!SU.isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU.biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!SU.NumPredsLeft)
      TopRoots.push_back(&SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!SU.NumSuccsLeft)
      BotRoots.push_back(&SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = nullptr;
  NextClusterPred = nullptr;

  // Release all DAG roots for scheduling, not including ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SUnit *SU : TopRoots)
    SchedImpl->releaseTopNode(SU);

  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *>>::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == std::prev(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      dumpNode(*SU);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with
// LiveIntervals preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}

void ScheduleDAGMILive::collectVRegUses(SUnit &SU) {
  const MachineInstr &MI = *SU.getInstr();
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (!MO.readsReg())
      continue;
    if (TrackLaneMasks && !MO.isUse())
      continue;

    Register Reg = MO.getReg();
    if (!Register::isVirtualRegister(Reg))
      continue;

    // Ignore re-defs.
    if (TrackLaneMasks) {
      bool FoundDef = false;
      for (const MachineOperand &MO2 : MI.operands()) {
        if (MO2.isReg() && MO2.isDef() && MO2.getReg() == Reg && !MO2.isDead()) {
          FoundDef = true;
          break;
        }
      }
      if (FoundDef)
        continue;
    }

    // Record this local VReg use.
    VReg2SUnitMultiMap::iterator UI = VRegUses.find(Reg);
    for (; UI != VRegUses.end(); ++UI) {
      if (UI->SU == &SU)
        break;
    }
    if (UI == VRegUses.end())
      VRegUses.insert(VReg2SUnit(Reg, LaneBitmask::getNone(), &SU));
  }
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);

  SUPressureDiffs.clear();

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
  ShouldTrackLaneMasks = SchedImpl->shouldTrackLaneMasks();

  assert((!ShouldTrackLaneMasks || ShouldTrackPressure) &&
         "ShouldTrackLaneMasks requires ShouldTrackPressure");
}

// Setup the register pressure trackers for the top scheduled and bottom
// scheduled regions.
void ScheduleDAGMILive::initRegPressure() {
  VRegUses.clear();
  VRegUses.setUniverse(MRI.getNumVirtRegs());
  for (SUnit &SU : SUnits)
    collectVRegUses(SU);

  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin,
                    ShouldTrackLaneMasks, false);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                    ShouldTrackLaneMasks, false);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  LLVM_DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    LLVM_DEBUG(dbgs() << "Live Thru: ";
               dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // For each live out vreg reduce the pressure change associated with other
  // uses of the same vreg below the live-out reaching def.
  updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd) {
    SmallVector<RegisterMaskPair, 8> LiveUses;
    BotRPTracker.recede(&LiveUses);
    updatePressureDiffs(LiveUses);
  }

  LLVM_DEBUG(dbgs() << "Top Pressure:\n";
             dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
             dbgs() << "Bottom Pressure:\n";
             dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI););

  assert((BotRPTracker.getPos() == RegionEnd ||
          (RegionEnd->isDebugInstr() &&
           BotRPTracker.getPos() == priorNonDebug(RegionEnd, RegionBegin))) &&
         "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also
  // track the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      LLVM_DEBUG(dbgs() << TRI->getRegPressureSetName(i) << " Limit " << Limit
                        << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureChange(i));
    }
  }
  LLVM_DEBUG(dbgs() << "Excess PSets: ";
             for (const PressureChange &RCPS
                  : RegionCriticalPSets) dbgs()
             << TRI->getRegPressureSetName(RCPS.getPSet()) << " ";
             dbgs() << "\n");
}

void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
                        const std::vector<unsigned> &NewMaxPressure) {
  const PressureDiff &PDiff = getPressureDiff(SU);
  unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
  for (const PressureChange &PC : PDiff) {
    if (!PC.isValid())
      break;
    unsigned ID = PC.getPSet();
    while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
      ++CritIdx;
    if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
      if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
          && NewMaxPressure[ID] <= (unsigned)std::numeric_limits<int16_t>::max())
        RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
    }
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
    if (NewMaxPressure[ID] >= Limit - 2) {
      LLVM_DEBUG(dbgs() << " " << TRI->getRegPressureSetName(ID) << ": "
                        << NewMaxPressure[ID]
                        << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ")
                        << Limit << "(+ " << BotRPTracker.getLiveThru()[ID]
                        << " livethru)\n");
    }
  }
}

/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(
    ArrayRef<RegisterMaskPair> LiveUses) {
  for (const RegisterMaskPair &P : LiveUses) {
    unsigned Reg = P.RegUnit;
    /// FIXME: Currently assuming single-use physregs.
    if (!Register::isVirtualRegister(Reg))
      continue;

    if (ShouldTrackLaneMasks) {
      // If the register has just become live then other uses won't change
      // this fact anymore => decrement pressure.
      // If the register has just become dead then other uses make it come
      // back to life => increment pressure.
      bool Decrement = P.LaneMask.any();

      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit &SU = *V2SU.SU;
        if (SU.isScheduled || &SU == &ExitSU)
          continue;

        PressureDiff &PDiff = getPressureDiff(&SU);
        PDiff.addPressureChange(Reg, Decrement, &MRI);
        LLVM_DEBUG(dbgs() << " UpdateRegP: SU(" << SU.NodeNum << ") "
                          << printReg(Reg, TRI) << ':'
                          << PrintLaneMask(P.LaneMask) << ' ' << *SU.getInstr();
                   dbgs() << " to "; PDiff.dump(*TRI););
      }
    } else {
      assert(P.LaneMask.any());
      LLVM_DEBUG(dbgs() << " LiveReg: " << printVRegOrUnit(Reg, TRI) << "\n");
      // This may be called before CurrentBottom has been initialized. However,
      // BotRPTracker must have a valid position. We want the value live into
      // the instruction or live out of the block, so ask for the previous
      // instruction's live-out.
      const LiveInterval &LI = LIS->getInterval(Reg);
      VNInfo *VNI;
      MachineBasicBlock::const_iterator I =
        nextIfDebug(BotRPTracker.getPos(), BB->end());
      if (I == BB->end())
        VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
      else {
        LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*I));
        VNI = LRQ.valueIn();
      }
      // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
      assert(VNI && "No live value at use.");
      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit *SU = V2SU.SU;
        // If this use comes before the reaching def, it cannot be a last use,
        // so decrease its pressure change.
        if (!SU->isScheduled && SU != &ExitSU) {
          LiveQueryResult LRQ =
            LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
          if (LRQ.valueIn() == VNI) {
            PressureDiff &PDiff = getPressureDiff(SU);
            PDiff.addPressureChange(Reg, true, &MRI);
            LLVM_DEBUG(dbgs() << " UpdateRegP: SU(" << SU->NodeNum << ") "
                              << *SU->getInstr();
                       dbgs() << " to "; PDiff.dump(*TRI););
          }
        }
      }
    }
  }
}

void ScheduleDAGMILive::dump() const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  for (const SUnit &SU : SUnits) {
    dumpNodeAll(SU);
    if (ShouldTrackPressure) {
      dbgs() << " Pressure Diff : ";
      getPressureDiff(&SU).dump(*TRI);
    }
    dbgs() << " Single Issue : ";
    if (SchedModel.mustBeginGroup(SU.getInstr()) &&
        SchedModel.mustEndGroup(SU.getInstr()))
      dbgs() << "true;";
    else
      dbgs() << "false;";
    dbgs() << '\n';
  }
  if (ExitSU.getInstr() != nullptr)
    dumpNodeAll(ExitSU);
#endif
}
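// As the comment below notes, implementing MachineSchedStrategy is normally
// all a new scheduling algorithm needs. A minimal illustrative sketch (not
// part of this file; names are hypothetical) of a top-down FIFO strategy:
//
// \code
//   struct FIFOStrategy : public MachineSchedStrategy {
//     std::vector<SUnit *> ReadyQ;
//     void initialize(ScheduleDAGMI *DAG) override { ReadyQ.clear(); }
//     SUnit *pickNode(bool &IsTopNode) override {
//       IsTopNode = true;
//       if (ReadyQ.empty())
//         return nullptr;
//       SUnit *SU = ReadyQ.front();
//       ReadyQ.erase(ReadyQ.begin());
//       return SU;
//     }
//     void schedNode(SUnit *SU, bool IsTopNode) override {}
//     void releaseTopNode(SUnit *SU) override { ReadyQ.push_back(SU); }
//     void releaseBottomNode(SUnit *SU) override {}
//   };
// \endcode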
/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMILive then it will want to override this virtual method in
/// order to update any specialized state.
void ScheduleDAGMILive::schedule() {
  LLVM_DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
  LLVM_DEBUG(SchedImpl->dumpPolicy());
  buildDAGWithRegPressure();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  LLVM_DEBUG(dump());
  if (PrintDAGs) dump();
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    LLVM_DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    if (DFSResult) {
      unsigned SubtreeID = DFSResult->getSubtreeID(SU);
      if (!ScheduledTrees.test(SubtreeID)) {
        ScheduledTrees.set(SubtreeID);
        DFSResult->scheduleTree(SubtreeID);
        SchedImpl->scheduleTree(SubtreeID);
      }
    }

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  LLVM_DEBUG({
    dbgs() << "*** Final schedule for "
           << printMBBReference(*begin()->getParent()) << " ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
}

/// Build the DAG and setup three register pressure trackers.
void ScheduleDAGMILive::buildDAGWithRegPressure() {
  if (!ShouldTrackPressure) {
    RPTracker.reset();
    RegionCriticalPSets.clear();
    buildSchedGraph(AA);
    return;
  }

  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 ShouldTrackLaneMasks, /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker, &SUPressureDiffs, LIS, ShouldTrackLaneMasks);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

void ScheduleDAGMILive::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single block loops. To handle loops
/// that span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes. For example, consider
/// the following instruction sequence where each instruction has unit latency
/// and defines an eponymous virtual register:
///
/// a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
///
/// This could be relevant to PostRA scheduling, but is currently implemented
/// assuming LiveIntervals.
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
  // This only applies to single block loops.
  if (!BB->isSuccessor(BB))
    return 0;

  unsigned MaxCyclicLatency = 0;
  // Visit each live out vreg def to find def/use pairs that cross iterations.
  for (const RegisterMaskPair &P : RPTracker.getPressure().LiveOutRegs) {
    unsigned Reg = P.RegUnit;
    if (!Register::isVirtualRegister(Reg))
      continue;
    const LiveInterval &LI = LIS->getInterval(Reg);
    const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    if (!DefVNI)
      continue;

    MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
    const SUnit *DefSU = getSUnit(DefMI);
    if (!DefSU)
      continue;

    unsigned LiveOutHeight = DefSU->getHeight();
    unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
    // Visit all local users of the vreg def.
    for (const VReg2SUnit &V2SU
         : make_range(VRegUses.find(Reg), VRegUses.end())) {
      SUnit *SU = V2SU.SU;
      if (SU == &ExitSU)
        continue;

      // Only consider uses of the phi.
      LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
      if (!LRQ.valueIn()->isPHIDef())
        continue;

      // Assume that a path spanning two iterations is a cycle, which could
      // overestimate in strange cases. This allows cyclic latency to be
      // estimated as the minimum slack of the vreg's depth or height.
      unsigned CyclicLatency = 0;
      if (LiveOutDepth > SU->getDepth())
        CyclicLatency = LiveOutDepth - SU->getDepth();

      unsigned LiveInHeight = SU->getHeight() + DefSU->Latency;
      if (LiveInHeight > LiveOutHeight) {
        if (LiveInHeight - LiveOutHeight < CyclicLatency)
          CyclicLatency = LiveInHeight - LiveOutHeight;
      } else
        CyclicLatency = 0;

      LLVM_DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
                        << SU->NodeNum << ") = " << CyclicLatency << "c\n");
      if (CyclicLatency > MaxCyclicLatency)
        MaxCyclicLatency = CyclicLatency;
    }
  }
  LLVM_DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
  return MaxCyclicLatency;
}
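// A strategy typically samples the estimate above once per region after the
// roots are registered; sketched usage under that assumption (MyStrategy is
// hypothetical, and its DAG member is the ScheduleDAGMILive being scheduled):
//
// \code
//   void MyStrategy::registerRoots() {
//     unsigned CyclicCritPath = DAG->computeCyclicCriticalPath();
//     // Compare against the acyclic critical path to detect latency-limited
//     // loops and bias the heuristics accordingly.
//   }
// \endcode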
/// Release ExitSU predecessors and setup scheduler queues. Re-position
/// the Top RP tracker in case the region beginning has changed.
void ScheduleDAGMILive::initQueues(ArrayRef<SUnit*> TopRoots,
                                   ArrayRef<SUnit*> BotRoots) {
  ScheduleDAGMI::initQueues(TopRoots, BotRoots);
  if (ShouldTrackPressure) {
    assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
    TopRPTracker.setPos(CurrentTop);
  }
}

/// Move an instruction and update register pressure.
void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    if (ShouldTrackPressure) {
      // Update top scheduled pressure.
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      TopRPTracker.advance(RegOpers);
      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
      LLVM_DEBUG(dbgs() << "Top Pressure:\n"; dumpRegSetPressure(
                     TopRPTracker.getRegSetPressureAtPos(), TRI););

      updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
    }
  } else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
      BotRPTracker.setPos(CurrentBottom);
    }
    if (ShouldTrackPressure) {
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      if (BotRPTracker.getPos() != CurrentBottom)
        BotRPTracker.recedeSkipDebugValues();
      SmallVector<RegisterMaskPair, 8> LiveUses;
      BotRPTracker.recede(RegOpers, &LiveUses);
      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
      LLVM_DEBUG(dbgs() << "Bottom Pressure:\n"; dumpRegSetPressure(
                     BotRPTracker.getRegSetPressureAtPos(), TRI););

      updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
      updatePressureDiffs(LiveUses);
    }
  }
}

//===----------------------------------------------------------------------===//
// BaseMemOpClusterMutation - DAG post-processing to cluster loads or stores.
//===----------------------------------------------------------------------===//
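// Clustering adds weak (Cluster) edges so the strategy tries to emit
// neighboring mem ops back to back. As a motivating illustration (target
// behavior varies): two loads from Base+4 and Base+8 that end up adjacent can
// often be merged by a later pass, or matched into a single paired access
// such as AArch64's ldp, but only if no unrelated instruction is scheduled
// between them.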
namespace {

/// Post-process the DAG to create cluster edges between neighboring
/// loads or between neighboring stores.
class BaseMemOpClusterMutation : public ScheduleDAGMutation {
  struct MemOpInfo {
    SUnit *SU;
    SmallVector<const MachineOperand *, 4> BaseOps;
    int64_t Offset;
    unsigned Width;

    MemOpInfo(SUnit *SU, ArrayRef<const MachineOperand *> BaseOps,
              int64_t Offset, unsigned Width)
        : SU(SU), BaseOps(BaseOps.begin(), BaseOps.end()), Offset(Offset),
          Width(Width) {}

    static bool Compare(const MachineOperand *const &A,
                        const MachineOperand *const &B) {
      if (A->getType() != B->getType())
        return A->getType() < B->getType();
      if (A->isReg())
        return A->getReg() < B->getReg();
      if (A->isFI()) {
        const MachineFunction &MF = *A->getParent()->getParent()->getParent();
        const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
        bool StackGrowsDown = TFI.getStackGrowthDirection() ==
                              TargetFrameLowering::StackGrowsDown;
        return StackGrowsDown ? A->getIndex() > B->getIndex()
                              : A->getIndex() < B->getIndex();
      }

      llvm_unreachable("MemOpClusterMutation only supports register or frame "
                       "index bases.");
    }

    bool operator<(const MemOpInfo &RHS) const {
      // FIXME: Don't compare everything twice. Maybe use C++20 three way
      // comparison instead when it's available.
      if (std::lexicographical_compare(BaseOps.begin(), BaseOps.end(),
                                       RHS.BaseOps.begin(), RHS.BaseOps.end(),
                                       Compare))
        return true;
      if (std::lexicographical_compare(RHS.BaseOps.begin(), RHS.BaseOps.end(),
                                       BaseOps.begin(), BaseOps.end(), Compare))
        return false;
      if (Offset != RHS.Offset)
        return Offset < RHS.Offset;
      return SU->NodeNum < RHS.SU->NodeNum;
    }
  };

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  bool IsLoad;

public:
  BaseMemOpClusterMutation(const TargetInstrInfo *tii,
                           const TargetRegisterInfo *tri, bool IsLoad)
      : TII(tii), TRI(tri), IsLoad(IsLoad) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;

protected:
  void clusterNeighboringMemOps(ArrayRef<MemOpInfo> MemOps,
                                ScheduleDAGInstrs *DAG);
  void collectMemOpRecords(std::vector<SUnit> &SUnits,
                           SmallVectorImpl<MemOpInfo> &MemOpRecords);
};

class StoreClusterMutation : public BaseMemOpClusterMutation {
public:
  StoreClusterMutation(const TargetInstrInfo *tii,
                       const TargetRegisterInfo *tri)
      : BaseMemOpClusterMutation(tii, tri, false) {}
};

class LoadClusterMutation : public BaseMemOpClusterMutation {
public:
  LoadClusterMutation(const TargetInstrInfo *tii, const TargetRegisterInfo *tri)
      : BaseMemOpClusterMutation(tii, tri, true) {}
};

} // end anonymous namespace

namespace llvm {

std::unique_ptr<ScheduleDAGMutation>
createLoadClusterDAGMutation(const TargetInstrInfo *TII,
                             const TargetRegisterInfo *TRI) {
  return EnableMemOpCluster ? std::make_unique<LoadClusterMutation>(TII, TRI)
                            : nullptr;
}
std::make_unique<StoreClusterMutation>(TII, TRI)
1564                             : nullptr;
1565 }
1566 
1567 } // end namespace llvm
1568 
1569 // Sort all the loads/stores first. Then, for each load/store, check the
1570 // following loads/stores one by one until we reach the first non-dependent
1571 // one, and call the target hook to see whether the pair can be clustered.
1572 void BaseMemOpClusterMutation::clusterNeighboringMemOps(
1573     ArrayRef<MemOpInfo> MemOpRecords, ScheduleDAGInstrs *DAG) {
1574   // Keep track of the current cluster length and bytes for each SUnit.
1575   DenseMap<unsigned, std::pair<unsigned, unsigned>> SUnit2ClusterInfo;
1576 
1577   // At this point, the `MemOpRecords` array must hold at least two mem ops.
1578   // Try to cluster the mem ops collected within the `MemOpRecords` array.
1579   for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
1580     // The decision to cluster mem ops is based on target-dependent logic.
1581     auto MemOpa = MemOpRecords[Idx];
1582 
1583     // Look for the next load/store with which to form a cluster.
1584     unsigned NextIdx = Idx + 1;
1585     for (; NextIdx < End; ++NextIdx)
1586       // Skip if MemOpb has already been clustered or has a dependency on
1587       // MemOpa.
1588       if (!SUnit2ClusterInfo.count(MemOpRecords[NextIdx].SU->NodeNum) &&
1589           !DAG->IsReachable(MemOpRecords[NextIdx].SU, MemOpa.SU) &&
1590           !DAG->IsReachable(MemOpa.SU, MemOpRecords[NextIdx].SU))
1591         break;
1592     if (NextIdx == End)
1593       continue;
1594 
1595     auto MemOpb = MemOpRecords[NextIdx];
1596     unsigned ClusterLength = 2;
1597     unsigned CurrentClusterBytes = MemOpa.Width + MemOpb.Width;
1598     if (SUnit2ClusterInfo.count(MemOpa.SU->NodeNum)) {
1599       ClusterLength = SUnit2ClusterInfo[MemOpa.SU->NodeNum].first + 1;
1600       CurrentClusterBytes =
1601           SUnit2ClusterInfo[MemOpa.SU->NodeNum].second + MemOpb.Width;
1602     }
1603 
1604     if (!TII->shouldClusterMemOps(MemOpa.BaseOps, MemOpb.BaseOps, ClusterLength,
1605                                   CurrentClusterBytes))
1606       continue;
1607 
1608     SUnit *SUa = MemOpa.SU;
1609     SUnit *SUb = MemOpb.SU;
1610     if (SUa->NodeNum > SUb->NodeNum)
1611       std::swap(SUa, SUb);
1612 
1613     // FIXME: Is this check really required?
1614     if (!DAG->addEdge(SUb, SDep(SUa, SDep::Cluster)))
1615       continue;
1616 
1617     LLVM_DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
1618                       << SUb->NodeNum << ")\n");
1619     ++NumClustered;
1620 
1621     if (IsLoad) {
1622       // Copy successor edges from SUa to SUb. Interleaving computation
1623       // dependent on SUa can prevent load combining due to register reuse.
1624       // Predecessor edges do not need to be copied from SUb to SUa since
1625       // nearby loads should have effectively the same inputs.
1626       for (const SDep &Succ : SUa->Succs) {
1627         if (Succ.getSUnit() == SUb)
1628           continue;
1629         LLVM_DEBUG(dbgs() << "  Copy Succ SU(" << Succ.getSUnit()->NodeNum
1630                           << ")\n");
1631         DAG->addEdge(Succ.getSUnit(), SDep(SUb, SDep::Artificial));
1632       }
1633     } else {
1634       // Copy predecessor edges from SUb to SUa so that the SUnits that SUb
1635       // depends on cannot be scheduled in between SUb and SUa. Successor
1636       // edges do not need to be copied from SUa to SUb since no one will
1637       // depend on stores.
1638       // Note that we need not worry about memory dependencies here: we never
1639       // try to cluster mem ops that have any memory dependency.
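      // (Illustrative sketch with hypothetical SUnits: if SUx -> SUb is an
      //  existing data edge and we cluster SUa with SUb, the artificial edge
      //  SUx -> SUa added below keeps SUx from landing between the pair.)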
1640       for (const SDep &Pred : SUb->Preds) {
1641         if (Pred.getSUnit() == SUa)
1642           continue;
1643         LLVM_DEBUG(dbgs() << "  Copy Pred SU(" << Pred.getSUnit()->NodeNum
1644                           << ")\n");
1645         DAG->addEdge(SUa, SDep(Pred.getSUnit(), SDep::Artificial));
1646       }
1647     }
1648 
1649     SUnit2ClusterInfo[MemOpb.SU->NodeNum] = {ClusterLength,
1650                                              CurrentClusterBytes};
1651 
1652     LLVM_DEBUG(dbgs() << "  Curr cluster length: " << ClusterLength
1653                       << ", Curr cluster bytes: " << CurrentClusterBytes
1654                       << "\n");
1655   }
1656 }
1657 
1658 void BaseMemOpClusterMutation::collectMemOpRecords(
1659     std::vector<SUnit> &SUnits, SmallVectorImpl<MemOpInfo> &MemOpRecords) {
1660   for (auto &SU : SUnits) {
1661     if ((IsLoad && !SU.getInstr()->mayLoad()) ||
1662         (!IsLoad && !SU.getInstr()->mayStore()))
1663       continue;
1664 
1665     const MachineInstr &MI = *SU.getInstr();
1666     SmallVector<const MachineOperand *, 4> BaseOps;
1667     int64_t Offset;
1668     bool OffsetIsScalable;
1669     unsigned Width;
1670     if (TII->getMemOperandsWithOffsetWidth(MI, BaseOps, Offset,
1671                                            OffsetIsScalable, Width, TRI)) {
1672       MemOpRecords.push_back(MemOpInfo(&SU, BaseOps, Offset, Width));
1673 
1674       LLVM_DEBUG(dbgs() << "Num BaseOps: " << BaseOps.size() << ", Offset: "
1675                         << Offset << ", OffsetIsScalable: " << OffsetIsScalable
1676                         << ", Width: " << Width << "\n");
1677     }
1678 #ifndef NDEBUG
1679     for (auto *Op : BaseOps)
1680       assert(Op);
1681 #endif
1682   }
1683 }
1684 
1685 /// Callback from DAG postProcessing to create cluster edges for loads/stores.
1686 void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAG) {
1687   // Collect all the clusterable loads/stores.
1688   SmallVector<MemOpInfo, 32> MemOpRecords;
1689   collectMemOpRecords(DAG->SUnits, MemOpRecords);
1690 
1691   if (MemOpRecords.size() < 2)
1692     return;
1693 
1694   // Sort the loads/stores so that we can stop growing a cluster as early as
1695   // possible.
1696   llvm::sort(MemOpRecords);
1697 
1698   // Try to cluster all the neighboring loads/stores.
1699   clusterNeighboringMemOps(MemOpRecords, DAG);
1700 }
1701 
1702 //===----------------------------------------------------------------------===//
1703 // CopyConstrain - DAG post-processing to encourage copy elimination.
1704 //===----------------------------------------------------------------------===//
1705 
1706 namespace {
1707 
1708 /// Post-process the DAG to create weak edges from all uses of a copy to
1709 /// the one use that defines the copy's source vreg, most likely an induction
1710 /// variable increment.
1711 class CopyConstrain : public ScheduleDAGMutation {
1712   // Transient state.
1713   SlotIndex RegionBeginIdx;
1714 
1715   // RegionEndIdx is the slot index of the last non-debug instruction in the
1716   // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
1717   SlotIndex RegionEndIdx;
1718 
1719 public:
1720   CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}
1721 
1722   void apply(ScheduleDAGInstrs *DAGInstrs) override;
1723 
1724 protected:
1725   void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
1726 };
1727 
1728 } // end anonymous namespace
1729 
1730 namespace llvm {
1731 
1732 std::unique_ptr<ScheduleDAGMutation>
1733 createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
1734                                const TargetRegisterInfo *TRI) {
1735   return std::make_unique<CopyConstrain>(TII, TRI);
1736 }
1737 
1738 } // end namespace llvm
1739 
1740 /// constrainLocalCopy handles two possibilities:
1741 /// 1) Local src:
1742 ///   I0:     = dst
1743 ///   I1: src = ...
1744 ///   I2:     = dst
1745 ///   I3: dst = src (copy)
1746 /// (create pred->succ edges I0->I1, I2->I1)
1747 ///
1748 /// 2) Local copy:
1749 ///   I0: dst = src (copy)
1750 ///   I1:     = dst
1751 ///   I2: src = ...
1752 ///   I3:     = dst
1753 /// (create pred->succ edges I1->I2, I3->I2)
1754 ///
1755 /// Although the MachineScheduler is currently constrained to single blocks,
1756 /// this algorithm should handle extended blocks. An EBB is a set of
1757 /// contiguously numbered blocks such that the previous block in the EBB is
1758 /// always the single predecessor.
1759 void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
1760   LiveIntervals *LIS = DAG->getLIS();
1761   MachineInstr *Copy = CopySU->getInstr();
1762 
1763   // Check for pure vreg copies.
1764   const MachineOperand &SrcOp = Copy->getOperand(1);
1765   Register SrcReg = SrcOp.getReg();
1766   if (!Register::isVirtualRegister(SrcReg) || !SrcOp.readsReg())
1767     return;
1768 
1769   const MachineOperand &DstOp = Copy->getOperand(0);
1770   Register DstReg = DstOp.getReg();
1771   if (!Register::isVirtualRegister(DstReg) || DstOp.isDead())
1772     return;
1773 
1774   // Check if either the dest or source is local. If it's live across a back
1775   // edge, it's not local. Note that if both vregs are live across the back
1776   // edge, we cannot successfully constrain the copy without cyclic scheduling.
1777   // If both the copy's source and dest are local live intervals, then we
1778   // should treat the dest as the global for the purpose of adding
1779   // constraints. This adds edges from the source's other uses to the copy.
1780   unsigned LocalReg = SrcReg;
1781   unsigned GlobalReg = DstReg;
1782   LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
1783   if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
1784     LocalReg = DstReg;
1785     GlobalReg = SrcReg;
1786     LocalLI = &LIS->getInterval(LocalReg);
1787     if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
1788       return;
1789   }
1790   LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);
1791 
1792   // Find the global segment after the start of the local LI.
1793   LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
1794   // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
1795   // local live range. We could create edges from other global uses to the
1796   // local start, but the coalescer should have already eliminated these
1797   // cases, so don't bother dealing with them.
1798   if (GlobalSegment == GlobalLI->end())
1799     return;
1800 
1801   // If GlobalSegment is killed at the LocalLI->start, the call to find()
1802   // returned the next global segment. But if GlobalSegment overlaps with
1803   // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
1804   // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
1805   if (GlobalSegment->contains(LocalLI->beginIndex()))
1806     ++GlobalSegment;
1807 
1808   if (GlobalSegment == GlobalLI->end())
1809     return;
1810 
1811   // Check if GlobalLI contains a hole in the vicinity of LocalLI.
1812   if (GlobalSegment != GlobalLI->begin()) {
1813     // Two-address defs have no hole.
1814     if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
1815                                GlobalSegment->start)) {
1816       return;
1817     }
1818     // If the prior global segment may be defined by the same two-address
1819     // instruction that also defines LocalLI, then we can't make a hole here.
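    // (Illustrative sketch: if a single two-address instruction produces
    //  both defs, they share a SlotIndex, so there is no gap between the
    //  segments to open.)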
1820 if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start, 1821 LocalLI->beginIndex())) { 1822 return; 1823 } 1824 // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise 1825 // it would be a disconnected component in the live range. 1826 assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() && 1827 "Disconnected LRG within the scheduling region."); 1828 } 1829 MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start); 1830 if (!GlobalDef) 1831 return; 1832 1833 SUnit *GlobalSU = DAG->getSUnit(GlobalDef); 1834 if (!GlobalSU) 1835 return; 1836 1837 // GlobalDef is the bottom of the GlobalLI hole. Open the hole by 1838 // constraining the uses of the last local def to precede GlobalDef. 1839 SmallVector<SUnit*,8> LocalUses; 1840 const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex()); 1841 MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def); 1842 SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef); 1843 for (const SDep &Succ : LastLocalSU->Succs) { 1844 if (Succ.getKind() != SDep::Data || Succ.getReg() != LocalReg) 1845 continue; 1846 if (Succ.getSUnit() == GlobalSU) 1847 continue; 1848 if (!DAG->canAddEdge(GlobalSU, Succ.getSUnit())) 1849 return; 1850 LocalUses.push_back(Succ.getSUnit()); 1851 } 1852 // Open the top of the GlobalLI hole by constraining any earlier global uses 1853 // to precede the start of LocalLI. 1854 SmallVector<SUnit*,8> GlobalUses; 1855 MachineInstr *FirstLocalDef = 1856 LIS->getInstructionFromIndex(LocalLI->beginIndex()); 1857 SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef); 1858 for (const SDep &Pred : GlobalSU->Preds) { 1859 if (Pred.getKind() != SDep::Anti || Pred.getReg() != GlobalReg) 1860 continue; 1861 if (Pred.getSUnit() == FirstLocalSU) 1862 continue; 1863 if (!DAG->canAddEdge(FirstLocalSU, Pred.getSUnit())) 1864 return; 1865 GlobalUses.push_back(Pred.getSUnit()); 1866 } 1867 LLVM_DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n"); 1868 // Add the weak edges. 1869 for (SmallVectorImpl<SUnit*>::const_iterator 1870 I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) { 1871 LLVM_DEBUG(dbgs() << " Local use SU(" << (*I)->NodeNum << ") -> SU(" 1872 << GlobalSU->NodeNum << ")\n"); 1873 DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak)); 1874 } 1875 for (SmallVectorImpl<SUnit*>::const_iterator 1876 I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) { 1877 LLVM_DEBUG(dbgs() << " Global use SU(" << (*I)->NodeNum << ") -> SU(" 1878 << FirstLocalSU->NodeNum << ")\n"); 1879 DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak)); 1880 } 1881 } 1882 1883 /// Callback from DAG postProcessing to create weak edges to encourage 1884 /// copy elimination. 
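/// (Weak edges are soft constraints honored via WeakPredsLeft/WeakSuccsLeft
/// and the Weak heuristic rather than hard ordering, so the scheduler may
/// still separate the endpoints when stronger constraints win.)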
1885 void CopyConstrain::apply(ScheduleDAGInstrs *DAGInstrs) { 1886 ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs); 1887 assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals"); 1888 1889 MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end()); 1890 if (FirstPos == DAG->end()) 1891 return; 1892 RegionBeginIdx = DAG->getLIS()->getInstructionIndex(*FirstPos); 1893 RegionEndIdx = DAG->getLIS()->getInstructionIndex( 1894 *priorNonDebug(DAG->end(), DAG->begin())); 1895 1896 for (SUnit &SU : DAG->SUnits) { 1897 if (!SU.getInstr()->isCopy()) 1898 continue; 1899 1900 constrainLocalCopy(&SU, static_cast<ScheduleDAGMILive*>(DAG)); 1901 } 1902 } 1903 1904 //===----------------------------------------------------------------------===// 1905 // MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler 1906 // and possibly other custom schedulers. 1907 //===----------------------------------------------------------------------===// 1908 1909 static const unsigned InvalidCycle = ~0U; 1910 1911 SchedBoundary::~SchedBoundary() { delete HazardRec; } 1912 1913 /// Given a Count of resource usage and a Latency value, return true if a 1914 /// SchedBoundary becomes resource limited. 1915 /// If we are checking after scheduling a node, we should return true when 1916 /// we just reach the resource limit. 1917 static bool checkResourceLimit(unsigned LFactor, unsigned Count, 1918 unsigned Latency, bool AfterSchedNode) { 1919 int ResCntFactor = (int)(Count - (Latency * LFactor)); 1920 if (AfterSchedNode) 1921 return ResCntFactor >= (int)LFactor; 1922 else 1923 return ResCntFactor > (int)LFactor; 1924 } 1925 1926 void SchedBoundary::reset() { 1927 // A new HazardRec is created for each DAG and owned by SchedBoundary. 1928 // Destroying and reconstructing it is very expensive though. So keep 1929 // invalid, placeholder HazardRecs. 1930 if (HazardRec && HazardRec->isEnabled()) { 1931 delete HazardRec; 1932 HazardRec = nullptr; 1933 } 1934 Available.clear(); 1935 Pending.clear(); 1936 CheckPending = false; 1937 CurrCycle = 0; 1938 CurrMOps = 0; 1939 MinReadyCycle = std::numeric_limits<unsigned>::max(); 1940 ExpectedLatency = 0; 1941 DependentLatency = 0; 1942 RetiredMOps = 0; 1943 MaxExecutedResCount = 0; 1944 ZoneCritResIdx = 0; 1945 IsResourceLimited = false; 1946 ReservedCycles.clear(); 1947 ReservedCyclesIndex.clear(); 1948 #ifndef NDEBUG 1949 // Track the maximum number of stall cycles that could arise either from the 1950 // latency of a DAG edge or the number of cycles that a processor resource is 1951 // reserved (SchedBoundary::ReservedCycles). 1952 MaxObservedStall = 0; 1953 #endif 1954 // Reserve a zero-count for invalid CritResIdx. 
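  // (Index 0 acts as a sentinel for the invalid ZoneCritResIdx: the assert
  //  below relies on ExecutedResCounts[0] always reading zero.)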
1955 ExecutedResCounts.resize(1); 1956 assert(!ExecutedResCounts[0] && "nonzero count for bad resource"); 1957 } 1958 1959 void SchedRemainder:: 1960 init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) { 1961 reset(); 1962 if (!SchedModel->hasInstrSchedModel()) 1963 return; 1964 RemainingCounts.resize(SchedModel->getNumProcResourceKinds()); 1965 for (SUnit &SU : DAG->SUnits) { 1966 const MCSchedClassDesc *SC = DAG->getSchedClass(&SU); 1967 RemIssueCount += SchedModel->getNumMicroOps(SU.getInstr(), SC) 1968 * SchedModel->getMicroOpFactor(); 1969 for (TargetSchedModel::ProcResIter 1970 PI = SchedModel->getWriteProcResBegin(SC), 1971 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) { 1972 unsigned PIdx = PI->ProcResourceIdx; 1973 unsigned Factor = SchedModel->getResourceFactor(PIdx); 1974 RemainingCounts[PIdx] += (Factor * PI->Cycles); 1975 } 1976 } 1977 } 1978 1979 void SchedBoundary:: 1980 init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) { 1981 reset(); 1982 DAG = dag; 1983 SchedModel = smodel; 1984 Rem = rem; 1985 if (SchedModel->hasInstrSchedModel()) { 1986 unsigned ResourceCount = SchedModel->getNumProcResourceKinds(); 1987 ReservedCyclesIndex.resize(ResourceCount); 1988 ExecutedResCounts.resize(ResourceCount); 1989 unsigned NumUnits = 0; 1990 1991 for (unsigned i = 0; i < ResourceCount; ++i) { 1992 ReservedCyclesIndex[i] = NumUnits; 1993 NumUnits += SchedModel->getProcResource(i)->NumUnits; 1994 } 1995 1996 ReservedCycles.resize(NumUnits, InvalidCycle); 1997 } 1998 } 1999 2000 /// Compute the stall cycles based on this SUnit's ready time. Heuristics treat 2001 /// these "soft stalls" differently than the hard stall cycles based on CPU 2002 /// resources and computed by checkHazard(). A fully in-order model 2003 /// (MicroOpBufferSize==0) will not make use of this since instructions are not 2004 /// available for scheduling until they are ready. However, a weaker in-order 2005 /// model may use this for heuristics. For example, if a processor has in-order 2006 /// behavior when reading certain resources, this may come into play. 2007 unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) { 2008 if (!SU->isUnbuffered) 2009 return 0; 2010 2011 unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle); 2012 if (ReadyCycle > CurrCycle) 2013 return ReadyCycle - CurrCycle; 2014 return 0; 2015 } 2016 2017 /// Compute the next cycle at which the given processor resource unit 2018 /// can be scheduled. 2019 unsigned SchedBoundary::getNextResourceCycleByInstance(unsigned InstanceIdx, 2020 unsigned Cycles) { 2021 unsigned NextUnreserved = ReservedCycles[InstanceIdx]; 2022 // If this resource has never been used, always return cycle zero. 2023 if (NextUnreserved == InvalidCycle) 2024 return 0; 2025 // For bottom-up scheduling add the cycles needed for the current operation. 2026 if (!isTop()) 2027 NextUnreserved += Cycles; 2028 return NextUnreserved; 2029 } 2030 2031 /// Compute the next cycle at which the given processor resource can be 2032 /// scheduled. Returns the next cycle and the index of the processor resource 2033 /// instance in the reserved cycles vector. 
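/// (Worked sketch: in a top-down zone with two instances of a resource
/// reserved until cycles {7, 4}, the pair (4, index of the second instance)
/// is returned, preferring the least-booked unit.)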
2034 std::pair<unsigned, unsigned> 2035 SchedBoundary::getNextResourceCycle(unsigned PIdx, unsigned Cycles) { 2036 unsigned MinNextUnreserved = InvalidCycle; 2037 unsigned InstanceIdx = 0; 2038 unsigned StartIndex = ReservedCyclesIndex[PIdx]; 2039 unsigned NumberOfInstances = SchedModel->getProcResource(PIdx)->NumUnits; 2040 assert(NumberOfInstances > 0 && 2041 "Cannot have zero instances of a ProcResource"); 2042 2043 for (unsigned I = StartIndex, End = StartIndex + NumberOfInstances; I < End; 2044 ++I) { 2045 unsigned NextUnreserved = getNextResourceCycleByInstance(I, Cycles); 2046 if (MinNextUnreserved > NextUnreserved) { 2047 InstanceIdx = I; 2048 MinNextUnreserved = NextUnreserved; 2049 } 2050 } 2051 return std::make_pair(MinNextUnreserved, InstanceIdx); 2052 } 2053 2054 /// Does this SU have a hazard within the current instruction group. 2055 /// 2056 /// The scheduler supports two modes of hazard recognition. The first is the 2057 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that 2058 /// supports highly complicated in-order reservation tables 2059 /// (ScoreboardHazardRecognizer) and arbitrary target-specific logic. 2060 /// 2061 /// The second is a streamlined mechanism that checks for hazards based on 2062 /// simple counters that the scheduler itself maintains. It explicitly checks 2063 /// for instruction dispatch limitations, including the number of micro-ops that 2064 /// can dispatch per cycle. 2065 /// 2066 /// TODO: Also check whether the SU must start a new group. 2067 bool SchedBoundary::checkHazard(SUnit *SU) { 2068 if (HazardRec->isEnabled() 2069 && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) { 2070 return true; 2071 } 2072 2073 unsigned uops = SchedModel->getNumMicroOps(SU->getInstr()); 2074 if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) { 2075 LLVM_DEBUG(dbgs() << " SU(" << SU->NodeNum << ") uops=" 2076 << SchedModel->getNumMicroOps(SU->getInstr()) << '\n'); 2077 return true; 2078 } 2079 2080 if (CurrMOps > 0 && 2081 ((isTop() && SchedModel->mustBeginGroup(SU->getInstr())) || 2082 (!isTop() && SchedModel->mustEndGroup(SU->getInstr())))) { 2083 LLVM_DEBUG(dbgs() << " hazard: SU(" << SU->NodeNum << ") must " 2084 << (isTop() ? "begin" : "end") << " group\n"); 2085 return true; 2086 } 2087 2088 if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) { 2089 const MCSchedClassDesc *SC = DAG->getSchedClass(SU); 2090 for (const MCWriteProcResEntry &PE : 2091 make_range(SchedModel->getWriteProcResBegin(SC), 2092 SchedModel->getWriteProcResEnd(SC))) { 2093 unsigned ResIdx = PE.ProcResourceIdx; 2094 unsigned Cycles = PE.Cycles; 2095 unsigned NRCycle, InstanceIdx; 2096 std::tie(NRCycle, InstanceIdx) = getNextResourceCycle(ResIdx, Cycles); 2097 if (NRCycle > CurrCycle) { 2098 #ifndef NDEBUG 2099 MaxObservedStall = std::max(Cycles, MaxObservedStall); 2100 #endif 2101 LLVM_DEBUG(dbgs() << " SU(" << SU->NodeNum << ") " 2102 << SchedModel->getResourceName(ResIdx) 2103 << '[' << InstanceIdx - ReservedCyclesIndex[ResIdx] << ']' 2104 << "=" << NRCycle << "c\n"); 2105 return true; 2106 } 2107 } 2108 } 2109 return false; 2110 } 2111 2112 // Find the unscheduled node in ReadySUs with the highest latency. 
2113 unsigned SchedBoundary::
2114 findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
2115   SUnit *LateSU = nullptr;
2116   unsigned RemLatency = 0;
2117   for (SUnit *SU : ReadySUs) {
2118     unsigned L = getUnscheduledLatency(SU);
2119     if (L > RemLatency) {
2120       RemLatency = L;
2121       LateSU = SU;
2122     }
2123   }
2124   if (LateSU) {
2125     LLVM_DEBUG(dbgs() << Available.getName() << " RemLatency SU("
2126                       << LateSU->NodeNum << ") " << RemLatency << "c\n");
2127   }
2128   return RemLatency;
2129 }
2130 
2131 // Count resources in this zone and the remaining unscheduled
2132 // instructions. Return the max count, scaled. Set OtherCritIdx to the
2133 // critical resource index, or zero if the zone is issue limited.
2134 unsigned SchedBoundary::
2135 getOtherResourceCount(unsigned &OtherCritIdx) {
2136   OtherCritIdx = 0;
2137   if (!SchedModel->hasInstrSchedModel())
2138     return 0;
2139 
2140   unsigned OtherCritCount = Rem->RemIssueCount
2141     + (RetiredMOps * SchedModel->getMicroOpFactor());
2142   LLVM_DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
2143                     << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
2144   for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
2145        PIdx != PEnd; ++PIdx) {
2146     unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
2147     if (OtherCount > OtherCritCount) {
2148       OtherCritCount = OtherCount;
2149       OtherCritIdx = PIdx;
2150     }
2151   }
2152   if (OtherCritIdx) {
2153     LLVM_DEBUG(
2154         dbgs() << "  " << Available.getName() << " + Remain CritRes: "
2155                << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
2156                << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
2157   }
2158   return OtherCritCount;
2159 }
2160 
2161 void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle, bool InPQueue,
2162                                 unsigned Idx) {
2163   assert(SU->getInstr() && "Scheduled SUnit must have instr");
2164 
2165 #ifndef NDEBUG
2166   // ReadyCycle has been bumped up to CurrCycle when this node was
2167   // scheduled, but CurrCycle may have been eagerly advanced immediately after
2168   // scheduling, so may now be greater than ReadyCycle.
2169   if (ReadyCycle > CurrCycle)
2170     MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
2171 #endif
2172 
2173   if (ReadyCycle < MinReadyCycle)
2174     MinReadyCycle = ReadyCycle;
2175 
2176   // Check for interlocks first. For the purpose of other heuristics, an
2177   // instruction that cannot issue appears as if it's not in the ReadyQueue.
2178   bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
2179   bool HazardDetected = (!IsBuffered && ReadyCycle > CurrCycle) ||
2180                         checkHazard(SU) || (Available.size() >= ReadyListLimit);
2181 
2182   if (!HazardDetected) {
2183     Available.push(SU);
2184 
2185     if (InPQueue)
2186       Pending.remove(Pending.begin() + Idx);
2187     return;
2188   }
2189 
2190   if (!InPQueue)
2191     Pending.push(SU);
2192 }
2193 
2194 /// Move the boundary of scheduled code by one cycle.
2195 void SchedBoundary::bumpCycle(unsigned NextCycle) {
2196   if (SchedModel->getMicroOpBufferSize() == 0) {
2197     assert(MinReadyCycle < std::numeric_limits<unsigned>::max() &&
2198            "MinReadyCycle uninitialized");
2199     if (MinReadyCycle > NextCycle)
2200       NextCycle = MinReadyCycle;
2201   }
2202   // Update the current micro-ops, which will issue in the next cycle.
2203   unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
2204   CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;
2205 
2206   // Decrement DependentLatency based on the next cycle.
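  // (Sketch: advancing 3 cycles against DependentLatency == 5 leaves 2;
  //  advancing 7 cycles clamps it at 0 instead of wrapping the unsigned.)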
2207 if ((NextCycle - CurrCycle) > DependentLatency) 2208 DependentLatency = 0; 2209 else 2210 DependentLatency -= (NextCycle - CurrCycle); 2211 2212 if (!HazardRec->isEnabled()) { 2213 // Bypass HazardRec virtual calls. 2214 CurrCycle = NextCycle; 2215 } else { 2216 // Bypass getHazardType calls in case of long latency. 2217 for (; CurrCycle != NextCycle; ++CurrCycle) { 2218 if (isTop()) 2219 HazardRec->AdvanceCycle(); 2220 else 2221 HazardRec->RecedeCycle(); 2222 } 2223 } 2224 CheckPending = true; 2225 IsResourceLimited = 2226 checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(), 2227 getScheduledLatency(), true); 2228 2229 LLVM_DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() 2230 << '\n'); 2231 } 2232 2233 void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) { 2234 ExecutedResCounts[PIdx] += Count; 2235 if (ExecutedResCounts[PIdx] > MaxExecutedResCount) 2236 MaxExecutedResCount = ExecutedResCounts[PIdx]; 2237 } 2238 2239 /// Add the given processor resource to this scheduled zone. 2240 /// 2241 /// \param Cycles indicates the number of consecutive (non-pipelined) cycles 2242 /// during which this resource is consumed. 2243 /// 2244 /// \return the next cycle at which the instruction may execute without 2245 /// oversubscribing resources. 2246 unsigned SchedBoundary:: 2247 countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) { 2248 unsigned Factor = SchedModel->getResourceFactor(PIdx); 2249 unsigned Count = Factor * Cycles; 2250 LLVM_DEBUG(dbgs() << " " << SchedModel->getResourceName(PIdx) << " +" 2251 << Cycles << "x" << Factor << "u\n"); 2252 2253 // Update Executed resources counts. 2254 incExecutedResources(PIdx, Count); 2255 assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted"); 2256 Rem->RemainingCounts[PIdx] -= Count; 2257 2258 // Check if this resource exceeds the current critical resource. If so, it 2259 // becomes the critical resource. 2260 if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) { 2261 ZoneCritResIdx = PIdx; 2262 LLVM_DEBUG(dbgs() << " *** Critical resource " 2263 << SchedModel->getResourceName(PIdx) << ": " 2264 << getResourceCount(PIdx) / SchedModel->getLatencyFactor() 2265 << "c\n"); 2266 } 2267 // For reserved resources, record the highest cycle using the resource. 2268 unsigned NextAvailable, InstanceIdx; 2269 std::tie(NextAvailable, InstanceIdx) = getNextResourceCycle(PIdx, Cycles); 2270 if (NextAvailable > CurrCycle) { 2271 LLVM_DEBUG(dbgs() << " Resource conflict: " 2272 << SchedModel->getResourceName(PIdx) 2273 << '[' << InstanceIdx - ReservedCyclesIndex[PIdx] << ']' 2274 << " reserved until @" << NextAvailable << "\n"); 2275 } 2276 return NextAvailable; 2277 } 2278 2279 /// Move the boundary of scheduled code by one SUnit. 2280 void SchedBoundary::bumpNode(SUnit *SU) { 2281 // Update the reservation table. 2282 if (HazardRec->isEnabled()) { 2283 if (!isTop() && SU->isCall) { 2284 // Calls are scheduled with their preceding instructions. For bottom-up 2285 // scheduling, clear the pipeline state before emitting. 2286 HazardRec->Reset(); 2287 } 2288 HazardRec->EmitInstruction(SU); 2289 // Scheduling an instruction may have made pending instructions available. 2290 CheckPending = true; 2291 } 2292 // checkHazard should prevent scheduling multiple instructions per cycle that 2293 // exceed the issue width. 
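  // (Illustrative: with IssueWidth == 4 and CurrMOps == 3, a 2-uop
  //  instruction should have been deferred by checkHazard, which is what the
  //  assert below verifies.)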
2294   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2295   unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
2296   assert(
2297       (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
2298       "Cannot schedule this instruction's MicroOps in the current cycle.");
2299 
2300   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
2301   LLVM_DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");
2302 
2303   unsigned NextCycle = CurrCycle;
2304   switch (SchedModel->getMicroOpBufferSize()) {
2305   case 0:
2306     assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
2307     break;
2308   case 1:
2309     if (ReadyCycle > NextCycle) {
2310       NextCycle = ReadyCycle;
2311       LLVM_DEBUG(dbgs() << "  *** Stall until: " << ReadyCycle << "\n");
2312     }
2313     break;
2314   default:
2315     // We don't currently model the OOO reorder buffer, so consider all
2316     // scheduled MOps to be "retired". We do loosely model in-order resource
2317     // latency. If this instruction uses an in-order resource, account for any
2318     // likely stall cycles.
2319     if (SU->isUnbuffered && ReadyCycle > NextCycle)
2320       NextCycle = ReadyCycle;
2321     break;
2322   }
2323   RetiredMOps += IncMOps;
2324 
2325   // Update resource counts and critical resource.
2326   if (SchedModel->hasInstrSchedModel()) {
2327     unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
2328     assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
2329     Rem->RemIssueCount -= DecRemIssue;
2330     if (ZoneCritResIdx) {
2331       // Scale scheduled micro-ops for comparing with the critical resource.
2332       unsigned ScaledMOps =
2333           RetiredMOps * SchedModel->getMicroOpFactor();
2334 
2335       // If scaled micro-ops are now more than the previous critical resource
2336       // by a full cycle, then micro-op issue becomes critical.
2337       if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
2338           >= (int)SchedModel->getLatencyFactor()) {
2339         ZoneCritResIdx = 0;
2340         LLVM_DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
2341                           << ScaledMOps / SchedModel->getLatencyFactor()
2342                           << "c\n");
2343       }
2344     }
2345     for (TargetSchedModel::ProcResIter
2346            PI = SchedModel->getWriteProcResBegin(SC),
2347            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2348       unsigned RCycle =
2349           countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
2350       if (RCycle > NextCycle)
2351         NextCycle = RCycle;
2352     }
2353     if (SU->hasReservedResource) {
2354       // For reserved resources, record the highest cycle using the resource.
2355       // For top-down scheduling, this is the cycle in which we schedule this
2356       // instruction plus the number of cycles the operation reserves the
2357       // resource. For bottom-up, it is simply the instruction's cycle.
2358       for (TargetSchedModel::ProcResIter
2359              PI = SchedModel->getWriteProcResBegin(SC),
2360              PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2361         unsigned PIdx = PI->ProcResourceIdx;
2362         if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
2363           unsigned ReservedUntil, InstanceIdx;
2364           std::tie(ReservedUntil, InstanceIdx) = getNextResourceCycle(PIdx, 0);
2365           if (isTop()) {
2366             ReservedCycles[InstanceIdx] =
2367                 std::max(ReservedUntil, NextCycle + PI->Cycles);
2368           } else
2369             ReservedCycles[InstanceIdx] = NextCycle;
2370         }
2371       }
2372     }
2373   }
2374   // Update ExpectedLatency and DependentLatency.
2375   unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
2376   unsigned &BotLatency = isTop() ?
DependentLatency : ExpectedLatency;
2377   if (SU->getDepth() > TopLatency) {
2378     TopLatency = SU->getDepth();
2379     LLVM_DEBUG(dbgs() << "  " << Available.getName() << " TopLatency SU("
2380                       << SU->NodeNum << ") " << TopLatency << "c\n");
2381   }
2382   if (SU->getHeight() > BotLatency) {
2383     BotLatency = SU->getHeight();
2384     LLVM_DEBUG(dbgs() << "  " << Available.getName() << " BotLatency SU("
2385                       << SU->NodeNum << ") " << BotLatency << "c\n");
2386   }
2387   // If we stall for any reason, bump the cycle.
2388   if (NextCycle > CurrCycle)
2389     bumpCycle(NextCycle);
2390   else
2391     // After updating ZoneCritResIdx and ExpectedLatency, check if we're
2392     // resource limited. If a stall occurred, bumpCycle does this.
2393     IsResourceLimited =
2394         checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(),
2395                            getScheduledLatency(), true);
2396 
2397   // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
2398   // resets CurrMOps. Loop to handle instructions with more MOps than can issue
2399   // in one cycle. Since we commonly reach the max MOps here, opportunistically
2400   // bump the cycle to avoid uselessly checking everything in the readyQ.
2401   CurrMOps += IncMOps;
2402 
2403   // Bump the cycle count for issue group constraints.
2404   // This must be done after NextCycle has been adjusted for all other stalls.
2405   // Calling bumpCycle(X) will reduce CurrMOps by one issue group and set
2406   // CurrCycle to X.
2407   if ((isTop() && SchedModel->mustEndGroup(SU->getInstr())) ||
2408       (!isTop() && SchedModel->mustBeginGroup(SU->getInstr()))) {
2409     LLVM_DEBUG(dbgs() << "  Bump cycle to " << (isTop() ? "end" : "begin")
2410                       << " group\n");
2411     bumpCycle(++NextCycle);
2412   }
2413 
2414   while (CurrMOps >= SchedModel->getIssueWidth()) {
2415     LLVM_DEBUG(dbgs() << "  *** Max MOps " << CurrMOps << " at cycle "
2416                       << CurrCycle << '\n');
2417     bumpCycle(++NextCycle);
2418   }
2419   LLVM_DEBUG(dumpScheduledState());
2420 }
2421 
2422 /// Release pending ready nodes into the available queue. This makes them
2423 /// visible to heuristics.
2424 void SchedBoundary::releasePending() {
2425   // If the available queue is empty, it is safe to reset MinReadyCycle.
2426   if (Available.empty())
2427     MinReadyCycle = std::numeric_limits<unsigned>::max();
2428 
2429   // Check to see if any of the pending instructions are ready to issue. If
2430   // so, add them to the available queue.
2431   for (unsigned I = 0, E = Pending.size(); I < E; ++I) {
2432     SUnit *SU = *(Pending.begin() + I);
2433     unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
2434 
2435     if (ReadyCycle < MinReadyCycle)
2436       MinReadyCycle = ReadyCycle;
2437 
2438     if (Available.size() >= ReadyListLimit)
2439       break;
2440 
2441     releaseNode(SU, ReadyCycle, true, I);
2442     if (E != Pending.size()) {
2443       --I;
2444       --E;
2445     }
2446   }
2447   CheckPending = false;
2448 }
2449 
2450 /// Remove SU from the ready set for this boundary.
2451 void SchedBoundary::removeReady(SUnit *SU) {
2452   if (Available.isInQueue(SU))
2453     Available.remove(Available.find(SU));
2454   else {
2455     assert(Pending.isInQueue(SU) && "bad ready count");
2456     Pending.remove(Pending.find(SU));
2457   }
2458 }
2459 
2460 /// If this queue only has one ready candidate, return it. As a side effect,
2461 /// defer any nodes that now hit a hazard, and advance the cycle until at least
2462 /// one node is ready. If multiple instructions are ready, return nullptr.
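/// (Sketch: if, after hazards are deferred, Available holds exactly {SU(5)},
/// SU(5) is returned; with two or more ready nodes the caller must run the
/// full candidate comparison instead.)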
2463 SUnit *SchedBoundary::pickOnlyChoice() { 2464 if (CheckPending) 2465 releasePending(); 2466 2467 // Defer any ready instrs that now have a hazard. 2468 for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) { 2469 if (checkHazard(*I)) { 2470 Pending.push(*I); 2471 I = Available.remove(I); 2472 continue; 2473 } 2474 ++I; 2475 } 2476 for (unsigned i = 0; Available.empty(); ++i) { 2477 // FIXME: Re-enable assert once PR20057 is resolved. 2478 // assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) && 2479 // "permanent hazard"); 2480 (void)i; 2481 bumpCycle(CurrCycle + 1); 2482 releasePending(); 2483 } 2484 2485 LLVM_DEBUG(Pending.dump()); 2486 LLVM_DEBUG(Available.dump()); 2487 2488 if (Available.size() == 1) 2489 return *Available.begin(); 2490 return nullptr; 2491 } 2492 2493 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 2494 // This is useful information to dump after bumpNode. 2495 // Note that the Queue contents are more useful before pickNodeFromQueue. 2496 LLVM_DUMP_METHOD void SchedBoundary::dumpScheduledState() const { 2497 unsigned ResFactor; 2498 unsigned ResCount; 2499 if (ZoneCritResIdx) { 2500 ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx); 2501 ResCount = getResourceCount(ZoneCritResIdx); 2502 } else { 2503 ResFactor = SchedModel->getMicroOpFactor(); 2504 ResCount = RetiredMOps * ResFactor; 2505 } 2506 unsigned LFactor = SchedModel->getLatencyFactor(); 2507 dbgs() << Available.getName() << " @" << CurrCycle << "c\n" 2508 << " Retired: " << RetiredMOps; 2509 dbgs() << "\n Executed: " << getExecutedCount() / LFactor << "c"; 2510 dbgs() << "\n Critical: " << ResCount / LFactor << "c, " 2511 << ResCount / ResFactor << " " 2512 << SchedModel->getResourceName(ZoneCritResIdx) 2513 << "\n ExpectedLatency: " << ExpectedLatency << "c\n" 2514 << (IsResourceLimited ? " - Resource" : " - Latency") 2515 << " limited.\n"; 2516 } 2517 #endif 2518 2519 //===----------------------------------------------------------------------===// 2520 // GenericScheduler - Generic implementation of MachineSchedStrategy. 2521 //===----------------------------------------------------------------------===// 2522 2523 void GenericSchedulerBase::SchedCandidate:: 2524 initResourceDelta(const ScheduleDAGMI *DAG, 2525 const TargetSchedModel *SchedModel) { 2526 if (!Policy.ReduceResIdx && !Policy.DemandResIdx) 2527 return; 2528 2529 const MCSchedClassDesc *SC = DAG->getSchedClass(SU); 2530 for (TargetSchedModel::ProcResIter 2531 PI = SchedModel->getWriteProcResBegin(SC), 2532 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) { 2533 if (PI->ProcResourceIdx == Policy.ReduceResIdx) 2534 ResDelta.CritResources += PI->Cycles; 2535 if (PI->ProcResourceIdx == Policy.DemandResIdx) 2536 ResDelta.DemandedResources += PI->Cycles; 2537 } 2538 } 2539 2540 /// Compute remaining latency. We need this both to determine whether the 2541 /// overall schedule has become latency-limited and whether the instructions 2542 /// outside this zone are resource or latency limited. 2543 /// 2544 /// The "dependent" latency is updated incrementally during scheduling as the 2545 /// max height/depth of scheduled nodes minus the cycles since it was 2546 /// scheduled: 2547 /// DLat = max (N.depth - (CurrCycle - N.ReadyCycle) for N in Zone 2548 /// 2549 /// The "independent" latency is the max ready queue depth: 2550 /// ILat = max N.depth for N in Available|Pending 2551 /// 2552 /// RemainingLatency is the greater of independent and dependent latency. 
2553 ///
2554 /// These computations are expensive, especially in DAGs with many edges, so
2555 /// only do them if necessary.
2556 static unsigned computeRemLatency(SchedBoundary &CurrZone) {
2557   unsigned RemLatency = CurrZone.getDependentLatency();
2558   RemLatency = std::max(RemLatency,
2559                         CurrZone.findMaxLatency(CurrZone.Available.elements()));
2560   RemLatency = std::max(RemLatency,
2561                         CurrZone.findMaxLatency(CurrZone.Pending.elements()));
2562   return RemLatency;
2563 }
2564 
2565 /// Returns true if the current cycle plus remaining latency is greater than
2566 /// the critical path in the scheduling region.
2567 bool GenericSchedulerBase::shouldReduceLatency(const CandPolicy &Policy,
2568                                                SchedBoundary &CurrZone,
2569                                                bool ComputeRemLatency,
2570                                                unsigned &RemLatency) const {
2571   // The current cycle is already greater than the critical path, so we are
2572   // already latency limited and don't need to compute the remaining latency.
2573   if (CurrZone.getCurrCycle() > Rem.CriticalPath)
2574     return true;
2575 
2576   // If we haven't scheduled anything yet, then we aren't latency limited.
2577   if (CurrZone.getCurrCycle() == 0)
2578     return false;
2579 
2580   if (ComputeRemLatency)
2581     RemLatency = computeRemLatency(CurrZone);
2582 
2583   return RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath;
2584 }
2585 
2586 /// Set the CandPolicy for a scheduling zone given the current resources and
2587 /// latencies inside and outside the zone.
2588 void GenericSchedulerBase::setPolicy(CandPolicy &Policy, bool IsPostRA,
2589                                      SchedBoundary &CurrZone,
2590                                      SchedBoundary *OtherZone) {
2591   // Apply preemptive heuristics based on the total latency and resources
2592   // inside and outside this zone. Potential stalls should be considered before
2593   // following this policy.
2594 
2595   // Compute the critical resource outside the zone.
2596   unsigned OtherCritIdx = 0;
2597   unsigned OtherCount =
2598       OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;
2599 
2600   bool OtherResLimited = false;
2601   unsigned RemLatency = 0;
2602   bool RemLatencyComputed = false;
2603   if (SchedModel->hasInstrSchedModel() && OtherCount != 0) {
2604     RemLatency = computeRemLatency(CurrZone);
2605     RemLatencyComputed = true;
2606     OtherResLimited = checkResourceLimit(SchedModel->getLatencyFactor(),
2607                                          OtherCount, RemLatency, false);
2608   }
2609 
2610   // Schedule aggressively for latency in PostRA mode. We don't check for
2611   // acyclic latency during PostRA, and highly out-of-order processors will
2612   // skip PostRA scheduling.
2613   if (!OtherResLimited &&
2614       (IsPostRA || shouldReduceLatency(Policy, CurrZone, !RemLatencyComputed,
2615                                        RemLatency))) {
2616     Policy.ReduceLatency |= true;
2617     LLVM_DEBUG(dbgs() << "  " << CurrZone.Available.getName()
2618                       << " RemainingLatency " << RemLatency << " + "
2619                       << CurrZone.getCurrCycle() << "c > CritPath "
2620                       << Rem.CriticalPath << "\n");
2621   }
2622   // If the same resource is limiting inside and outside the zone, do nothing.
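  // (Illustrative reading: if both zones are bottlenecked on the same FP
  //  pipe, biasing either zone toward or away from that pipe cannot help, so
  //  the early return below keeps the default policy.)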
2623 if (CurrZone.getZoneCritResIdx() == OtherCritIdx) 2624 return; 2625 2626 LLVM_DEBUG(if (CurrZone.isResourceLimited()) { 2627 dbgs() << " " << CurrZone.Available.getName() << " ResourceLimited: " 2628 << SchedModel->getResourceName(CurrZone.getZoneCritResIdx()) << "\n"; 2629 } if (OtherResLimited) dbgs() 2630 << " RemainingLimit: " 2631 << SchedModel->getResourceName(OtherCritIdx) << "\n"; 2632 if (!CurrZone.isResourceLimited() && !OtherResLimited) dbgs() 2633 << " Latency limited both directions.\n"); 2634 2635 if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx) 2636 Policy.ReduceResIdx = CurrZone.getZoneCritResIdx(); 2637 2638 if (OtherResLimited) 2639 Policy.DemandResIdx = OtherCritIdx; 2640 } 2641 2642 #ifndef NDEBUG 2643 const char *GenericSchedulerBase::getReasonStr( 2644 GenericSchedulerBase::CandReason Reason) { 2645 switch (Reason) { 2646 case NoCand: return "NOCAND "; 2647 case Only1: return "ONLY1 "; 2648 case PhysReg: return "PHYS-REG "; 2649 case RegExcess: return "REG-EXCESS"; 2650 case RegCritical: return "REG-CRIT "; 2651 case Stall: return "STALL "; 2652 case Cluster: return "CLUSTER "; 2653 case Weak: return "WEAK "; 2654 case RegMax: return "REG-MAX "; 2655 case ResourceReduce: return "RES-REDUCE"; 2656 case ResourceDemand: return "RES-DEMAND"; 2657 case TopDepthReduce: return "TOP-DEPTH "; 2658 case TopPathReduce: return "TOP-PATH "; 2659 case BotHeightReduce:return "BOT-HEIGHT"; 2660 case BotPathReduce: return "BOT-PATH "; 2661 case NextDefUse: return "DEF-USE "; 2662 case NodeOrder: return "ORDER "; 2663 }; 2664 llvm_unreachable("Unknown reason!"); 2665 } 2666 2667 void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) { 2668 PressureChange P; 2669 unsigned ResIdx = 0; 2670 unsigned Latency = 0; 2671 switch (Cand.Reason) { 2672 default: 2673 break; 2674 case RegExcess: 2675 P = Cand.RPDelta.Excess; 2676 break; 2677 case RegCritical: 2678 P = Cand.RPDelta.CriticalMax; 2679 break; 2680 case RegMax: 2681 P = Cand.RPDelta.CurrentMax; 2682 break; 2683 case ResourceReduce: 2684 ResIdx = Cand.Policy.ReduceResIdx; 2685 break; 2686 case ResourceDemand: 2687 ResIdx = Cand.Policy.DemandResIdx; 2688 break; 2689 case TopDepthReduce: 2690 Latency = Cand.SU->getDepth(); 2691 break; 2692 case TopPathReduce: 2693 Latency = Cand.SU->getHeight(); 2694 break; 2695 case BotHeightReduce: 2696 Latency = Cand.SU->getHeight(); 2697 break; 2698 case BotPathReduce: 2699 Latency = Cand.SU->getDepth(); 2700 break; 2701 } 2702 dbgs() << " Cand SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason); 2703 if (P.isValid()) 2704 dbgs() << " " << TRI->getRegPressureSetName(P.getPSet()) 2705 << ":" << P.getUnitInc() << " "; 2706 else 2707 dbgs() << " "; 2708 if (ResIdx) 2709 dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " "; 2710 else 2711 dbgs() << " "; 2712 if (Latency) 2713 dbgs() << " " << Latency << " cycles "; 2714 else 2715 dbgs() << " "; 2716 dbgs() << '\n'; 2717 } 2718 #endif 2719 2720 namespace llvm { 2721 /// Return true if this heuristic determines order. 
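/// (Sketch: tryLess(2, 5, TryCand, Cand, Stall) marks TryCand the winner for
/// reason Stall and returns true; tryLess(5, 5, ...) returns false so later
/// heuristics can break the tie.)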
2722 bool tryLess(int TryVal, int CandVal, 2723 GenericSchedulerBase::SchedCandidate &TryCand, 2724 GenericSchedulerBase::SchedCandidate &Cand, 2725 GenericSchedulerBase::CandReason Reason) { 2726 if (TryVal < CandVal) { 2727 TryCand.Reason = Reason; 2728 return true; 2729 } 2730 if (TryVal > CandVal) { 2731 if (Cand.Reason > Reason) 2732 Cand.Reason = Reason; 2733 return true; 2734 } 2735 return false; 2736 } 2737 2738 bool tryGreater(int TryVal, int CandVal, 2739 GenericSchedulerBase::SchedCandidate &TryCand, 2740 GenericSchedulerBase::SchedCandidate &Cand, 2741 GenericSchedulerBase::CandReason Reason) { 2742 if (TryVal > CandVal) { 2743 TryCand.Reason = Reason; 2744 return true; 2745 } 2746 if (TryVal < CandVal) { 2747 if (Cand.Reason > Reason) 2748 Cand.Reason = Reason; 2749 return true; 2750 } 2751 return false; 2752 } 2753 2754 bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand, 2755 GenericSchedulerBase::SchedCandidate &Cand, 2756 SchedBoundary &Zone) { 2757 if (Zone.isTop()) { 2758 // Prefer the candidate with the lesser depth, but only if one of them has 2759 // depth greater than the total latency scheduled so far, otherwise either 2760 // of them could be scheduled now with no stall. 2761 if (std::max(TryCand.SU->getDepth(), Cand.SU->getDepth()) > 2762 Zone.getScheduledLatency()) { 2763 if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(), 2764 TryCand, Cand, GenericSchedulerBase::TopDepthReduce)) 2765 return true; 2766 } 2767 if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(), 2768 TryCand, Cand, GenericSchedulerBase::TopPathReduce)) 2769 return true; 2770 } else { 2771 // Prefer the candidate with the lesser height, but only if one of them has 2772 // height greater than the total latency scheduled so far, otherwise either 2773 // of them could be scheduled now with no stall. 2774 if (std::max(TryCand.SU->getHeight(), Cand.SU->getHeight()) > 2775 Zone.getScheduledLatency()) { 2776 if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(), 2777 TryCand, Cand, GenericSchedulerBase::BotHeightReduce)) 2778 return true; 2779 } 2780 if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(), 2781 TryCand, Cand, GenericSchedulerBase::BotPathReduce)) 2782 return true; 2783 } 2784 return false; 2785 } 2786 } // end namespace llvm 2787 2788 static void tracePick(GenericSchedulerBase::CandReason Reason, bool IsTop) { 2789 LLVM_DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ") 2790 << GenericSchedulerBase::getReasonStr(Reason) << '\n'); 2791 } 2792 2793 static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand) { 2794 tracePick(Cand.Reason, Cand.AtTop); 2795 } 2796 2797 void GenericScheduler::initialize(ScheduleDAGMI *dag) { 2798 assert(dag->hasVRegLiveness() && 2799 "(PreRA)GenericScheduler needs vreg liveness"); 2800 DAG = static_cast<ScheduleDAGMILive*>(dag); 2801 SchedModel = DAG->getSchedModel(); 2802 TRI = DAG->TRI; 2803 2804 if (RegionPolicy.ComputeDFSResult) 2805 DAG->computeDFSResult(); 2806 2807 Rem.init(DAG, SchedModel); 2808 Top.init(DAG, SchedModel, &Rem); 2809 Bot.init(DAG, SchedModel, &Rem); 2810 2811 // Initialize resource counts. 2812 2813 // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or 2814 // are disabled, then these HazardRecs will be disabled. 
2815   const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
2816   if (!Top.HazardRec) {
2817     Top.HazardRec =
2818         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
2819             Itin, DAG);
2820   }
2821   if (!Bot.HazardRec) {
2822     Bot.HazardRec =
2823         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
2824             Itin, DAG);
2825   }
2826   TopCand.SU = nullptr;
2827   BotCand.SU = nullptr;
2828 }
2829 
2830 /// Initialize the per-region scheduling policy.
2831 void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
2832                                   MachineBasicBlock::iterator End,
2833                                   unsigned NumRegionInstrs) {
2834   const MachineFunction &MF = *Begin->getMF();
2835   const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();
2836 
2837   // Avoid setting up the register pressure tracker for small regions to save
2838   // compile time. As a rough heuristic, only track pressure when the number of
2839   // schedulable instructions exceeds half the integer register file.
2840   RegionPolicy.ShouldTrackPressure = true;
2841   for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
2842     MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
2843     if (TLI->isTypeLegal(LegalIntVT)) {
2844       unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
2845           TLI->getRegClassFor(LegalIntVT));
2846       RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
2847     }
2848   }
2849 
2850   // For generic targets, we default to bottom-up, because it's simpler and
2851   // more compile-time optimization effort has been invested in that direction.
2852   RegionPolicy.OnlyBottomUp = true;
2853 
2854   // Allow the subtarget to override the default policy.
2855   MF.getSubtarget().overrideSchedPolicy(RegionPolicy, NumRegionInstrs);
2856 
2857   // After subtarget overrides, apply command line options.
2858   if (!EnableRegPressure) {
2859     RegionPolicy.ShouldTrackPressure = false;
2860     RegionPolicy.ShouldTrackLaneMasks = false;
2861   }
2862 
2863   // Check whether -misched-topdown/-misched-bottomup force or unforce the
2864   // scheduling direction; e.g. -misched-bottomup=false allows both directions.
2865   assert((!ForceTopDown || !ForceBottomUp) &&
2866          "-misched-topdown incompatible with -misched-bottomup");
2867   if (ForceBottomUp.getNumOccurrences() > 0) {
2868     RegionPolicy.OnlyBottomUp = ForceBottomUp;
2869     if (RegionPolicy.OnlyBottomUp)
2870       RegionPolicy.OnlyTopDown = false;
2871   }
2872   if (ForceTopDown.getNumOccurrences() > 0) {
2873     RegionPolicy.OnlyTopDown = ForceTopDown;
2874     if (RegionPolicy.OnlyTopDown)
2875       RegionPolicy.OnlyBottomUp = false;
2876   }
2877 }
2878 
2879 void GenericScheduler::dumpPolicy() const {
2880   // Cannot completely remove virtual function even in release mode.
2881 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2882   dbgs() << "GenericScheduler RegionPolicy: "
2883          << " ShouldTrackPressure=" << RegionPolicy.ShouldTrackPressure
2884          << " OnlyTopDown=" << RegionPolicy.OnlyTopDown
2885          << " OnlyBottomUp=" << RegionPolicy.OnlyBottomUp
2886          << "\n";
2887 #endif
2888 }
2889 
2890 /// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
2891 /// critical path by more cycles than it takes to drain the instruction buffer.
2892 /// We estimate an upper bound on in-flight instructions as:
2893 ///
2894 ///   CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
2895 ///   InFlightIterations = AcyclicPath / CyclesPerIteration
2896 ///   InFlightResources = InFlightIterations * LoopResources
2897 ///
2898 /// TODO: Check execution resources in addition to IssueCount.
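/// (Worked sketch with made-up numbers: CyclicPath = 6c, AcyclicPath = 30c
/// and 10 micro-ops per iteration give 30/6 = 5 in-flight iterations, i.e.
/// roughly 50 in-flight micro-ops; a 40-entry micro-op buffer would then
/// mark the loop acyclic-latency limited.)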
2899 void GenericScheduler::checkAcyclicLatency() { 2900 if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath) 2901 return; 2902 2903 // Scaled number of cycles per loop iteration. 2904 unsigned IterCount = 2905 std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(), 2906 Rem.RemIssueCount); 2907 // Scaled acyclic critical path. 2908 unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor(); 2909 // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop 2910 unsigned InFlightCount = 2911 (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount; 2912 unsigned BufferLimit = 2913 SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor(); 2914 2915 Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit; 2916 2917 LLVM_DEBUG( 2918 dbgs() << "IssueCycles=" 2919 << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c " 2920 << "IterCycles=" << IterCount / SchedModel->getLatencyFactor() 2921 << "c NumIters=" << (AcyclicCount + IterCount - 1) / IterCount 2922 << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor() 2923 << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n"; 2924 if (Rem.IsAcyclicLatencyLimited) dbgs() << " ACYCLIC LATENCY LIMIT\n"); 2925 } 2926 2927 void GenericScheduler::registerRoots() { 2928 Rem.CriticalPath = DAG->ExitSU.getDepth(); 2929 2930 // Some roots may not feed into ExitSU. Check all of them in case. 2931 for (const SUnit *SU : Bot.Available) { 2932 if (SU->getDepth() > Rem.CriticalPath) 2933 Rem.CriticalPath = SU->getDepth(); 2934 } 2935 LLVM_DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n'); 2936 if (DumpCriticalPathLength) { 2937 errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n"; 2938 } 2939 2940 if (EnableCyclicPath && SchedModel->getMicroOpBufferSize() > 0) { 2941 Rem.CyclicCritPath = DAG->computeCyclicCriticalPath(); 2942 checkAcyclicLatency(); 2943 } 2944 } 2945 2946 namespace llvm { 2947 bool tryPressure(const PressureChange &TryP, 2948 const PressureChange &CandP, 2949 GenericSchedulerBase::SchedCandidate &TryCand, 2950 GenericSchedulerBase::SchedCandidate &Cand, 2951 GenericSchedulerBase::CandReason Reason, 2952 const TargetRegisterInfo *TRI, 2953 const MachineFunction &MF) { 2954 // If one candidate decreases and the other increases, go with it. 2955 // Invalid candidates have UnitInc==0. 2956 if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand, 2957 Reason)) { 2958 return true; 2959 } 2960 // Do not compare the magnitude of pressure changes between top and bottom 2961 // boundary. 2962 if (Cand.AtTop != TryCand.AtTop) 2963 return false; 2964 2965 // If both candidates affect the same set in the same boundary, go with the 2966 // smallest increase. 2967 unsigned TryPSet = TryP.getPSetOrMax(); 2968 unsigned CandPSet = CandP.getPSetOrMax(); 2969 if (TryPSet == CandPSet) { 2970 return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand, 2971 Reason); 2972 } 2973 2974 int TryRank = TryP.isValid() ? TRI->getRegPressureSetScore(MF, TryPSet) : 2975 std::numeric_limits<int>::max(); 2976 2977 int CandRank = CandP.isValid() ? TRI->getRegPressureSetScore(MF, CandPSet) : 2978 std::numeric_limits<int>::max(); 2979 2980 // If the candidates are decreasing pressure, reverse priority. 2981 if (TryP.getUnitInc() < 0) 2982 std::swap(TryRank, CandRank); 2983 return tryGreater(TryRank, CandRank, TryCand, Cand, Reason); 2984 } 2985 2986 unsigned getWeakLeft(const SUnit *SU, bool isTop) { 2987 return (isTop) ? 
SU->WeakPredsLeft : SU->WeakSuccsLeft;
2988 }
2989 
2990 /// Minimize physical register live ranges. Regalloc wants them adjacent to
2991 /// their physreg def/use.
2992 ///
2993 /// FIXME: This is an unnecessary check on the critical path. Most are
2994 /// root/leaf copies which can be prescheduled. The rest (e.g. x86 MUL) could
2995 /// be bundled with the operation that produces or consumes the physreg. We'll
2996 /// do this when regalloc has support for parallel copies.
2997 int biasPhysReg(const SUnit *SU, bool isTop) {
2998   const MachineInstr *MI = SU->getInstr();
2999 
3000   if (MI->isCopy()) {
3001     unsigned ScheduledOper = isTop ? 1 : 0;
3002     unsigned UnscheduledOper = isTop ? 0 : 1;
3003     // If we have already scheduled the physreg producer/consumer, immediately
3004     // schedule the copy.
3005     if (Register::isPhysicalRegister(MI->getOperand(ScheduledOper).getReg()))
3006       return 1;
3007     // If the physreg is at the boundary, defer it. Otherwise schedule it
3008     // immediately to free its dependents. We can hoist the copy later.
3009     bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
3010     if (Register::isPhysicalRegister(MI->getOperand(UnscheduledOper).getReg()))
3011       return AtBoundary ? -1 : 1;
3012   }
3013 
3014   if (MI->isMoveImmediate()) {
3015     // If we have a move immediate and all successors have been assigned, bias
3016     // towards scheduling this later. Make sure all register defs are to
3017     // physical registers.
3018     bool DoBias = true;
3019     for (const MachineOperand &Op : MI->defs()) {
3020       if (Op.isReg() && !Register::isPhysicalRegister(Op.getReg())) {
3021         DoBias = false;
3022         break;
3023       }
3024     }
3025 
3026     if (DoBias)
3027       return isTop ? -1 : 1;
3028   }
3029 
3030   return 0;
3031 }
3032 } // end namespace llvm
3033 
3034 void GenericScheduler::initCandidate(SchedCandidate &Cand, SUnit *SU,
3035                                      bool AtTop,
3036                                      const RegPressureTracker &RPTracker,
3037                                      RegPressureTracker &TempTracker) {
3038   Cand.SU = SU;
3039   Cand.AtTop = AtTop;
3040   if (DAG->isTrackingPressure()) {
3041     if (AtTop) {
3042       TempTracker.getMaxDownwardPressureDelta(
3043           Cand.SU->getInstr(),
3044           Cand.RPDelta,
3045           DAG->getRegionCriticalPSets(),
3046           DAG->getRegPressure().MaxSetPressure);
3047     } else {
3048       if (VerifyScheduling) {
3049         TempTracker.getMaxUpwardPressureDelta(
3050             Cand.SU->getInstr(),
3051             &DAG->getPressureDiff(Cand.SU),
3052             Cand.RPDelta,
3053             DAG->getRegionCriticalPSets(),
3054             DAG->getRegPressure().MaxSetPressure);
3055       } else {
3056         RPTracker.getUpwardPressureDelta(
3057             Cand.SU->getInstr(),
3058             DAG->getPressureDiff(Cand.SU),
3059             Cand.RPDelta,
3060             DAG->getRegionCriticalPSets(),
3061             DAG->getRegPressure().MaxSetPressure);
3062       }
3063     }
3064   }
3065   LLVM_DEBUG(if (Cand.RPDelta.Excess.isValid()) dbgs()
3066              << "  Try  SU(" << Cand.SU->NodeNum << ") "
3067              << TRI->getRegPressureSetName(Cand.RPDelta.Excess.getPSet()) << ":"
3068              << Cand.RPDelta.Excess.getUnitInc() << "\n");
3069 }
3070 
3071 /// Apply a set of heuristics to a new candidate. Heuristics are currently
3072 /// hierarchical. This may be more efficient than a graduated cost model
3073 /// because we don't need to evaluate all aspects of the model for each node
3074 /// in the queue. But it's really done to make the heuristics easier to debug
3075 /// and statistically analyze.
3076 ///
3077 /// \param Cand provides the policy and current best candidate.
3078 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
3079 /// \param Zone describes the scheduled zone that we are extending, or nullptr
3080 /// if Cand is from a different zone than TryCand.
3081 void GenericScheduler::tryCandidate(SchedCandidate &Cand,
3082 SchedCandidate &TryCand,
3083 SchedBoundary *Zone) const {
3084 // Initialize the candidate if needed.
3085 if (!Cand.isValid()) {
3086 TryCand.Reason = NodeOrder;
3087 return;
3088 }
3089
3090 // Bias PhysReg defs and copies toward their uses and definitions, respectively.
3091 if (tryGreater(biasPhysReg(TryCand.SU, TryCand.AtTop),
3092 biasPhysReg(Cand.SU, Cand.AtTop), TryCand, Cand, PhysReg))
3093 return;
3094
3095 // Avoid exceeding the target's limit.
3096 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
3097 Cand.RPDelta.Excess,
3098 TryCand, Cand, RegExcess, TRI,
3099 DAG->MF))
3100 return;
3101
3102 // Avoid increasing the max critical pressure in the scheduled region.
3103 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
3104 Cand.RPDelta.CriticalMax,
3105 TryCand, Cand, RegCritical, TRI,
3106 DAG->MF))
3107 return;
3108
3109 // We only compare a subset of features when comparing nodes between the
3110 // Top and Bottom boundaries. Some properties are simply incomparable; in many
3111 // other instances we should only override the other boundary if something
3112 // is a clearly good pick on one boundary. Skip heuristics that are more
3113 // "tie-breaking" in nature.
3114 bool SameBoundary = Zone != nullptr;
3115 if (SameBoundary) {
3116 // For loops that are acyclic path limited, aggressively schedule for
3117 // latency. Within a single cycle, whenever CurrMOps > 0, allow normal
3118 // heuristics to take precedence.
3119 if (Rem.IsAcyclicLatencyLimited && !Zone->getCurrMOps() &&
3120 tryLatency(TryCand, Cand, *Zone))
3121 return;
3122
3123 // Prioritize instructions that read unbuffered resources by stall cycles.
3124 if (tryLess(Zone->getLatencyStallCycles(TryCand.SU),
3125 Zone->getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
3126 return;
3127 }
3128
3129 // Keep clustered nodes together to encourage downstream peephole
3130 // optimizations which may reduce resource requirements.
3131 //
3132 // This is a best effort to set things up for a post-RA pass. Optimizations
3133 // like generating loads of multiple registers should ideally be done within
3134 // the scheduler pass by combining the loads during DAG postprocessing.
3135 const SUnit *CandNextClusterSU =
3136 Cand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
3137 const SUnit *TryCandNextClusterSU =
3138 TryCand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
3139 if (tryGreater(TryCand.SU == TryCandNextClusterSU,
3140 Cand.SU == CandNextClusterSU,
3141 TryCand, Cand, Cluster))
3142 return;
3143
3144 if (SameBoundary) {
3145 // Weak edges are for clustering and other constraints.
3146 if (tryLess(getWeakLeft(TryCand.SU, TryCand.AtTop),
3147 getWeakLeft(Cand.SU, Cand.AtTop),
3148 TryCand, Cand, Weak))
3149 return;
3150 }
3151
3152 // Avoid increasing the max pressure of the entire region.
3153 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
3154 Cand.RPDelta.CurrentMax,
3155 TryCand, Cand, RegMax, TRI,
3156 DAG->MF))
3157 return;
3158
3159 if (SameBoundary) {
3160 // Avoid critical resource consumption and balance the schedule.
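// The resource delta is computed only now, once the cheaper heuristics
// above have failed to decide the comparison; pickNodeFromQueue() fills it
// in afterwards for a winning candidate that never reached this point.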
3161 TryCand.initResourceDelta(DAG, SchedModel); 3162 if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources, 3163 TryCand, Cand, ResourceReduce)) 3164 return; 3165 if (tryGreater(TryCand.ResDelta.DemandedResources, 3166 Cand.ResDelta.DemandedResources, 3167 TryCand, Cand, ResourceDemand)) 3168 return; 3169 3170 // Avoid serializing long latency dependence chains. 3171 // For acyclic path limited loops, latency was already checked above. 3172 if (!RegionPolicy.DisableLatencyHeuristic && TryCand.Policy.ReduceLatency && 3173 !Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, *Zone)) 3174 return; 3175 3176 // Fall through to original instruction order. 3177 if ((Zone->isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum) 3178 || (!Zone->isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) { 3179 TryCand.Reason = NodeOrder; 3180 } 3181 } 3182 } 3183 3184 /// Pick the best candidate from the queue. 3185 /// 3186 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during 3187 /// DAG building. To adjust for the current scheduling location we need to 3188 /// maintain the number of vreg uses remaining to be top-scheduled. 3189 void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone, 3190 const CandPolicy &ZonePolicy, 3191 const RegPressureTracker &RPTracker, 3192 SchedCandidate &Cand) { 3193 // getMaxPressureDelta temporarily modifies the tracker. 3194 RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker); 3195 3196 ReadyQueue &Q = Zone.Available; 3197 for (SUnit *SU : Q) { 3198 3199 SchedCandidate TryCand(ZonePolicy); 3200 initCandidate(TryCand, SU, Zone.isTop(), RPTracker, TempTracker); 3201 // Pass SchedBoundary only when comparing nodes from the same boundary. 3202 SchedBoundary *ZoneArg = Cand.AtTop == TryCand.AtTop ? &Zone : nullptr; 3203 tryCandidate(Cand, TryCand, ZoneArg); 3204 if (TryCand.Reason != NoCand) { 3205 // Initialize resource delta if needed in case future heuristics query it. 3206 if (TryCand.ResDelta == SchedResourceDelta()) 3207 TryCand.initResourceDelta(DAG, SchedModel); 3208 Cand.setBest(TryCand); 3209 LLVM_DEBUG(traceCandidate(Cand)); 3210 } 3211 } 3212 } 3213 3214 /// Pick the best candidate node from either the top or bottom queue. 3215 SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) { 3216 // Schedule as far as possible in the direction of no choice. This is most 3217 // efficient, but also provides the best heuristics for CriticalPSets. 3218 if (SUnit *SU = Bot.pickOnlyChoice()) { 3219 IsTopNode = false; 3220 tracePick(Only1, false); 3221 return SU; 3222 } 3223 if (SUnit *SU = Top.pickOnlyChoice()) { 3224 IsTopNode = true; 3225 tracePick(Only1, true); 3226 return SU; 3227 } 3228 // Set the bottom-up policy based on the state of the current bottom zone and 3229 // the instructions outside the zone, including the top zone. 3230 CandPolicy BotPolicy; 3231 setPolicy(BotPolicy, /*IsPostRA=*/false, Bot, &Top); 3232 // Set the top-down policy based on the state of the current top zone and 3233 // the instructions outside the zone, including the bottom zone. 3234 CandPolicy TopPolicy; 3235 setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot); 3236 3237 // See if BotCand is still valid (because we previously scheduled from Top). 
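// The candidates are cached between calls: a cached BotCand/TopCand is
// reused only if its node is still unscheduled and the zone policy that
// produced it still matches; otherwise it is recomputed from the queue.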
3238 LLVM_DEBUG(dbgs() << "Picking from Bot:\n"); 3239 if (!BotCand.isValid() || BotCand.SU->isScheduled || 3240 BotCand.Policy != BotPolicy) { 3241 BotCand.reset(CandPolicy()); 3242 pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand); 3243 assert(BotCand.Reason != NoCand && "failed to find the first candidate"); 3244 } else { 3245 LLVM_DEBUG(traceCandidate(BotCand)); 3246 #ifndef NDEBUG 3247 if (VerifyScheduling) { 3248 SchedCandidate TCand; 3249 TCand.reset(CandPolicy()); 3250 pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), TCand); 3251 assert(TCand.SU == BotCand.SU && 3252 "Last pick result should correspond to re-picking right now"); 3253 } 3254 #endif 3255 } 3256 3257 // Check if the top Q has a better candidate. 3258 LLVM_DEBUG(dbgs() << "Picking from Top:\n"); 3259 if (!TopCand.isValid() || TopCand.SU->isScheduled || 3260 TopCand.Policy != TopPolicy) { 3261 TopCand.reset(CandPolicy()); 3262 pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand); 3263 assert(TopCand.Reason != NoCand && "failed to find the first candidate"); 3264 } else { 3265 LLVM_DEBUG(traceCandidate(TopCand)); 3266 #ifndef NDEBUG 3267 if (VerifyScheduling) { 3268 SchedCandidate TCand; 3269 TCand.reset(CandPolicy()); 3270 pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TCand); 3271 assert(TCand.SU == TopCand.SU && 3272 "Last pick result should correspond to re-picking right now"); 3273 } 3274 #endif 3275 } 3276 3277 // Pick best from BotCand and TopCand. 3278 assert(BotCand.isValid()); 3279 assert(TopCand.isValid()); 3280 SchedCandidate Cand = BotCand; 3281 TopCand.Reason = NoCand; 3282 tryCandidate(Cand, TopCand, nullptr); 3283 if (TopCand.Reason != NoCand) { 3284 Cand.setBest(TopCand); 3285 LLVM_DEBUG(traceCandidate(Cand)); 3286 } 3287 3288 IsTopNode = Cand.AtTop; 3289 tracePick(Cand); 3290 return Cand.SU; 3291 } 3292 3293 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy. 3294 SUnit *GenericScheduler::pickNode(bool &IsTopNode) { 3295 if (DAG->top() == DAG->bottom()) { 3296 assert(Top.Available.empty() && Top.Pending.empty() && 3297 Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage"); 3298 return nullptr; 3299 } 3300 SUnit *SU; 3301 do { 3302 if (RegionPolicy.OnlyTopDown) { 3303 SU = Top.pickOnlyChoice(); 3304 if (!SU) { 3305 CandPolicy NoPolicy; 3306 TopCand.reset(NoPolicy); 3307 pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand); 3308 assert(TopCand.Reason != NoCand && "failed to find a candidate"); 3309 tracePick(TopCand); 3310 SU = TopCand.SU; 3311 } 3312 IsTopNode = true; 3313 } else if (RegionPolicy.OnlyBottomUp) { 3314 SU = Bot.pickOnlyChoice(); 3315 if (!SU) { 3316 CandPolicy NoPolicy; 3317 BotCand.reset(NoPolicy); 3318 pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand); 3319 assert(BotCand.Reason != NoCand && "failed to find a candidate"); 3320 tracePick(BotCand); 3321 SU = BotCand.SU; 3322 } 3323 IsTopNode = false; 3324 } else { 3325 SU = pickNodeBidirectional(IsTopNode); 3326 } 3327 } while (SU->isScheduled); 3328 3329 if (SU->isTopReady()) 3330 Top.removeReady(SU); 3331 if (SU->isBottomReady()) 3332 Bot.removeReady(SU); 3333 3334 LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " 3335 << *SU->getInstr()); 3336 return SU; 3337 } 3338 3339 void GenericScheduler::reschedulePhysReg(SUnit *SU, bool isTop) { 3340 MachineBasicBlock::iterator InsertPos = SU->getInstr(); 3341 if (!isTop) 3342 ++InsertPos; 3343 SmallVectorImpl<SDep> &Deps = isTop ? 
SU->Preds : SU->Succs;
3344
3345 // Find already scheduled copies with a single physreg dependence and move
3346 // them just above the scheduled instruction.
3347 for (SDep &Dep : Deps) {
3348 if (Dep.getKind() != SDep::Data ||
3349 !Register::isPhysicalRegister(Dep.getReg()))
3350 continue;
3351 SUnit *DepSU = Dep.getSUnit();
3352 if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
3353 continue;
3354 MachineInstr *Copy = DepSU->getInstr();
3355 if (!Copy->isCopy() && !Copy->isMoveImmediate())
3356 continue;
3357 LLVM_DEBUG(dbgs() << " Rescheduling physreg copy ";
3358 DAG->dumpNode(*Dep.getSUnit()));
3359 DAG->moveInstruction(Copy, InsertPos);
3360 }
3361 }
3362
3363 /// Update the scheduler's state after scheduling a node. This is the same node
3364 /// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
3365 /// update its state based on the current cycle before MachineSchedStrategy
3366 /// does.
3367 ///
3368 /// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
3369 /// them here. See comments in biasPhysReg.
3370 void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3371 if (IsTopNode) {
3372 SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3373 Top.bumpNode(SU);
3374 if (SU->hasPhysRegUses)
3375 reschedulePhysReg(SU, true);
3376 } else {
3377 SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
3378 Bot.bumpNode(SU);
3379 if (SU->hasPhysRegDefs)
3380 reschedulePhysReg(SU, false);
3381 }
3382 }
3383
3384 /// Create the standard converging machine scheduler. This will be used as the
3385 /// default scheduler if the target does not set a default.
3386 ScheduleDAGMILive *llvm::createGenericSchedLive(MachineSchedContext *C) {
3387 ScheduleDAGMILive *DAG =
3388 new ScheduleDAGMILive(C, std::make_unique<GenericScheduler>(C));
3389 // Register DAG post-processors.
3390 //
3391 // FIXME: extend the mutation API to allow earlier mutations to instantiate
3392 // data and pass it to later mutations. Have a single mutation that gathers
3393 // the interesting nodes in one pass.
3394 DAG->addMutation(createCopyConstrainDAGMutation(DAG->TII, DAG->TRI));
3395 return DAG;
3396 }
3397
3398 static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
3399 return createGenericSchedLive(C);
3400 }
3401
3402 static MachineSchedRegistry
3403 GenericSchedRegistry("converge", "Standard converging scheduler.",
3404 createConvergingSched);
3405
3406 //===----------------------------------------------------------------------===//
3407 // PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
3408 //===----------------------------------------------------------------------===//
3409
3410 void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
3411 DAG = Dag;
3412 SchedModel = DAG->getSchedModel();
3413 TRI = DAG->TRI;
3414
3415 Rem.init(DAG, SchedModel);
3416 Top.init(DAG, SchedModel, &Rem);
3417 BotRoots.clear();
3418
3419 // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
3420 // or are disabled, then these HazardRecs will be disabled.
3421 const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
3422 if (!Top.HazardRec) {
3423 Top.HazardRec =
3424 DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
3425 Itin, DAG);
3426 }
3427 }
3428
3429 void PostGenericScheduler::registerRoots() {
3430 Rem.CriticalPath = DAG->ExitSU.getDepth();
3431
3432 // Some roots may not feed into ExitSU. Check all of them in case.
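// BotRoots holds the DAG's bottom-up roots as collected by
// releaseBottomNode(); their depths can exceed ExitSU's, so each one bounds
// the critical path.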
3433 for (const SUnit *SU : BotRoots) {
3434 if (SU->getDepth() > Rem.CriticalPath)
3435 Rem.CriticalPath = SU->getDepth();
3436 }
3437 LLVM_DEBUG(dbgs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << '\n');
3438 if (DumpCriticalPathLength) {
3439 errs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << " \n";
3440 }
3441 }
3442
3443 /// Apply a set of heuristics to a new candidate for PostRA scheduling.
3444 ///
3445 /// \param Cand provides the policy and current best candidate.
3446 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
3447 void PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
3448 SchedCandidate &TryCand) {
3449 // Initialize the candidate if needed.
3450 if (!Cand.isValid()) {
3451 TryCand.Reason = NodeOrder;
3452 return;
3453 }
3454
3455 // Prioritize instructions that read unbuffered resources by stall cycles.
3456 if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
3457 Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
3458 return;
3459
3460 // Keep clustered nodes together.
3461 if (tryGreater(TryCand.SU == DAG->getNextClusterSucc(),
3462 Cand.SU == DAG->getNextClusterSucc(),
3463 TryCand, Cand, Cluster))
3464 return;
3465
3466 // Avoid critical resource consumption and balance the schedule.
3467 if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
3468 TryCand, Cand, ResourceReduce))
3469 return;
3470 if (tryGreater(TryCand.ResDelta.DemandedResources,
3471 Cand.ResDelta.DemandedResources,
3472 TryCand, Cand, ResourceDemand))
3473 return;
3474
3475 // Avoid serializing long latency dependence chains.
3476 if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
3477 return;
3478 }
3479
3480 // Fall through to original instruction order.
3481 if (TryCand.SU->NodeNum < Cand.SU->NodeNum)
3482 TryCand.Reason = NodeOrder;
3483 }
3484
3485 void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
3486 ReadyQueue &Q = Top.Available;
3487 for (SUnit *SU : Q) {
3488 SchedCandidate TryCand(Cand.Policy);
3489 TryCand.SU = SU;
3490 TryCand.AtTop = true;
3491 TryCand.initResourceDelta(DAG, SchedModel);
3492 tryCandidate(Cand, TryCand);
3493 if (TryCand.Reason != NoCand) {
3494 Cand.setBest(TryCand);
3495 LLVM_DEBUG(traceCandidate(Cand));
3496 }
3497 }
3498 }
3499
3500 /// Pick the next node to schedule.
3501 SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
3502 if (DAG->top() == DAG->bottom()) {
3503 assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
3504 return nullptr;
3505 }
3506 SUnit *SU;
3507 do {
3508 SU = Top.pickOnlyChoice();
3509 if (SU) {
3510 tracePick(Only1, true);
3511 } else {
3512 CandPolicy NoPolicy;
3513 SchedCandidate TopCand(NoPolicy);
3514 // Set the top-down policy based on the state of the current top zone and
3515 // the instructions outside the zone, including the bottom zone.
3516 setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
3517 pickNodeFromQueue(TopCand);
3518 assert(TopCand.Reason != NoCand && "failed to find a candidate");
3519 tracePick(TopCand);
3520 SU = TopCand.SU;
3521 }
3522 } while (SU->isScheduled);
3523
3524 IsTopNode = true;
3525 Top.removeReady(SU);
3526
3527 LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
3528 << *SU->getInstr());
3529 return SU;
3530 }
3531
3532 /// Called after ScheduleDAGMI has scheduled an instruction and updated
3533 /// scheduled/remaining flags in the DAG nodes.
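/// PostGenericScheduler schedules strictly top-down, so only the top zone's
/// current cycle and ready times need updating here; compare
/// GenericScheduler::schedNode(), which also reschedules physreg copies.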
3534 void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3535 SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3536 Top.bumpNode(SU);
3537 }
3538
3539 ScheduleDAGMI *llvm::createGenericSchedPostRA(MachineSchedContext *C) {
3540 return new ScheduleDAGMI(C, std::make_unique<PostGenericScheduler>(C),
3541 /*RemoveKillFlags=*/true);
3542 }
3543
3544 //===----------------------------------------------------------------------===//
3545 // ILP Scheduler. Currently for experimental analysis of heuristics.
3546 //===----------------------------------------------------------------------===//
3547
3548 namespace {
3549
3550 /// Order nodes by the ILP metric.
3551 struct ILPOrder {
3552 const SchedDFSResult *DFSResult = nullptr;
3553 const BitVector *ScheduledTrees = nullptr;
3554 bool MaximizeILP;
3555
3556 ILPOrder(bool MaxILP) : MaximizeILP(MaxILP) {}
3557
3558 /// Apply a less-than relation on node priority.
3559 ///
3560 /// (Return true if A comes after B in the Q.)
3561 bool operator()(const SUnit *A, const SUnit *B) const {
3562 unsigned SchedTreeA = DFSResult->getSubtreeID(A);
3563 unsigned SchedTreeB = DFSResult->getSubtreeID(B);
3564 if (SchedTreeA != SchedTreeB) {
3565 // Unscheduled trees have lower priority.
3566 if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
3567 return ScheduledTrees->test(SchedTreeB);
3568
3569 // Trees with shallower connections have lower priority.
3570 if (DFSResult->getSubtreeLevel(SchedTreeA)
3571 != DFSResult->getSubtreeLevel(SchedTreeB)) {
3572 return DFSResult->getSubtreeLevel(SchedTreeA)
3573 < DFSResult->getSubtreeLevel(SchedTreeB);
3574 }
3575 }
3576 if (MaximizeILP)
3577 return DFSResult->getILP(A) < DFSResult->getILP(B);
3578 else
3579 return DFSResult->getILP(A) > DFSResult->getILP(B);
3580 }
3581 };
3582
3583 /// Schedule based on the ILP metric.
3584 class ILPScheduler : public MachineSchedStrategy {
3585 ScheduleDAGMILive *DAG = nullptr;
3586 ILPOrder Cmp;
3587
3588 std::vector<SUnit*> ReadyQ;
3589
3590 public:
3591 ILPScheduler(bool MaximizeILP) : Cmp(MaximizeILP) {}
3592
3593 void initialize(ScheduleDAGMI *dag) override {
3594 assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
3595 DAG = static_cast<ScheduleDAGMILive*>(dag);
3596 DAG->computeDFSResult();
3597 Cmp.DFSResult = DAG->getDFSResult();
3598 Cmp.ScheduledTrees = &DAG->getScheduledTrees();
3599 ReadyQ.clear();
3600 }
3601
3602 void registerRoots() override {
3603 // Restore the heap in ReadyQ with the updated DFS results.
3604 std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3605 }
3606
3607 /// Implement MachineSchedStrategy interface.
3608 /// -----------------------------------------
3609
3610 /// Callback to select the highest priority node from the ready Q.
3611 SUnit *pickNode(bool &IsTopNode) override {
3612 if (ReadyQ.empty()) return nullptr;
3613 std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3614 SUnit *SU = ReadyQ.back();
3615 ReadyQ.pop_back();
3616 IsTopNode = false;
3617 LLVM_DEBUG(dbgs() << "Pick node "
3618 << "SU(" << SU->NodeNum << ") "
3619 << " ILP: " << DAG->getDFSResult()->getILP(SU)
3620 << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU)
3621 << " @"
3622 << DAG->getDFSResult()->getSubtreeLevel(
3623 DAG->getDFSResult()->getSubtreeID(SU))
3624 << '\n'
3625 << "Scheduling " << *SU->getInstr());
3626 return SU;
3627 }
3628
3629 /// Scheduler callback to notify that a new subtree is scheduled.
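/// ILPOrder consults ScheduledTrees, which has just changed, so any
/// previously established heap order may be stale; rebuild the heap before
/// the next pick.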
3630 void scheduleTree(unsigned SubtreeID) override {
3631 std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3632 }
3633
3634 /// Callback after a node is scheduled. Marking the newly scheduled tree and
3635 /// resorting the priority Q are handled by scheduleTree() above.
3636 void schedNode(SUnit *SU, bool IsTopNode) override {
3637 assert(!IsTopNode && "SchedDFSResult needs bottom-up");
3638 }
3639
3640 void releaseTopNode(SUnit *) override { /*only called for top roots*/ }
3641
3642 void releaseBottomNode(SUnit *SU) override {
3643 ReadyQ.push_back(SU);
3644 std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3645 }
3646 };
3647
3648 } // end anonymous namespace
3649
3650 static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
3651 return new ScheduleDAGMILive(C, std::make_unique<ILPScheduler>(true));
3652 }
3653 static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
3654 return new ScheduleDAGMILive(C, std::make_unique<ILPScheduler>(false));
3655 }
3656
3657 static MachineSchedRegistry ILPMaxRegistry(
3658 "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
3659 static MachineSchedRegistry ILPMinRegistry(
3660 "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);
3661
3662 //===----------------------------------------------------------------------===//
3663 // Machine Instruction Shuffler for Correctness Testing
3664 //===----------------------------------------------------------------------===//
3665
3666 #ifndef NDEBUG
3667 namespace {
3668
3669 /// Apply a less-than relation on the node order, which corresponds to the
3670 /// instruction order prior to scheduling. IsReverse implements greater-than.
3671 template<bool IsReverse>
3672 struct SUnitOrder {
3673 bool operator()(SUnit *A, SUnit *B) const {
3674 if (IsReverse)
3675 return A->NodeNum > B->NodeNum;
3676 else
3677 return A->NodeNum < B->NodeNum;
3678 }
3679 };
3680
3681 /// Reorder instructions as much as possible.
3682 class InstructionShuffler : public MachineSchedStrategy {
3683 bool IsAlternating;
3684 bool IsTopDown;
3685
3686 // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
3687 // gives nodes with a higher number higher priority, causing the latest
3688 // instructions to be scheduled first.
3689 PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false>>
3690 TopQ;
3691
3692 // When scheduling bottom-up, use greater-than as the queue priority.
3693 PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true>>
3694 BottomQ;
3695
3696 public:
3697 InstructionShuffler(bool alternate, bool topdown)
3698 : IsAlternating(alternate), IsTopDown(topdown) {}
3699
3700 void initialize(ScheduleDAGMI*) override {
3701 TopQ.clear();
3702 BottomQ.clear();
3703 }
3704
3705 /// Implement MachineSchedStrategy interface.
3706 /// ----------------------------------------- 3707 3708 SUnit *pickNode(bool &IsTopNode) override { 3709 SUnit *SU; 3710 if (IsTopDown) { 3711 do { 3712 if (TopQ.empty()) return nullptr; 3713 SU = TopQ.top(); 3714 TopQ.pop(); 3715 } while (SU->isScheduled); 3716 IsTopNode = true; 3717 } else { 3718 do { 3719 if (BottomQ.empty()) return nullptr; 3720 SU = BottomQ.top(); 3721 BottomQ.pop(); 3722 } while (SU->isScheduled); 3723 IsTopNode = false; 3724 } 3725 if (IsAlternating) 3726 IsTopDown = !IsTopDown; 3727 return SU; 3728 } 3729 3730 void schedNode(SUnit *SU, bool IsTopNode) override {} 3731 3732 void releaseTopNode(SUnit *SU) override { 3733 TopQ.push(SU); 3734 } 3735 void releaseBottomNode(SUnit *SU) override { 3736 BottomQ.push(SU); 3737 } 3738 }; 3739 3740 } // end anonymous namespace 3741 3742 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) { 3743 bool Alternate = !ForceTopDown && !ForceBottomUp; 3744 bool TopDown = !ForceBottomUp; 3745 assert((TopDown || !ForceTopDown) && 3746 "-misched-topdown incompatible with -misched-bottomup"); 3747 return new ScheduleDAGMILive( 3748 C, std::make_unique<InstructionShuffler>(Alternate, TopDown)); 3749 } 3750 3751 static MachineSchedRegistry ShufflerRegistry( 3752 "shuffle", "Shuffle machine instructions alternating directions", 3753 createInstructionShuffler); 3754 #endif // !NDEBUG 3755 3756 //===----------------------------------------------------------------------===// 3757 // GraphWriter support for ScheduleDAGMILive. 3758 //===----------------------------------------------------------------------===// 3759 3760 #ifndef NDEBUG 3761 namespace llvm { 3762 3763 template<> struct GraphTraits< 3764 ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {}; 3765 3766 template<> 3767 struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits { 3768 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {} 3769 3770 static std::string getGraphName(const ScheduleDAG *G) { 3771 return std::string(G->MF.getName()); 3772 } 3773 3774 static bool renderGraphFromBottomUp() { 3775 return true; 3776 } 3777 3778 static bool isNodeHidden(const SUnit *Node) { 3779 if (ViewMISchedCutoff == 0) 3780 return false; 3781 return (Node->Preds.size() > ViewMISchedCutoff 3782 || Node->Succs.size() > ViewMISchedCutoff); 3783 } 3784 3785 /// If you want to override the dot attributes printed for a particular 3786 /// edge, override this method. 3787 static std::string getEdgeAttributes(const SUnit *Node, 3788 SUnitIterator EI, 3789 const ScheduleDAG *Graph) { 3790 if (EI.isArtificialDep()) 3791 return "color=cyan,style=dashed"; 3792 if (EI.isCtrlDep()) 3793 return "color=blue,style=dashed"; 3794 return ""; 3795 } 3796 3797 static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) { 3798 std::string Str; 3799 raw_string_ostream SS(Str); 3800 const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G); 3801 const SchedDFSResult *DFS = DAG->hasVRegLiveness() ? 
3802 static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr; 3803 SS << "SU:" << SU->NodeNum; 3804 if (DFS) 3805 SS << " I:" << DFS->getNumInstrs(SU); 3806 return SS.str(); 3807 } 3808 3809 static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) { 3810 return G->getGraphNodeLabel(SU); 3811 } 3812 3813 static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) { 3814 std::string Str("shape=Mrecord"); 3815 const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G); 3816 const SchedDFSResult *DFS = DAG->hasVRegLiveness() ? 3817 static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr; 3818 if (DFS) { 3819 Str += ",style=filled,fillcolor=\"#"; 3820 Str += DOT::getColorString(DFS->getSubtreeID(N)); 3821 Str += '"'; 3822 } 3823 return Str; 3824 } 3825 }; 3826 3827 } // end namespace llvm 3828 #endif // NDEBUG 3829 3830 /// viewGraph - Pop up a ghostview window with the reachable parts of the DAG 3831 /// rendered using 'dot'. 3832 void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) { 3833 #ifndef NDEBUG 3834 ViewGraph(this, Name, false, Title); 3835 #else 3836 errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on " 3837 << "systems with Graphviz or gv!\n"; 3838 #endif // NDEBUG 3839 } 3840 3841 /// Out-of-line implementation with no arguments is handy for gdb. 3842 void ScheduleDAGMI::viewGraph() { 3843 viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName()); 3844 } 3845
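
//===----------------------------------------------------------------------===//
// Usage note.
//===----------------------------------------------------------------------===//
//
// Each scheduler registered in this file is selectable by name through
// MachineSchedRegistry, typically via -misched=<name> (e.g. "converge",
// "ilpmax", "ilpmin", or, in debug builds, "shuffle"). A minimal sketch of an
// out-of-tree strategy would follow the same registration pattern; the
// strategy class, registry name, and description below are hypothetical:
//
//   static ScheduleDAGInstrs *createMyStrategySched(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, std::make_unique<MyStrategy>());
//   }
//   static MachineSchedRegistry
//       MyRegistry("my-strategy", "Example custom strategy.",
//                  createMyStrategySched);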