1 //===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // MachineScheduler schedules machine instructions after phi elimination. It 10 // preserves LiveIntervals so it can be invoked before register allocation. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/CodeGen/MachineScheduler.h" 15 #include "llvm/ADT/ArrayRef.h" 16 #include "llvm/ADT/BitVector.h" 17 #include "llvm/ADT/DenseMap.h" 18 #include "llvm/ADT/PriorityQueue.h" 19 #include "llvm/ADT/STLExtras.h" 20 #include "llvm/ADT/SmallVector.h" 21 #include "llvm/ADT/Statistic.h" 22 #include "llvm/ADT/iterator_range.h" 23 #include "llvm/Analysis/AliasAnalysis.h" 24 #include "llvm/CodeGen/LiveInterval.h" 25 #include "llvm/CodeGen/LiveIntervals.h" 26 #include "llvm/CodeGen/MachineBasicBlock.h" 27 #include "llvm/CodeGen/MachineDominators.h" 28 #include "llvm/CodeGen/MachineFunction.h" 29 #include "llvm/CodeGen/MachineFunctionPass.h" 30 #include "llvm/CodeGen/MachineInstr.h" 31 #include "llvm/CodeGen/MachineLoopInfo.h" 32 #include "llvm/CodeGen/MachineOperand.h" 33 #include "llvm/CodeGen/MachinePassRegistry.h" 34 #include "llvm/CodeGen/MachineRegisterInfo.h" 35 #include "llvm/CodeGen/MachineValueType.h" 36 #include "llvm/CodeGen/RegisterClassInfo.h" 37 #include "llvm/CodeGen/RegisterPressure.h" 38 #include "llvm/CodeGen/ScheduleDAG.h" 39 #include "llvm/CodeGen/ScheduleDAGInstrs.h" 40 #include "llvm/CodeGen/ScheduleDAGMutation.h" 41 #include "llvm/CodeGen/ScheduleDFS.h" 42 #include "llvm/CodeGen/ScheduleHazardRecognizer.h" 43 #include "llvm/CodeGen/SlotIndexes.h" 44 #include "llvm/CodeGen/TargetFrameLowering.h" 45 #include "llvm/CodeGen/TargetInstrInfo.h" 46 #include "llvm/CodeGen/TargetLowering.h" 47 #include "llvm/CodeGen/TargetPassConfig.h" 48 #include "llvm/CodeGen/TargetRegisterInfo.h" 49 #include "llvm/CodeGen/TargetSchedule.h" 50 #include "llvm/CodeGen/TargetSubtargetInfo.h" 51 #include "llvm/Config/llvm-config.h" 52 #include "llvm/InitializePasses.h" 53 #include "llvm/MC/LaneBitmask.h" 54 #include "llvm/Pass.h" 55 #include "llvm/Support/CommandLine.h" 56 #include "llvm/Support/Compiler.h" 57 #include "llvm/Support/Debug.h" 58 #include "llvm/Support/ErrorHandling.h" 59 #include "llvm/Support/GraphWriter.h" 60 #include "llvm/Support/raw_ostream.h" 61 #include <algorithm> 62 #include <cassert> 63 #include <cstdint> 64 #include <iterator> 65 #include <limits> 66 #include <memory> 67 #include <string> 68 #include <tuple> 69 #include <utility> 70 #include <vector> 71 72 using namespace llvm; 73 74 #define DEBUG_TYPE "machine-scheduler" 75 76 STATISTIC(NumClustered, "Number of load/store pairs clustered"); 77 78 namespace llvm { 79 80 cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden, 81 cl::desc("Force top-down list scheduling")); 82 cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden, 83 cl::desc("Force bottom-up list scheduling")); 84 cl::opt<bool> 85 DumpCriticalPathLength("misched-dcpl", cl::Hidden, 86 cl::desc("Print critical path length to stdout")); 87 88 cl::opt<bool> VerifyScheduling( 89 "verify-misched", cl::Hidden, 90 cl::desc("Verify machine instrs before and after machine scheduling")); 91 92 #ifndef NDEBUG 93 cl::opt<bool> 
ViewMISchedDAGs(
94 "view-misched-dags", cl::Hidden,
95 cl::desc("Pop up a window to show MISched dags after they are processed"));
96 cl::opt<bool> PrintDAGs("misched-print-dags", cl::Hidden,
97 cl::desc("Print schedule DAGs"));
98 cl::opt<bool> MISchedDumpReservedCycles(
99 "misched-dump-reserved-cycles", cl::Hidden, cl::init(false),
100 cl::desc("Dump resource usage at schedule boundary."));
101 #else
102 const bool ViewMISchedDAGs = false;
103 const bool PrintDAGs = false;
104 #ifdef LLVM_ENABLE_DUMP
105 const bool MISchedDumpReservedCycles = false;
106 #endif // LLVM_ENABLE_DUMP
107 #endif // NDEBUG
108
109 } // end namespace llvm
110
111 #ifndef NDEBUG
112 /// In some situations a few uninteresting nodes depend on nearly all other
113 /// nodes in the graph; provide a cutoff to hide them.
114 static cl::opt<unsigned> ViewMISchedCutoff("view-misched-cutoff", cl::Hidden,
115 cl::desc("Hide nodes with more predecessors/successors than cutoff"));
116
117 static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
118 cl::desc("Stop scheduling after N instructions"), cl::init(~0U));
119
120 static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
121 cl::desc("Only schedule this function"));
122 static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
123 cl::desc("Only schedule this MBB#"));
124 #endif // NDEBUG
125
126 /// Avoid quadratic complexity in unusually large basic blocks by limiting the
127 /// size of the ready lists.
128 static cl::opt<unsigned> ReadyListLimit("misched-limit", cl::Hidden,
129 cl::desc("Limit ready list to N instructions"), cl::init(256));
130
131 static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
132 cl::desc("Enable register pressure scheduling."), cl::init(true));
133
134 static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
135 cl::desc("Enable cyclic critical path analysis."), cl::init(true));
136
137 static cl::opt<bool> EnableMemOpCluster("misched-cluster", cl::Hidden,
138 cl::desc("Enable memop clustering."),
139 cl::init(true));
140 static cl::opt<bool>
141 ForceFastCluster("force-fast-cluster", cl::Hidden,
142 cl::desc("Switch to fast cluster algorithm with the loss "
143 "of some fusion opportunities"),
144 cl::init(false));
145 static cl::opt<unsigned>
146 FastClusterThreshold("fast-cluster-threshold", cl::Hidden,
147 cl::desc("The threshold for fast cluster"),
148 cl::init(1000));
149
150 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
151 static cl::opt<bool> MISchedDumpScheduleTrace(
152 "misched-dump-schedule-trace", cl::Hidden, cl::init(false),
153 cl::desc("Dump resource usage at schedule boundary."));
154 static cl::opt<unsigned>
155 HeaderColWidth("misched-dump-schedule-trace-col-header-width", cl::Hidden,
156 cl::desc("Set width of the columns with "
157 "the resources and schedule units"),
158 cl::init(19));
159 static cl::opt<unsigned>
160 ColWidth("misched-dump-schedule-trace-col-width", cl::Hidden,
161 cl::desc("Set width of the columns showing resource booking."),
162 cl::init(5));
163 static cl::opt<bool> MISchedSortResourcesInTrace(
164 "misched-sort-resources-in-trace", cl::Hidden, cl::init(true),
165 cl::desc("Sort the resources printed in the dump trace"));
166 #endif
167
168 static cl::opt<unsigned>
169 MIResourceCutOff("misched-resource-cutoff", cl::Hidden,
170 cl::desc("Number of intervals to track"), cl::init(10));
171
172 // DAG subtrees must have at least this many nodes.
173 static const unsigned MinSubtreeSize = 8;
174
175 // Pin the vtables to this file.
176 void MachineSchedStrategy::anchor() {}
177
178 void ScheduleDAGMutation::anchor() {}
179
180 //===----------------------------------------------------------------------===//
181 // Machine Instruction Scheduling Pass and Registry
182 //===----------------------------------------------------------------------===//
183
184 MachineSchedContext::MachineSchedContext() {
185 RegClassInfo = new RegisterClassInfo();
186 }
187
188 MachineSchedContext::~MachineSchedContext() {
189 delete RegClassInfo;
190 }
191
192 namespace {
193
194 /// Base class for a machine scheduler that can run at any point.
195 class MachineSchedulerBase : public MachineSchedContext,
196 public MachineFunctionPass {
197 public:
198 MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}
199
200 void print(raw_ostream &O, const Module* = nullptr) const override;
201
202 protected:
203 void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags);
204 };
205
206 /// MachineScheduler runs after coalescing and before register allocation.
207 class MachineScheduler : public MachineSchedulerBase {
208 public:
209 MachineScheduler();
210
211 void getAnalysisUsage(AnalysisUsage &AU) const override;
212
213 bool runOnMachineFunction(MachineFunction&) override;
214
215 static char ID; // Class identification, replacement for typeinfo
216
217 protected:
218 ScheduleDAGInstrs *createMachineScheduler();
219 };
220
221 /// PostMachineScheduler runs shortly before code emission.
222 class PostMachineScheduler : public MachineSchedulerBase {
223 public:
224 PostMachineScheduler();
225
226 void getAnalysisUsage(AnalysisUsage &AU) const override;
227
228 bool runOnMachineFunction(MachineFunction&) override;
229
230 static char ID; // Class identification, replacement for typeinfo
231
232 protected:
233 ScheduleDAGInstrs *createPostMachineScheduler();
234 };
235
236 } // end anonymous namespace
237
238 char MachineScheduler::ID = 0;
239
240 char &llvm::MachineSchedulerID = MachineScheduler::ID;
241
242 INITIALIZE_PASS_BEGIN(MachineScheduler, DEBUG_TYPE,
243 "Machine Instruction Scheduler", false, false)
244 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
245 INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
246 INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
247 INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
248 INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
249 INITIALIZE_PASS_END(MachineScheduler, DEBUG_TYPE,
250 "Machine Instruction Scheduler", false, false)
251
252 MachineScheduler::MachineScheduler() : MachineSchedulerBase(ID) {
253 initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
254 }
255
256 void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
257 AU.setPreservesCFG();
258 AU.addRequired<MachineDominatorTree>();
259 AU.addRequired<MachineLoopInfo>();
260 AU.addRequired<AAResultsWrapperPass>();
261 AU.addRequired<TargetPassConfig>();
262 AU.addRequired<SlotIndexes>();
263 AU.addPreserved<SlotIndexes>();
264 AU.addRequired<LiveIntervals>();
265 AU.addPreserved<LiveIntervals>();
266 MachineFunctionPass::getAnalysisUsage(AU);
267 }
268
269 char PostMachineScheduler::ID = 0;
270
271 char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;
272
273 INITIALIZE_PASS_BEGIN(PostMachineScheduler, "postmisched",
274 "PostRA Machine Instruction Scheduler", false, false)
275 INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
276 INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
277
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 278 INITIALIZE_PASS_END(PostMachineScheduler, "postmisched", 279 "PostRA Machine Instruction Scheduler", false, false) 280 281 PostMachineScheduler::PostMachineScheduler() : MachineSchedulerBase(ID) { 282 initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry()); 283 } 284 285 void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const { 286 AU.setPreservesCFG(); 287 AU.addRequired<MachineDominatorTree>(); 288 AU.addRequired<MachineLoopInfo>(); 289 AU.addRequired<AAResultsWrapperPass>(); 290 AU.addRequired<TargetPassConfig>(); 291 MachineFunctionPass::getAnalysisUsage(AU); 292 } 293 294 MachinePassRegistry<MachineSchedRegistry::ScheduleDAGCtor> 295 MachineSchedRegistry::Registry; 296 297 /// A dummy default scheduler factory indicates whether the scheduler 298 /// is overridden on the command line. 299 static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) { 300 return nullptr; 301 } 302 303 /// MachineSchedOpt allows command line selection of the scheduler. 304 static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false, 305 RegisterPassParser<MachineSchedRegistry>> 306 MachineSchedOpt("misched", 307 cl::init(&useDefaultMachineSched), cl::Hidden, 308 cl::desc("Machine instruction scheduler to use")); 309 310 static MachineSchedRegistry 311 DefaultSchedRegistry("default", "Use the target's default scheduler choice.", 312 useDefaultMachineSched); 313 314 static cl::opt<bool> EnableMachineSched( 315 "enable-misched", 316 cl::desc("Enable the machine instruction scheduling pass."), cl::init(true), 317 cl::Hidden); 318 319 static cl::opt<bool> EnablePostRAMachineSched( 320 "enable-post-misched", 321 cl::desc("Enable the post-ra machine instruction scheduling pass."), 322 cl::init(true), cl::Hidden); 323 324 /// Decrement this iterator until reaching the top or a non-debug instr. 325 static MachineBasicBlock::const_iterator 326 priorNonDebug(MachineBasicBlock::const_iterator I, 327 MachineBasicBlock::const_iterator Beg) { 328 assert(I != Beg && "reached the top of the region, cannot decrement"); 329 while (--I != Beg) { 330 if (!I->isDebugOrPseudoInstr()) 331 break; 332 } 333 return I; 334 } 335 336 /// Non-const version. 337 static MachineBasicBlock::iterator 338 priorNonDebug(MachineBasicBlock::iterator I, 339 MachineBasicBlock::const_iterator Beg) { 340 return priorNonDebug(MachineBasicBlock::const_iterator(I), Beg) 341 .getNonConstIterator(); 342 } 343 344 /// If this iterator is a debug value, increment until reaching the End or a 345 /// non-debug instruction. 346 static MachineBasicBlock::const_iterator 347 nextIfDebug(MachineBasicBlock::const_iterator I, 348 MachineBasicBlock::const_iterator End) { 349 for(; I != End; ++I) { 350 if (!I->isDebugOrPseudoInstr()) 351 break; 352 } 353 return I; 354 } 355 356 /// Non-const version. 357 static MachineBasicBlock::iterator 358 nextIfDebug(MachineBasicBlock::iterator I, 359 MachineBasicBlock::const_iterator End) { 360 return nextIfDebug(MachineBasicBlock::const_iterator(I), End) 361 .getNonConstIterator(); 362 } 363 364 /// Instantiate a ScheduleDAGInstrs that will be owned by the caller. 365 ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() { 366 // Select the scheduler, or set the default. 367 MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt; 368 if (Ctor != useDefaultMachineSched) 369 return Ctor(this); 370 371 // Get the default scheduler set by the target for this function. 
372 ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this); 373 if (Scheduler) 374 return Scheduler; 375 376 // Default to GenericScheduler. 377 return createGenericSchedLive(this); 378 } 379 380 /// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by 381 /// the caller. We don't have a command line option to override the postRA 382 /// scheduler. The Target must configure it. 383 ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() { 384 // Get the postRA scheduler set by the target for this function. 385 ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this); 386 if (Scheduler) 387 return Scheduler; 388 389 // Default to GenericScheduler. 390 return createGenericSchedPostRA(this); 391 } 392 393 /// Top-level MachineScheduler pass driver. 394 /// 395 /// Visit blocks in function order. Divide each block into scheduling regions 396 /// and visit them bottom-up. Visiting regions bottom-up is not required, but is 397 /// consistent with the DAG builder, which traverses the interior of the 398 /// scheduling regions bottom-up. 399 /// 400 /// This design avoids exposing scheduling boundaries to the DAG builder, 401 /// simplifying the DAG builder's support for "special" target instructions. 402 /// At the same time the design allows target schedulers to operate across 403 /// scheduling boundaries, for example to bundle the boundary instructions 404 /// without reordering them. This creates complexity, because the target 405 /// scheduler must update the RegionBegin and RegionEnd positions cached by 406 /// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler 407 /// design would be to split blocks at scheduling boundaries, but LLVM has a 408 /// general bias against block splitting purely for implementation simplicity. 409 bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) { 410 if (skipFunction(mf.getFunction())) 411 return false; 412 413 if (EnableMachineSched.getNumOccurrences()) { 414 if (!EnableMachineSched) 415 return false; 416 } else if (!mf.getSubtarget().enableMachineScheduler()) 417 return false; 418 419 LLVM_DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs())); 420 421 // Initialize the context of the pass. 422 MF = &mf; 423 MLI = &getAnalysis<MachineLoopInfo>(); 424 MDT = &getAnalysis<MachineDominatorTree>(); 425 PassConfig = &getAnalysis<TargetPassConfig>(); 426 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 427 428 LIS = &getAnalysis<LiveIntervals>(); 429 430 if (VerifyScheduling) { 431 LLVM_DEBUG(LIS->dump()); 432 MF->verify(this, "Before machine scheduling."); 433 } 434 RegClassInfo->runOnMachineFunction(*MF); 435 436 // Instantiate the selected scheduler for this target, function, and 437 // optimization level. 
438 std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
439 scheduleRegions(*Scheduler, false);
440
441 LLVM_DEBUG(LIS->dump());
442 if (VerifyScheduling)
443 MF->verify(this, "After machine scheduling.");
444 return true;
445 }
446
447 bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
448 if (skipFunction(mf.getFunction()))
449 return false;
450
451 if (EnablePostRAMachineSched.getNumOccurrences()) {
452 if (!EnablePostRAMachineSched)
453 return false;
454 } else if (!mf.getSubtarget().enablePostRAMachineScheduler()) {
455 LLVM_DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
456 return false;
457 }
458 LLVM_DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));
459
460 // Initialize the context of the pass.
461 MF = &mf;
462 MLI = &getAnalysis<MachineLoopInfo>();
463 PassConfig = &getAnalysis<TargetPassConfig>();
464 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
465
466 if (VerifyScheduling)
467 MF->verify(this, "Before post machine scheduling.");
468
469 // Instantiate the selected scheduler for this target, function, and
470 // optimization level.
471 std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
472 scheduleRegions(*Scheduler, true);
473
474 if (VerifyScheduling)
475 MF->verify(this, "After post machine scheduling.");
476 return true;
477 }
478
479 /// Return true if the given instruction should not be included in a scheduling
480 /// region.
481 ///
482 /// MachineScheduler does not currently support scheduling across calls. To
483 /// handle calls, the DAG builder needs to be modified to create register
484 /// anti/output dependencies on the registers clobbered by the call's regmask
485 /// operand. In PreRA scheduling, the stack pointer adjustment already prevents
486 /// scheduling across calls. In PostRA scheduling, we need the isCall check to
487 /// enforce the boundary, but there would be no benefit to postRA scheduling
488 /// across calls this late anyway.
489 static bool isSchedBoundary(MachineBasicBlock::iterator MI,
490 MachineBasicBlock *MBB,
491 MachineFunction *MF,
492 const TargetInstrInfo *TII) {
493 return MI->isCall() || TII->isSchedulingBoundary(*MI, MBB, *MF);
494 }
495
496 /// A region of an MBB for scheduling.
497 namespace {
498 struct SchedRegion {
499 /// RegionBegin is the first instruction in the scheduling region, and
500 /// RegionEnd is either MBB->end() or the scheduling boundary after the
501 /// last instruction in the scheduling region. These iterators cannot refer
502 /// to instructions outside of the identified scheduling region because
503 /// those may be reordered before scheduling this region.
504 MachineBasicBlock::iterator RegionBegin;
505 MachineBasicBlock::iterator RegionEnd;
506 unsigned NumRegionInstrs;
507
508 SchedRegion(MachineBasicBlock::iterator B, MachineBasicBlock::iterator E,
509 unsigned N) :
510 RegionBegin(B), RegionEnd(E), NumRegionInstrs(N) {}
511 };
512 } // end anonymous namespace
513
514 using MBBRegionsVector = SmallVector<SchedRegion, 16>;
515
516 static void
517 getSchedRegions(MachineBasicBlock *MBB,
518 MBBRegionsVector &Regions,
519 bool RegionsTopDown) {
520 MachineFunction *MF = MBB->getParent();
521 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
522
523 MachineBasicBlock::iterator I = nullptr;
524 for(MachineBasicBlock::iterator RegionEnd = MBB->end();
525 RegionEnd != MBB->begin(); RegionEnd = I) {
526
527 // Avoid decrementing RegionEnd for blocks with no terminator.
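// On the first iteration RegionEnd is MBB->end(), so it is only stepped back
// when the block's final instruction is itself a boundary (typically a call or
// an instruction the target reports via isSchedulingBoundary). On later
// iterations RegionEnd is the begin of the previously found region, whose
// predecessor is a boundary, so stepping back onto that boundary keeps it
// outside the region that is about to be formed.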
528 if (RegionEnd != MBB->end() || 529 isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII)) { 530 --RegionEnd; 531 } 532 533 // The next region starts above the previous region. Look backward in the 534 // instruction stream until we find the nearest boundary. 535 unsigned NumRegionInstrs = 0; 536 I = RegionEnd; 537 for (;I != MBB->begin(); --I) { 538 MachineInstr &MI = *std::prev(I); 539 if (isSchedBoundary(&MI, &*MBB, MF, TII)) 540 break; 541 if (!MI.isDebugOrPseudoInstr()) { 542 // MBB::size() uses instr_iterator to count. Here we need a bundle to 543 // count as a single instruction. 544 ++NumRegionInstrs; 545 } 546 } 547 548 // It's possible we found a scheduling region that only has debug 549 // instructions. Don't bother scheduling these. 550 if (NumRegionInstrs != 0) 551 Regions.push_back(SchedRegion(I, RegionEnd, NumRegionInstrs)); 552 } 553 554 if (RegionsTopDown) 555 std::reverse(Regions.begin(), Regions.end()); 556 } 557 558 /// Main driver for both MachineScheduler and PostMachineScheduler. 559 void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler, 560 bool FixKillFlags) { 561 // Visit all machine basic blocks. 562 // 563 // TODO: Visit blocks in global postorder or postorder within the bottom-up 564 // loop tree. Then we can optionally compute global RegPressure. 565 for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end(); 566 MBB != MBBEnd; ++MBB) { 567 568 Scheduler.startBlock(&*MBB); 569 570 #ifndef NDEBUG 571 if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName()) 572 continue; 573 if (SchedOnlyBlock.getNumOccurrences() 574 && (int)SchedOnlyBlock != MBB->getNumber()) 575 continue; 576 #endif 577 578 // Break the block into scheduling regions [I, RegionEnd). RegionEnd 579 // points to the scheduling boundary at the bottom of the region. The DAG 580 // does not include RegionEnd, but the region does (i.e. the next 581 // RegionEnd is above the previous RegionBegin). If the current block has 582 // no terminator then RegionEnd == MBB->end() for the bottom region. 583 // 584 // All the regions of MBB are first found and stored in MBBRegions, which 585 // will be processed (MBB) top-down if initialized with true. 586 // 587 // The Scheduler may insert instructions during either schedule() or 588 // exitRegion(), even for empty regions. So the local iterators 'I' and 589 // 'RegionEnd' are invalid across these calls. Instructions must not be 590 // added to other regions than the current one without updating MBBRegions. 591 592 MBBRegionsVector MBBRegions; 593 getSchedRegions(&*MBB, MBBRegions, Scheduler.doMBBSchedRegionsTopDown()); 594 for (const SchedRegion &R : MBBRegions) { 595 MachineBasicBlock::iterator I = R.RegionBegin; 596 MachineBasicBlock::iterator RegionEnd = R.RegionEnd; 597 unsigned NumRegionInstrs = R.NumRegionInstrs; 598 599 // Notify the scheduler of the region, even if we may skip scheduling 600 // it. Perhaps it still needs to be bundled. 601 Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs); 602 603 // Skip empty scheduling regions (0 or 1 schedulable instructions). 604 if (I == RegionEnd || I == std::prev(RegionEnd)) { 605 // Close the current region. Bundle the terminator if needed. 606 // This invalidates 'RegionEnd' and 'I'. 
607 Scheduler.exitRegion(); 608 continue; 609 } 610 LLVM_DEBUG(dbgs() << "********** MI Scheduling **********\n"); 611 LLVM_DEBUG(dbgs() << MF->getName() << ":" << printMBBReference(*MBB) 612 << " " << MBB->getName() << "\n From: " << *I 613 << " To: "; 614 if (RegionEnd != MBB->end()) dbgs() << *RegionEnd; 615 else dbgs() << "End\n"; 616 dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n'); 617 if (DumpCriticalPathLength) { 618 errs() << MF->getName(); 619 errs() << ":%bb. " << MBB->getNumber(); 620 errs() << " " << MBB->getName() << " \n"; 621 } 622 623 // Schedule a region: possibly reorder instructions. 624 // This invalidates the original region iterators. 625 Scheduler.schedule(); 626 627 // Close the current region. 628 Scheduler.exitRegion(); 629 } 630 Scheduler.finishBlock(); 631 // FIXME: Ideally, no further passes should rely on kill flags. However, 632 // thumb2 size reduction is currently an exception, so the PostMIScheduler 633 // needs to do this. 634 if (FixKillFlags) 635 Scheduler.fixupKills(*MBB); 636 } 637 Scheduler.finalizeSchedule(); 638 } 639 640 void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const { 641 // unimplemented 642 } 643 644 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 645 LLVM_DUMP_METHOD void ReadyQueue::dump() const { 646 dbgs() << "Queue " << Name << ": "; 647 for (const SUnit *SU : Queue) 648 dbgs() << SU->NodeNum << " "; 649 dbgs() << "\n"; 650 } 651 #endif 652 653 //===----------------------------------------------------------------------===// 654 // ScheduleDAGMI - Basic machine instruction scheduling. This is 655 // independent of PreRA/PostRA scheduling and involves no extra book-keeping for 656 // virtual registers. 657 // ===----------------------------------------------------------------------===/ 658 659 // Provide a vtable anchor. 660 ScheduleDAGMI::~ScheduleDAGMI() = default; 661 662 /// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When 663 /// NumPredsLeft reaches zero, release the successor node. 664 /// 665 /// FIXME: Adjust SuccSU height based on MinLatency. 666 void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) { 667 SUnit *SuccSU = SuccEdge->getSUnit(); 668 669 if (SuccEdge->isWeak()) { 670 --SuccSU->WeakPredsLeft; 671 if (SuccEdge->isCluster()) 672 NextClusterSucc = SuccSU; 673 return; 674 } 675 #ifndef NDEBUG 676 if (SuccSU->NumPredsLeft == 0) { 677 dbgs() << "*** Scheduling failed! ***\n"; 678 dumpNode(*SuccSU); 679 dbgs() << " has been released too many times!\n"; 680 llvm_unreachable(nullptr); 681 } 682 #endif 683 // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However, 684 // CurrCycle may have advanced since then. 685 if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency()) 686 SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency(); 687 688 --SuccSU->NumPredsLeft; 689 if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU) 690 SchedImpl->releaseTopNode(SuccSU); 691 } 692 693 /// releaseSuccessors - Call releaseSucc on each of SU's successors. 694 void ScheduleDAGMI::releaseSuccessors(SUnit *SU) { 695 for (SDep &Succ : SU->Succs) 696 releaseSucc(SU, &Succ); 697 } 698 699 /// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When 700 /// NumSuccsLeft reaches zero, release the predecessor node. 701 /// 702 /// FIXME: Adjust PredSU height based on MinLatency. 
703 void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) { 704 SUnit *PredSU = PredEdge->getSUnit(); 705 706 if (PredEdge->isWeak()) { 707 --PredSU->WeakSuccsLeft; 708 if (PredEdge->isCluster()) 709 NextClusterPred = PredSU; 710 return; 711 } 712 #ifndef NDEBUG 713 if (PredSU->NumSuccsLeft == 0) { 714 dbgs() << "*** Scheduling failed! ***\n"; 715 dumpNode(*PredSU); 716 dbgs() << " has been released too many times!\n"; 717 llvm_unreachable(nullptr); 718 } 719 #endif 720 // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However, 721 // CurrCycle may have advanced since then. 722 if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency()) 723 PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency(); 724 725 --PredSU->NumSuccsLeft; 726 if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) 727 SchedImpl->releaseBottomNode(PredSU); 728 } 729 730 /// releasePredecessors - Call releasePred on each of SU's predecessors. 731 void ScheduleDAGMI::releasePredecessors(SUnit *SU) { 732 for (SDep &Pred : SU->Preds) 733 releasePred(SU, &Pred); 734 } 735 736 void ScheduleDAGMI::startBlock(MachineBasicBlock *bb) { 737 ScheduleDAGInstrs::startBlock(bb); 738 SchedImpl->enterMBB(bb); 739 } 740 741 void ScheduleDAGMI::finishBlock() { 742 SchedImpl->leaveMBB(); 743 ScheduleDAGInstrs::finishBlock(); 744 } 745 746 /// enterRegion - Called back from MachineScheduler::runOnMachineFunction after 747 /// crossing a scheduling boundary. [begin, end) includes all instructions in 748 /// the region, including the boundary itself and single-instruction regions 749 /// that don't get scheduled. 750 void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb, 751 MachineBasicBlock::iterator begin, 752 MachineBasicBlock::iterator end, 753 unsigned regioninstrs) 754 { 755 ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs); 756 757 SchedImpl->initPolicy(begin, end, regioninstrs); 758 } 759 760 /// This is normally called from the main scheduler loop but may also be invoked 761 /// by the scheduling strategy to perform additional code motion. 762 void ScheduleDAGMI::moveInstruction( 763 MachineInstr *MI, MachineBasicBlock::iterator InsertPos) { 764 // Advance RegionBegin if the first instruction moves down. 765 if (&*RegionBegin == MI) 766 ++RegionBegin; 767 768 // Update the instruction stream. 769 BB->splice(InsertPos, BB, MI); 770 771 // Update LiveIntervals 772 if (LIS) 773 LIS->handleMove(*MI, /*UpdateFlags=*/true); 774 775 // Recede RegionBegin if an instruction moves above the first. 776 if (RegionBegin == InsertPos) 777 RegionBegin = MI; 778 } 779 780 bool ScheduleDAGMI::checkSchedLimit() { 781 #if LLVM_ENABLE_ABI_BREAKING_CHECKS && !defined(NDEBUG) 782 if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) { 783 CurrentTop = CurrentBottom; 784 return false; 785 } 786 ++NumInstrsScheduled; 787 #endif 788 return true; 789 } 790 791 /// Per-region scheduling driver, called back from 792 /// MachineScheduler::runOnMachineFunction. This is a simplified driver that 793 /// does not consider liveness or register pressure. It is useful for PostRA 794 /// scheduling and potentially other custom schedulers. 795 void ScheduleDAGMI::schedule() { 796 LLVM_DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n"); 797 LLVM_DEBUG(SchedImpl->dumpPolicy()); 798 799 // Build the DAG. 
800 buildSchedGraph(AA); 801 802 postProcessDAG(); 803 804 SmallVector<SUnit*, 8> TopRoots, BotRoots; 805 findRootsAndBiasEdges(TopRoots, BotRoots); 806 807 LLVM_DEBUG(dump()); 808 if (PrintDAGs) dump(); 809 if (ViewMISchedDAGs) viewGraph(); 810 811 // Initialize the strategy before modifying the DAG. 812 // This may initialize a DFSResult to be used for queue priority. 813 SchedImpl->initialize(this); 814 815 // Initialize ready queues now that the DAG and priority data are finalized. 816 initQueues(TopRoots, BotRoots); 817 818 bool IsTopNode = false; 819 while (true) { 820 LLVM_DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n"); 821 SUnit *SU = SchedImpl->pickNode(IsTopNode); 822 if (!SU) break; 823 824 assert(!SU->isScheduled && "Node already scheduled"); 825 if (!checkSchedLimit()) 826 break; 827 828 MachineInstr *MI = SU->getInstr(); 829 if (IsTopNode) { 830 assert(SU->isTopReady() && "node still has unscheduled dependencies"); 831 if (&*CurrentTop == MI) 832 CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom); 833 else 834 moveInstruction(MI, CurrentTop); 835 } else { 836 assert(SU->isBottomReady() && "node still has unscheduled dependencies"); 837 MachineBasicBlock::iterator priorII = 838 priorNonDebug(CurrentBottom, CurrentTop); 839 if (&*priorII == MI) 840 CurrentBottom = priorII; 841 else { 842 if (&*CurrentTop == MI) 843 CurrentTop = nextIfDebug(++CurrentTop, priorII); 844 moveInstruction(MI, CurrentBottom); 845 CurrentBottom = MI; 846 } 847 } 848 // Notify the scheduling strategy before updating the DAG. 849 // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues 850 // runs, it can then use the accurate ReadyCycle time to determine whether 851 // newly released nodes can move to the readyQ. 852 SchedImpl->schedNode(SU, IsTopNode); 853 854 updateQueues(SU, IsTopNode); 855 } 856 assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone."); 857 858 placeDebugValues(); 859 860 LLVM_DEBUG({ 861 dbgs() << "*** Final schedule for " 862 << printMBBReference(*begin()->getParent()) << " ***\n"; 863 dumpSchedule(); 864 dbgs() << '\n'; 865 }); 866 } 867 868 /// Apply each ScheduleDAGMutation step in order. 869 void ScheduleDAGMI::postProcessDAG() { 870 for (auto &m : Mutations) 871 m->apply(this); 872 } 873 874 void ScheduleDAGMI:: 875 findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots, 876 SmallVectorImpl<SUnit*> &BotRoots) { 877 for (SUnit &SU : SUnits) { 878 assert(!SU.isBoundaryNode() && "Boundary node should not be in SUnits"); 879 880 // Order predecessors so DFSResult follows the critical path. 881 SU.biasCriticalPath(); 882 883 // A SUnit is ready to top schedule if it has no predecessors. 884 if (!SU.NumPredsLeft) 885 TopRoots.push_back(&SU); 886 // A SUnit is ready to bottom schedule if it has no successors. 887 if (!SU.NumSuccsLeft) 888 BotRoots.push_back(&SU); 889 } 890 ExitSU.biasCriticalPath(); 891 } 892 893 /// Identify DAG roots and setup scheduler queues. 894 void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots, 895 ArrayRef<SUnit*> BotRoots) { 896 NextClusterSucc = nullptr; 897 NextClusterPred = nullptr; 898 899 // Release all DAG roots for scheduling, not including EntrySU/ExitSU. 900 // 901 // Nodes with unreleased weak edges can still be roots. 902 // Release top roots in forward order. 903 for (SUnit *SU : TopRoots) 904 SchedImpl->releaseTopNode(SU); 905 906 // Release bottom roots in reverse order so the higher priority nodes appear 907 // first. This is more natural and slightly more efficient. 
908 for (SmallVectorImpl<SUnit*>::const_reverse_iterator 909 I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) { 910 SchedImpl->releaseBottomNode(*I); 911 } 912 913 releaseSuccessors(&EntrySU); 914 releasePredecessors(&ExitSU); 915 916 SchedImpl->registerRoots(); 917 918 // Advance past initial DebugValues. 919 CurrentTop = nextIfDebug(RegionBegin, RegionEnd); 920 CurrentBottom = RegionEnd; 921 } 922 923 /// Update scheduler queues after scheduling an instruction. 924 void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) { 925 // Release dependent instructions for scheduling. 926 if (IsTopNode) 927 releaseSuccessors(SU); 928 else 929 releasePredecessors(SU); 930 931 SU->isScheduled = true; 932 } 933 934 /// Reinsert any remaining debug_values, just like the PostRA scheduler. 935 void ScheduleDAGMI::placeDebugValues() { 936 // If first instruction was a DBG_VALUE then put it back. 937 if (FirstDbgValue) { 938 BB->splice(RegionBegin, BB, FirstDbgValue); 939 RegionBegin = FirstDbgValue; 940 } 941 942 for (std::vector<std::pair<MachineInstr *, MachineInstr *>>::iterator 943 DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) { 944 std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI); 945 MachineInstr *DbgValue = P.first; 946 MachineBasicBlock::iterator OrigPrevMI = P.second; 947 if (&*RegionBegin == DbgValue) 948 ++RegionBegin; 949 BB->splice(std::next(OrigPrevMI), BB, DbgValue); 950 if (RegionEnd != BB->end() && OrigPrevMI == &*RegionEnd) 951 RegionEnd = DbgValue; 952 } 953 } 954 955 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 956 static const char *scheduleTableLegend = " i: issue\n x: resource booked"; 957 958 LLVM_DUMP_METHOD void ScheduleDAGMI::dumpScheduleTraceTopDown() const { 959 // Bail off when there is no schedule model to query. 960 if (!SchedModel.hasInstrSchedModel()) 961 return; 962 963 // Nothing to show if there is no or just one instruction. 
964 if (BB->size() < 2) 965 return; 966 967 dbgs() << " * Schedule table (TopDown):\n"; 968 dbgs() << scheduleTableLegend << "\n"; 969 const unsigned FirstCycle = getSUnit(&*(std::begin(*this)))->TopReadyCycle; 970 unsigned LastCycle = getSUnit(&*(std::prev(std::end(*this))))->TopReadyCycle; 971 for (MachineInstr &MI : *this) { 972 SUnit *SU = getSUnit(&MI); 973 if (!SU) 974 continue; 975 const MCSchedClassDesc *SC = getSchedClass(SU); 976 for (TargetSchedModel::ProcResIter PI = SchedModel.getWriteProcResBegin(SC), 977 PE = SchedModel.getWriteProcResEnd(SC); 978 PI != PE; ++PI) { 979 if (SU->TopReadyCycle + PI->Cycles - 1 > LastCycle) 980 LastCycle = SU->TopReadyCycle + PI->Cycles - 1; 981 } 982 } 983 // Print the header with the cycles 984 dbgs() << llvm::left_justify("Cycle", HeaderColWidth); 985 for (unsigned C = FirstCycle; C <= LastCycle; ++C) 986 dbgs() << llvm::left_justify("| " + std::to_string(C), ColWidth); 987 dbgs() << "|\n"; 988 989 for (MachineInstr &MI : *this) { 990 SUnit *SU = getSUnit(&MI); 991 if (!SU) { 992 dbgs() << "Missing SUnit\n"; 993 continue; 994 } 995 std::string NodeName("SU("); 996 NodeName += std::to_string(SU->NodeNum) + ")"; 997 dbgs() << llvm::left_justify(NodeName, HeaderColWidth); 998 unsigned C = FirstCycle; 999 for (; C <= LastCycle; ++C) { 1000 if (C == SU->TopReadyCycle) 1001 dbgs() << llvm::left_justify("| i", ColWidth); 1002 else 1003 dbgs() << llvm::left_justify("|", ColWidth); 1004 } 1005 dbgs() << "|\n"; 1006 const MCSchedClassDesc *SC = getSchedClass(SU); 1007 1008 SmallVector<MCWriteProcResEntry, 4> ResourcesIt( 1009 make_range(SchedModel.getWriteProcResBegin(SC), 1010 SchedModel.getWriteProcResEnd(SC))); 1011 1012 if (MISchedSortResourcesInTrace) 1013 llvm::sort(ResourcesIt.begin(), ResourcesIt.end(), 1014 [](const MCWriteProcResEntry &LHS, 1015 const MCWriteProcResEntry &RHS) -> bool { 1016 return LHS.StartAtCycle < RHS.StartAtCycle || 1017 (LHS.StartAtCycle == RHS.StartAtCycle && 1018 LHS.Cycles < RHS.Cycles); 1019 }); 1020 for (const MCWriteProcResEntry &PI : ResourcesIt) { 1021 C = FirstCycle; 1022 const std::string ResName = 1023 SchedModel.getResourceName(PI.ProcResourceIdx); 1024 dbgs() << llvm::right_justify(ResName + " ", HeaderColWidth); 1025 for (; C < SU->TopReadyCycle + PI.StartAtCycle; ++C) { 1026 dbgs() << llvm::left_justify("|", ColWidth); 1027 } 1028 for (unsigned I = 0, E = PI.Cycles - PI.StartAtCycle; I != E; ++I, ++C) 1029 dbgs() << llvm::left_justify("| x", ColWidth); 1030 while (C++ <= LastCycle) 1031 dbgs() << llvm::left_justify("|", ColWidth); 1032 // Place end char 1033 dbgs() << "| \n"; 1034 } 1035 } 1036 } 1037 1038 LLVM_DUMP_METHOD void ScheduleDAGMI::dumpScheduleTraceBottomUp() const { 1039 // Bail off when there is no schedule model to query. 1040 if (!SchedModel.hasInstrSchedModel()) 1041 return; 1042 1043 // Nothing to show if there is no or just one instruction. 
1044 if (BB->size() < 2) 1045 return; 1046 1047 dbgs() << " * Schedule table (BottomUp):\n"; 1048 dbgs() << scheduleTableLegend << "\n"; 1049 1050 const int FirstCycle = getSUnit(&*(std::begin(*this)))->BotReadyCycle; 1051 int LastCycle = getSUnit(&*(std::prev(std::end(*this))))->BotReadyCycle; 1052 for (MachineInstr &MI : *this) { 1053 SUnit *SU = getSUnit(&MI); 1054 if (!SU) 1055 continue; 1056 const MCSchedClassDesc *SC = getSchedClass(SU); 1057 for (TargetSchedModel::ProcResIter PI = SchedModel.getWriteProcResBegin(SC), 1058 PE = SchedModel.getWriteProcResEnd(SC); 1059 PI != PE; ++PI) { 1060 if ((int)SU->BotReadyCycle - PI->Cycles + 1 < LastCycle) 1061 LastCycle = (int)SU->BotReadyCycle - PI->Cycles + 1; 1062 } 1063 } 1064 // Print the header with the cycles 1065 dbgs() << llvm::left_justify("Cycle", HeaderColWidth); 1066 for (int C = FirstCycle; C >= LastCycle; --C) 1067 dbgs() << llvm::left_justify("| " + std::to_string(C), ColWidth); 1068 dbgs() << "|\n"; 1069 1070 for (MachineInstr &MI : *this) { 1071 SUnit *SU = getSUnit(&MI); 1072 if (!SU) { 1073 dbgs() << "Missing SUnit\n"; 1074 continue; 1075 } 1076 std::string NodeName("SU("); 1077 NodeName += std::to_string(SU->NodeNum) + ")"; 1078 dbgs() << llvm::left_justify(NodeName, HeaderColWidth); 1079 int C = FirstCycle; 1080 for (; C >= LastCycle; --C) { 1081 if (C == (int)SU->BotReadyCycle) 1082 dbgs() << llvm::left_justify("| i", ColWidth); 1083 else 1084 dbgs() << llvm::left_justify("|", ColWidth); 1085 } 1086 dbgs() << "|\n"; 1087 const MCSchedClassDesc *SC = getSchedClass(SU); 1088 SmallVector<MCWriteProcResEntry, 4> ResourcesIt( 1089 make_range(SchedModel.getWriteProcResBegin(SC), 1090 SchedModel.getWriteProcResEnd(SC))); 1091 1092 if (MISchedSortResourcesInTrace) 1093 llvm::sort(ResourcesIt.begin(), ResourcesIt.end(), 1094 [](const MCWriteProcResEntry &LHS, 1095 const MCWriteProcResEntry &RHS) -> bool { 1096 return LHS.StartAtCycle < RHS.StartAtCycle || 1097 (LHS.StartAtCycle == RHS.StartAtCycle && 1098 LHS.Cycles < RHS.Cycles); 1099 }); 1100 for (const MCWriteProcResEntry &PI : ResourcesIt) { 1101 C = FirstCycle; 1102 const std::string ResName = 1103 SchedModel.getResourceName(PI.ProcResourceIdx); 1104 dbgs() << llvm::right_justify(ResName + " ", HeaderColWidth); 1105 for (; C > ((int)SU->BotReadyCycle - (int)PI.StartAtCycle); --C) { 1106 dbgs() << llvm::left_justify("|", ColWidth); 1107 } 1108 for (unsigned I = 0, E = PI.Cycles - PI.StartAtCycle; I != E; ++I, --C) 1109 dbgs() << llvm::left_justify("| x", ColWidth); 1110 while (C-- >= LastCycle) 1111 dbgs() << llvm::left_justify("|", ColWidth); 1112 // Place end char 1113 dbgs() << "| \n"; 1114 } 1115 } 1116 } 1117 #endif 1118 1119 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1120 LLVM_DUMP_METHOD void ScheduleDAGMI::dumpSchedule() const { 1121 if (MISchedDumpScheduleTrace) { 1122 if (ForceTopDown) 1123 dumpScheduleTraceTopDown(); 1124 else if (ForceBottomUp) 1125 dumpScheduleTraceBottomUp(); 1126 else { 1127 dbgs() << "* Schedule table (Bidirectional): not implemented\n"; 1128 } 1129 } 1130 1131 for (MachineInstr &MI : *this) { 1132 if (SUnit *SU = getSUnit(&MI)) 1133 dumpNode(*SU); 1134 else 1135 dbgs() << "Missing SUnit\n"; 1136 } 1137 } 1138 #endif 1139 1140 //===----------------------------------------------------------------------===// 1141 // ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals 1142 // preservation. 
1143 //===----------------------------------------------------------------------===// 1144 1145 ScheduleDAGMILive::~ScheduleDAGMILive() { 1146 delete DFSResult; 1147 } 1148 1149 void ScheduleDAGMILive::collectVRegUses(SUnit &SU) { 1150 const MachineInstr &MI = *SU.getInstr(); 1151 for (const MachineOperand &MO : MI.operands()) { 1152 if (!MO.isReg()) 1153 continue; 1154 if (!MO.readsReg()) 1155 continue; 1156 if (TrackLaneMasks && !MO.isUse()) 1157 continue; 1158 1159 Register Reg = MO.getReg(); 1160 if (!Reg.isVirtual()) 1161 continue; 1162 1163 // Ignore re-defs. 1164 if (TrackLaneMasks) { 1165 bool FoundDef = false; 1166 for (const MachineOperand &MO2 : MI.all_defs()) { 1167 if (MO2.getReg() == Reg && !MO2.isDead()) { 1168 FoundDef = true; 1169 break; 1170 } 1171 } 1172 if (FoundDef) 1173 continue; 1174 } 1175 1176 // Record this local VReg use. 1177 VReg2SUnitMultiMap::iterator UI = VRegUses.find(Reg); 1178 for (; UI != VRegUses.end(); ++UI) { 1179 if (UI->SU == &SU) 1180 break; 1181 } 1182 if (UI == VRegUses.end()) 1183 VRegUses.insert(VReg2SUnit(Reg, LaneBitmask::getNone(), &SU)); 1184 } 1185 } 1186 1187 /// enterRegion - Called back from MachineScheduler::runOnMachineFunction after 1188 /// crossing a scheduling boundary. [begin, end) includes all instructions in 1189 /// the region, including the boundary itself and single-instruction regions 1190 /// that don't get scheduled. 1191 void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb, 1192 MachineBasicBlock::iterator begin, 1193 MachineBasicBlock::iterator end, 1194 unsigned regioninstrs) 1195 { 1196 // ScheduleDAGMI initializes SchedImpl's per-region policy. 1197 ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs); 1198 1199 // For convenience remember the end of the liveness region. 1200 LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd); 1201 1202 SUPressureDiffs.clear(); 1203 1204 ShouldTrackPressure = SchedImpl->shouldTrackPressure(); 1205 ShouldTrackLaneMasks = SchedImpl->shouldTrackLaneMasks(); 1206 1207 assert((!ShouldTrackLaneMasks || ShouldTrackPressure) && 1208 "ShouldTrackLaneMasks requires ShouldTrackPressure"); 1209 } 1210 1211 // Setup the register pressure trackers for the top scheduled and bottom 1212 // scheduled regions. 1213 void ScheduleDAGMILive::initRegPressure() { 1214 VRegUses.clear(); 1215 VRegUses.setUniverse(MRI.getNumVirtRegs()); 1216 for (SUnit &SU : SUnits) 1217 collectVRegUses(SU); 1218 1219 TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin, 1220 ShouldTrackLaneMasks, false); 1221 BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd, 1222 ShouldTrackLaneMasks, false); 1223 1224 // Close the RPTracker to finalize live ins. 1225 RPTracker.closeRegion(); 1226 1227 LLVM_DEBUG(RPTracker.dump()); 1228 1229 // Initialize the live ins and live outs. 1230 TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs); 1231 BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs); 1232 1233 // Close one end of the tracker so we can call 1234 // getMaxUpward/DownwardPressureDelta before advancing across any 1235 // instructions. This converts currently live regs into live ins/outs. 
1236 TopRPTracker.closeTop(); 1237 BotRPTracker.closeBottom(); 1238 1239 BotRPTracker.initLiveThru(RPTracker); 1240 if (!BotRPTracker.getLiveThru().empty()) { 1241 TopRPTracker.initLiveThru(BotRPTracker.getLiveThru()); 1242 LLVM_DEBUG(dbgs() << "Live Thru: "; 1243 dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI)); 1244 }; 1245 1246 // For each live out vreg reduce the pressure change associated with other 1247 // uses of the same vreg below the live-out reaching def. 1248 updatePressureDiffs(RPTracker.getPressure().LiveOutRegs); 1249 1250 // Account for liveness generated by the region boundary. 1251 if (LiveRegionEnd != RegionEnd) { 1252 SmallVector<RegisterMaskPair, 8> LiveUses; 1253 BotRPTracker.recede(&LiveUses); 1254 updatePressureDiffs(LiveUses); 1255 } 1256 1257 LLVM_DEBUG(dbgs() << "Top Pressure:\n"; 1258 dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI); 1259 dbgs() << "Bottom Pressure:\n"; 1260 dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);); 1261 1262 assert((BotRPTracker.getPos() == RegionEnd || 1263 (RegionEnd->isDebugInstr() && 1264 BotRPTracker.getPos() == priorNonDebug(RegionEnd, RegionBegin))) && 1265 "Can't find the region bottom"); 1266 1267 // Cache the list of excess pressure sets in this region. This will also track 1268 // the max pressure in the scheduled code for these sets. 1269 RegionCriticalPSets.clear(); 1270 const std::vector<unsigned> &RegionPressure = 1271 RPTracker.getPressure().MaxSetPressure; 1272 for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) { 1273 unsigned Limit = RegClassInfo->getRegPressureSetLimit(i); 1274 if (RegionPressure[i] > Limit) { 1275 LLVM_DEBUG(dbgs() << TRI->getRegPressureSetName(i) << " Limit " << Limit 1276 << " Actual " << RegionPressure[i] << "\n"); 1277 RegionCriticalPSets.push_back(PressureChange(i)); 1278 } 1279 } 1280 LLVM_DEBUG(dbgs() << "Excess PSets: "; 1281 for (const PressureChange &RCPS 1282 : RegionCriticalPSets) dbgs() 1283 << TRI->getRegPressureSetName(RCPS.getPSet()) << " "; 1284 dbgs() << "\n"); 1285 } 1286 1287 void ScheduleDAGMILive:: 1288 updateScheduledPressure(const SUnit *SU, 1289 const std::vector<unsigned> &NewMaxPressure) { 1290 const PressureDiff &PDiff = getPressureDiff(SU); 1291 unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size(); 1292 for (const PressureChange &PC : PDiff) { 1293 if (!PC.isValid()) 1294 break; 1295 unsigned ID = PC.getPSet(); 1296 while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID) 1297 ++CritIdx; 1298 if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) { 1299 if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc() 1300 && NewMaxPressure[ID] <= (unsigned)std::numeric_limits<int16_t>::max()) 1301 RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]); 1302 } 1303 unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID); 1304 if (NewMaxPressure[ID] >= Limit - 2) { 1305 LLVM_DEBUG(dbgs() << " " << TRI->getRegPressureSetName(ID) << ": " 1306 << NewMaxPressure[ID] 1307 << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ") 1308 << Limit << "(+ " << BotRPTracker.getLiveThru()[ID] 1309 << " livethru)\n"); 1310 } 1311 } 1312 } 1313 1314 /// Update the PressureDiff array for liveness after scheduling this 1315 /// instruction. 1316 void ScheduleDAGMILive::updatePressureDiffs( 1317 ArrayRef<RegisterMaskPair> LiveUses) { 1318 for (const RegisterMaskPair &P : LiveUses) { 1319 Register Reg = P.RegUnit; 1320 /// FIXME: Currently assuming single-use physregs. 
1321 if (!Reg.isVirtual()) 1322 continue; 1323 1324 if (ShouldTrackLaneMasks) { 1325 // If the register has just become live then other uses won't change 1326 // this fact anymore => decrement pressure. 1327 // If the register has just become dead then other uses make it come 1328 // back to life => increment pressure. 1329 bool Decrement = P.LaneMask.any(); 1330 1331 for (const VReg2SUnit &V2SU 1332 : make_range(VRegUses.find(Reg), VRegUses.end())) { 1333 SUnit &SU = *V2SU.SU; 1334 if (SU.isScheduled || &SU == &ExitSU) 1335 continue; 1336 1337 PressureDiff &PDiff = getPressureDiff(&SU); 1338 PDiff.addPressureChange(Reg, Decrement, &MRI); 1339 LLVM_DEBUG(dbgs() << " UpdateRegP: SU(" << SU.NodeNum << ") " 1340 << printReg(Reg, TRI) << ':' 1341 << PrintLaneMask(P.LaneMask) << ' ' << *SU.getInstr(); 1342 dbgs() << " to "; PDiff.dump(*TRI);); 1343 } 1344 } else { 1345 assert(P.LaneMask.any()); 1346 LLVM_DEBUG(dbgs() << " LiveReg: " << printVRegOrUnit(Reg, TRI) << "\n"); 1347 // This may be called before CurrentBottom has been initialized. However, 1348 // BotRPTracker must have a valid position. We want the value live into the 1349 // instruction or live out of the block, so ask for the previous 1350 // instruction's live-out. 1351 const LiveInterval &LI = LIS->getInterval(Reg); 1352 VNInfo *VNI; 1353 MachineBasicBlock::const_iterator I = 1354 nextIfDebug(BotRPTracker.getPos(), BB->end()); 1355 if (I == BB->end()) 1356 VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB)); 1357 else { 1358 LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*I)); 1359 VNI = LRQ.valueIn(); 1360 } 1361 // RegisterPressureTracker guarantees that readsReg is true for LiveUses. 1362 assert(VNI && "No live value at use."); 1363 for (const VReg2SUnit &V2SU 1364 : make_range(VRegUses.find(Reg), VRegUses.end())) { 1365 SUnit *SU = V2SU.SU; 1366 // If this use comes before the reaching def, it cannot be a last use, 1367 // so decrease its pressure change. 1368 if (!SU->isScheduled && SU != &ExitSU) { 1369 LiveQueryResult LRQ = 1370 LI.Query(LIS->getInstructionIndex(*SU->getInstr())); 1371 if (LRQ.valueIn() == VNI) { 1372 PressureDiff &PDiff = getPressureDiff(SU); 1373 PDiff.addPressureChange(Reg, true, &MRI); 1374 LLVM_DEBUG(dbgs() << " UpdateRegP: SU(" << SU->NodeNum << ") " 1375 << *SU->getInstr(); 1376 dbgs() << " to "; PDiff.dump(*TRI);); 1377 } 1378 } 1379 } 1380 } 1381 } 1382 } 1383 1384 void ScheduleDAGMILive::dump() const { 1385 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1386 if (EntrySU.getInstr() != nullptr) 1387 dumpNodeAll(EntrySU); 1388 for (const SUnit &SU : SUnits) { 1389 dumpNodeAll(SU); 1390 if (ShouldTrackPressure) { 1391 dbgs() << " Pressure Diff : "; 1392 getPressureDiff(&SU).dump(*TRI); 1393 } 1394 dbgs() << " Single Issue : "; 1395 if (SchedModel.mustBeginGroup(SU.getInstr()) && 1396 SchedModel.mustEndGroup(SU.getInstr())) 1397 dbgs() << "true;"; 1398 else 1399 dbgs() << "false;"; 1400 dbgs() << '\n'; 1401 } 1402 if (ExitSU.getInstr() != nullptr) 1403 dumpNodeAll(ExitSU); 1404 #endif 1405 } 1406 1407 /// schedule - Called back from MachineScheduler::runOnMachineFunction 1408 /// after setting up the current scheduling region. [RegionBegin, RegionEnd) 1409 /// only includes instructions that have DAG nodes, not scheduling boundaries. 1410 /// 1411 /// This is a skeletal driver, with all the functionality pushed into helpers, 1412 /// so that it can be easily extended by experimental schedulers. 
Generally,
1413 /// implementing MachineSchedStrategy should be sufficient to implement a new
1414 /// scheduling algorithm. However, if a scheduler further subclasses
1415 /// ScheduleDAGMILive then it will want to override this virtual method in order
1416 /// to update any specialized state.
1417 void ScheduleDAGMILive::schedule() {
1418 LLVM_DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
1419 LLVM_DEBUG(SchedImpl->dumpPolicy());
1420 buildDAGWithRegPressure();
1421
1422 postProcessDAG();
1423
1424 SmallVector<SUnit*, 8> TopRoots, BotRoots;
1425 findRootsAndBiasEdges(TopRoots, BotRoots);
1426
1427 // Initialize the strategy before modifying the DAG.
1428 // This may initialize a DFSResult to be used for queue priority.
1429 SchedImpl->initialize(this);
1430
1431 LLVM_DEBUG(dump());
1432 if (PrintDAGs) dump();
1433 if (ViewMISchedDAGs) viewGraph();
1434
1435 // Initialize ready queues now that the DAG and priority data are finalized.
1436 initQueues(TopRoots, BotRoots);
1437
1438 bool IsTopNode = false;
1439 while (true) {
1440 LLVM_DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
1441 SUnit *SU = SchedImpl->pickNode(IsTopNode);
1442 if (!SU) break;
1443
1444 assert(!SU->isScheduled && "Node already scheduled");
1445 if (!checkSchedLimit())
1446 break;
1447
1448 scheduleMI(SU, IsTopNode);
1449
1450 if (DFSResult) {
1451 unsigned SubtreeID = DFSResult->getSubtreeID(SU);
1452 if (!ScheduledTrees.test(SubtreeID)) {
1453 ScheduledTrees.set(SubtreeID);
1454 DFSResult->scheduleTree(SubtreeID);
1455 SchedImpl->scheduleTree(SubtreeID);
1456 }
1457 }
1458
1459 // Notify the scheduling strategy after updating the DAG.
1460 SchedImpl->schedNode(SU, IsTopNode);
1461
1462 updateQueues(SU, IsTopNode);
1463 }
1464 assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
1465
1466 placeDebugValues();
1467
1468 LLVM_DEBUG({
1469 dbgs() << "*** Final schedule for "
1470 << printMBBReference(*begin()->getParent()) << " ***\n";
1471 dumpSchedule();
1472 dbgs() << '\n';
1473 });
1474 }
1475
1476 /// Build the DAG and set up three register pressure trackers.
1477 void ScheduleDAGMILive::buildDAGWithRegPressure() {
1478 if (!ShouldTrackPressure) {
1479 RPTracker.reset();
1480 RegionCriticalPSets.clear();
1481 buildSchedGraph(AA);
1482 return;
1483 }
1484
1485 // Initialize the register pressure tracker used by buildSchedGraph.
1486 RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
1487 ShouldTrackLaneMasks, /*TrackUntiedDefs=*/true);
1488
1489 // Account for liveness generated by the region boundary.
1490 if (LiveRegionEnd != RegionEnd)
1491 RPTracker.recede();
1492
1493 // Build the DAG, and compute current register pressure.
1494 buildSchedGraph(AA, &RPTracker, &SUPressureDiffs, LIS, ShouldTrackLaneMasks);
1495
1496 // Initialize top/bottom trackers after computing region pressure.
1497 initRegPressure();
1498 }
1499
1500 void ScheduleDAGMILive::computeDFSResult() {
1501 if (!DFSResult)
1502 DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
1503 DFSResult->clear();
1504 ScheduledTrees.clear();
1505 DFSResult->resize(SUnits.size());
1506 DFSResult->compute(SUnits);
1507 ScheduledTrees.resize(DFSResult->getNumSubtrees());
1508 }
1509
1510 /// Compute the max cyclic critical path through the DAG. The scheduling DAG
1511 /// only provides the critical path for single block loops. To handle loops that
1512 /// span blocks, we could use the vreg path latencies provided by
1513 /// MachineTraceMetrics instead.
However, MachineTraceMetrics is not currently
1514 /// available for use in the scheduler.
1515 ///
1516 /// The cyclic path estimation identifies a def-use pair that crosses the back
1517 /// edge and considers the depth and height of the nodes. For example, consider
1518 /// the following instruction sequence where each instruction has unit latency
1519 /// and defines an eponymous virtual register:
1520 ///
1521 /// a->b(a,c)->c(b)->d(c)->exit
1522 ///
1523 /// The cyclic critical path is two cycles: b->c->b
1524 /// The acyclic critical path is four cycles: a->b->c->d->exit
1525 /// LiveOutHeight = height(c) = len(c->d->exit) = 2
1526 /// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
1527 /// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
1528 /// LiveInDepth = depth(b) = len(a->b) = 1
1529 ///
1530 /// LiveOutDepth - LiveInDepth = 3 - 1 = 2
1531 /// LiveInHeight - LiveOutHeight = 4 - 2 = 2
1532 /// CyclicCriticalPath = min(2, 2) = 2
1533 ///
1534 /// This could be relevant to PostRA scheduling, but is currently implemented
1535 /// assuming LiveIntervals.
1536 unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
1537 // This only applies to single block loops.
1538 if (!BB->isSuccessor(BB))
1539 return 0;
1540
1541 unsigned MaxCyclicLatency = 0;
1542 // Visit each live out vreg def to find def/use pairs that cross iterations.
1543 for (const RegisterMaskPair &P : RPTracker.getPressure().LiveOutRegs) {
1544 Register Reg = P.RegUnit;
1545 if (!Reg.isVirtual())
1546 continue;
1547 const LiveInterval &LI = LIS->getInterval(Reg);
1548 const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
1549 if (!DefVNI)
1550 continue;
1551
1552 MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
1553 const SUnit *DefSU = getSUnit(DefMI);
1554 if (!DefSU)
1555 continue;
1556
1557 unsigned LiveOutHeight = DefSU->getHeight();
1558 unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
1559 // Visit all local users of the vreg def.
1560 for (const VReg2SUnit &V2SU
1561 : make_range(VRegUses.find(Reg), VRegUses.end())) {
1562 SUnit *SU = V2SU.SU;
1563 if (SU == &ExitSU)
1564 continue;
1565
1566 // Only consider uses of the phi.
1567 LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
1568 if (!LRQ.valueIn()->isPHIDef())
1569 continue;
1570
1571 // Assume that a path spanning two iterations is a cycle, which could
1572 // overestimate in strange cases. This allows cyclic latency to be
1573 // estimated as the minimum slack of the vreg's depth or height.
1574 unsigned CyclicLatency = 0;
1575 if (LiveOutDepth > SU->getDepth())
1576 CyclicLatency = LiveOutDepth - SU->getDepth();
1577
1578 unsigned LiveInHeight = SU->getHeight() + DefSU->Latency;
1579 if (LiveInHeight > LiveOutHeight) {
1580 if (LiveInHeight - LiveOutHeight < CyclicLatency)
1581 CyclicLatency = LiveInHeight - LiveOutHeight;
1582 } else
1583 CyclicLatency = 0;
1584
1585 LLVM_DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
1586 << SU->NodeNum << ") = " << CyclicLatency << "c\n");
1587 if (CyclicLatency > MaxCyclicLatency)
1588 MaxCyclicLatency = CyclicLatency;
1589 }
1590 }
1591 LLVM_DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
1592 return MaxCyclicLatency;
1593 }
1594
1595 /// Release ExitSU predecessors and set up scheduler queues. Re-position
1596 /// the Top RP tracker in case the region beginning has changed.
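/// (ScheduleDAGMI::initQueues advances CurrentTop past any leading debug
/// values, so the Top tracker is re-positioned to CurrentTop to stay in sync.)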
1597 void ScheduleDAGMILive::initQueues(ArrayRef<SUnit*> TopRoots, 1598 ArrayRef<SUnit*> BotRoots) { 1599 ScheduleDAGMI::initQueues(TopRoots, BotRoots); 1600 if (ShouldTrackPressure) { 1601 assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker"); 1602 TopRPTracker.setPos(CurrentTop); 1603 } 1604 } 1605 1606 /// Move an instruction and update register pressure. 1607 void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) { 1608 // Move the instruction to its new location in the instruction stream. 1609 MachineInstr *MI = SU->getInstr(); 1610 1611 if (IsTopNode) { 1612 assert(SU->isTopReady() && "node still has unscheduled dependencies"); 1613 if (&*CurrentTop == MI) 1614 CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom); 1615 else { 1616 moveInstruction(MI, CurrentTop); 1617 TopRPTracker.setPos(MI); 1618 } 1619 1620 if (ShouldTrackPressure) { 1621 // Update top scheduled pressure. 1622 RegisterOperands RegOpers; 1623 RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false); 1624 if (ShouldTrackLaneMasks) { 1625 // Adjust liveness and add missing dead+read-undef flags. 1626 SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot(); 1627 RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI); 1628 } else { 1629 // Adjust for missing dead-def flags. 1630 RegOpers.detectDeadDefs(*MI, *LIS); 1631 } 1632 1633 TopRPTracker.advance(RegOpers); 1634 assert(TopRPTracker.getPos() == CurrentTop && "out of sync"); 1635 LLVM_DEBUG(dbgs() << "Top Pressure:\n"; dumpRegSetPressure( 1636 TopRPTracker.getRegSetPressureAtPos(), TRI);); 1637 1638 updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure); 1639 } 1640 } else { 1641 assert(SU->isBottomReady() && "node still has unscheduled dependencies"); 1642 MachineBasicBlock::iterator priorII = 1643 priorNonDebug(CurrentBottom, CurrentTop); 1644 if (&*priorII == MI) 1645 CurrentBottom = priorII; 1646 else { 1647 if (&*CurrentTop == MI) { 1648 CurrentTop = nextIfDebug(++CurrentTop, priorII); 1649 TopRPTracker.setPos(CurrentTop); 1650 } 1651 moveInstruction(MI, CurrentBottom); 1652 CurrentBottom = MI; 1653 BotRPTracker.setPos(CurrentBottom); 1654 } 1655 if (ShouldTrackPressure) { 1656 RegisterOperands RegOpers; 1657 RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false); 1658 if (ShouldTrackLaneMasks) { 1659 // Adjust liveness and add missing dead+read-undef flags. 1660 SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot(); 1661 RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI); 1662 } else { 1663 // Adjust for missing dead-def flags. 1664 RegOpers.detectDeadDefs(*MI, *LIS); 1665 } 1666 1667 if (BotRPTracker.getPos() != CurrentBottom) 1668 BotRPTracker.recedeSkipDebugValues(); 1669 SmallVector<RegisterMaskPair, 8> LiveUses; 1670 BotRPTracker.recede(RegOpers, &LiveUses); 1671 assert(BotRPTracker.getPos() == CurrentBottom && "out of sync"); 1672 LLVM_DEBUG(dbgs() << "Bottom Pressure:\n"; dumpRegSetPressure( 1673 BotRPTracker.getRegSetPressureAtPos(), TRI);); 1674 1675 updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure); 1676 updatePressureDiffs(LiveUses); 1677 } 1678 } 1679 } 1680 1681 //===----------------------------------------------------------------------===// 1682 // BaseMemOpClusterMutation - DAG post-processing to cluster loads or stores. 1683 //===----------------------------------------------------------------------===// 1684 1685 namespace { 1686 1687 /// Post-process the DAG to create cluster edges between neighboring 1688 /// loads or between neighboring stores. 
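/// For example, two loads from adjacent offsets off the same base register,
///   %v0 = load %base + 0
///   %v1 = load %base + 8
/// can be given a weak Cluster edge so the scheduler keeps them back to back
/// and the target may later combine them into a paired access. (Illustrative
/// pseudo instructions; the actual pairing rules are target specific and are
/// queried via TargetInstrInfo::shouldClusterMemOps below.)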
1689 class BaseMemOpClusterMutation : public ScheduleDAGMutation { 1690 struct MemOpInfo { 1691 SUnit *SU; 1692 SmallVector<const MachineOperand *, 4> BaseOps; 1693 int64_t Offset; 1694 unsigned Width; 1695 1696 MemOpInfo(SUnit *SU, ArrayRef<const MachineOperand *> BaseOps, 1697 int64_t Offset, unsigned Width) 1698 : SU(SU), BaseOps(BaseOps.begin(), BaseOps.end()), Offset(Offset), 1699 Width(Width) {} 1700 1701 static bool Compare(const MachineOperand *const &A, 1702 const MachineOperand *const &B) { 1703 if (A->getType() != B->getType()) 1704 return A->getType() < B->getType(); 1705 if (A->isReg()) 1706 return A->getReg() < B->getReg(); 1707 if (A->isFI()) { 1708 const MachineFunction &MF = *A->getParent()->getParent()->getParent(); 1709 const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering(); 1710 bool StackGrowsDown = TFI.getStackGrowthDirection() == 1711 TargetFrameLowering::StackGrowsDown; 1712 return StackGrowsDown ? A->getIndex() > B->getIndex() 1713 : A->getIndex() < B->getIndex(); 1714 } 1715 1716 llvm_unreachable("MemOpClusterMutation only supports register or frame " 1717 "index bases."); 1718 } 1719 1720 bool operator<(const MemOpInfo &RHS) const { 1721 // FIXME: Don't compare everything twice. Maybe use C++20 three way 1722 // comparison instead when it's available. 1723 if (std::lexicographical_compare(BaseOps.begin(), BaseOps.end(), 1724 RHS.BaseOps.begin(), RHS.BaseOps.end(), 1725 Compare)) 1726 return true; 1727 if (std::lexicographical_compare(RHS.BaseOps.begin(), RHS.BaseOps.end(), 1728 BaseOps.begin(), BaseOps.end(), Compare)) 1729 return false; 1730 if (Offset != RHS.Offset) 1731 return Offset < RHS.Offset; 1732 return SU->NodeNum < RHS.SU->NodeNum; 1733 } 1734 }; 1735 1736 const TargetInstrInfo *TII; 1737 const TargetRegisterInfo *TRI; 1738 bool IsLoad; 1739 1740 public: 1741 BaseMemOpClusterMutation(const TargetInstrInfo *tii, 1742 const TargetRegisterInfo *tri, bool IsLoad) 1743 : TII(tii), TRI(tri), IsLoad(IsLoad) {} 1744 1745 void apply(ScheduleDAGInstrs *DAGInstrs) override; 1746 1747 protected: 1748 void clusterNeighboringMemOps(ArrayRef<MemOpInfo> MemOps, bool FastCluster, 1749 ScheduleDAGInstrs *DAG); 1750 void collectMemOpRecords(std::vector<SUnit> &SUnits, 1751 SmallVectorImpl<MemOpInfo> &MemOpRecords); 1752 bool groupMemOps(ArrayRef<MemOpInfo> MemOps, ScheduleDAGInstrs *DAG, 1753 DenseMap<unsigned, SmallVector<MemOpInfo, 32>> &Groups); 1754 }; 1755 1756 class StoreClusterMutation : public BaseMemOpClusterMutation { 1757 public: 1758 StoreClusterMutation(const TargetInstrInfo *tii, 1759 const TargetRegisterInfo *tri) 1760 : BaseMemOpClusterMutation(tii, tri, false) {} 1761 }; 1762 1763 class LoadClusterMutation : public BaseMemOpClusterMutation { 1764 public: 1765 LoadClusterMutation(const TargetInstrInfo *tii, const TargetRegisterInfo *tri) 1766 : BaseMemOpClusterMutation(tii, tri, true) {} 1767 }; 1768 1769 } // end anonymous namespace 1770 1771 namespace llvm { 1772 1773 std::unique_ptr<ScheduleDAGMutation> 1774 createLoadClusterDAGMutation(const TargetInstrInfo *TII, 1775 const TargetRegisterInfo *TRI) { 1776 return EnableMemOpCluster ? std::make_unique<LoadClusterMutation>(TII, TRI) 1777 : nullptr; 1778 } 1779 1780 std::unique_ptr<ScheduleDAGMutation> 1781 createStoreClusterDAGMutation(const TargetInstrInfo *TII, 1782 const TargetRegisterInfo *TRI) { 1783 return EnableMemOpCluster ? 
std::make_unique<StoreClusterMutation>(TII, TRI)
1784 : nullptr;
1785 }
1786
1787 } // end namespace llvm
1788
1789 // Sort all the loads/stores first. Then, for each load/store, check the
1790 // following loads/stores one by one until the first non-dependent one is
1791 // reached, and call the target hook to see whether they can be clustered.
1792 // If FastCluster is enabled, we assume that all the loads/stores have been
1793 // preprocessed and no longer have dependencies on each other.
1794 void BaseMemOpClusterMutation::clusterNeighboringMemOps(
1795     ArrayRef<MemOpInfo> MemOpRecords, bool FastCluster,
1796     ScheduleDAGInstrs *DAG) {
1797   // Keep track of the current cluster length and bytes for each SUnit.
1798   DenseMap<unsigned, std::pair<unsigned, unsigned>> SUnit2ClusterInfo;
1799
1800   // At this point, the `MemOpRecords` array must hold at least two mem ops.
1801   // Try to cluster the mem ops collected within the `MemOpRecords` array.
1802   for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
1803     // The decision to cluster mem ops is based on target-dependent logic.
1804     auto MemOpa = MemOpRecords[Idx];
1805
1806     // Seek the next load/store to cluster with.
1807     unsigned NextIdx = Idx + 1;
1808     for (; NextIdx < End; ++NextIdx)
1809       // Skip if MemOpb has already been clustered or has a dependency on
1810       // MemOpa.
1811       if (!SUnit2ClusterInfo.count(MemOpRecords[NextIdx].SU->NodeNum) &&
1812           (FastCluster ||
1813            (!DAG->IsReachable(MemOpRecords[NextIdx].SU, MemOpa.SU) &&
1814             !DAG->IsReachable(MemOpa.SU, MemOpRecords[NextIdx].SU))))
1815         break;
1816     if (NextIdx == End)
1817       continue;
1818
1819     auto MemOpb = MemOpRecords[NextIdx];
1820     unsigned ClusterLength = 2;
1821     unsigned CurrentClusterBytes = MemOpa.Width + MemOpb.Width;
1822     if (SUnit2ClusterInfo.count(MemOpa.SU->NodeNum)) {
1823       ClusterLength = SUnit2ClusterInfo[MemOpa.SU->NodeNum].first + 1;
1824       CurrentClusterBytes =
1825           SUnit2ClusterInfo[MemOpa.SU->NodeNum].second + MemOpb.Width;
1826     }
1827
1828     if (!TII->shouldClusterMemOps(MemOpa.BaseOps, MemOpb.BaseOps, ClusterLength,
1829                                   CurrentClusterBytes))
1830       continue;
1831
1832     SUnit *SUa = MemOpa.SU;
1833     SUnit *SUb = MemOpb.SU;
1834     if (SUa->NodeNum > SUb->NodeNum)
1835       std::swap(SUa, SUb);
1836
1837     // FIXME: Is this check really required?
1838     if (!DAG->addEdge(SUb, SDep(SUa, SDep::Cluster)))
1839       continue;
1840
1841     LLVM_DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
1842                       << SUb->NodeNum << ")\n");
1843     ++NumClustered;
1844
1845     if (IsLoad) {
1846       // Copy successor edges from SUa to SUb. Interleaving computation
1847       // dependent on SUa can prevent load combining due to register reuse.
1848       // Predecessor edges do not need to be copied from SUb to SUa since
1849       // nearby loads should have effectively the same inputs.
1850       for (const SDep &Succ : SUa->Succs) {
1851         if (Succ.getSUnit() == SUb)
1852           continue;
1853         LLVM_DEBUG(dbgs() << "  Copy Succ SU(" << Succ.getSUnit()->NodeNum
1854                           << ")\n");
1855         DAG->addEdge(Succ.getSUnit(), SDep(SUb, SDep::Artificial));
1856       }
1857     } else {
1858       // Copy predecessor edges from SUb to SUa to prevent the SUnits that SUb
1859       // depends on from being scheduled in between SUb and SUa. Successor
1860       // edges do not need to be copied from SUa to SUb since nothing will
1861       // depend on stores.
1862       // Note that we don't need to worry about memory dependencies here, as we
1863       // won't try to cluster mem ops that have any memory dependency between them.
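      // Illustrative sketch (made-up node numbers): given a cluster edge
      // SUa(3) -> SUb(9) between two stores and a predecessor P(5) -> SUb,
      // the loop below also adds the artificial edge P(5) -> SUa(3), so P can
      // no longer be scheduled in between SUa and SUb and split the cluster.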
1864       for (const SDep &Pred : SUb->Preds) {
1865         if (Pred.getSUnit() == SUa)
1866           continue;
1867         LLVM_DEBUG(dbgs() << "  Copy Pred SU(" << Pred.getSUnit()->NodeNum
1868                           << ")\n");
1869         DAG->addEdge(SUa, SDep(Pred.getSUnit(), SDep::Artificial));
1870       }
1871     }
1872
1873     SUnit2ClusterInfo[MemOpb.SU->NodeNum] = {ClusterLength,
1874                                              CurrentClusterBytes};
1875
1876     LLVM_DEBUG(dbgs() << "  Curr cluster length: " << ClusterLength
1877                       << ", Curr cluster bytes: " << CurrentClusterBytes
1878                       << "\n");
1879   }
1880 }
1881
1882 void BaseMemOpClusterMutation::collectMemOpRecords(
1883     std::vector<SUnit> &SUnits, SmallVectorImpl<MemOpInfo> &MemOpRecords) {
1884   for (auto &SU : SUnits) {
1885     if ((IsLoad && !SU.getInstr()->mayLoad()) ||
1886         (!IsLoad && !SU.getInstr()->mayStore()))
1887       continue;
1888
1889     const MachineInstr &MI = *SU.getInstr();
1890     SmallVector<const MachineOperand *, 4> BaseOps;
1891     int64_t Offset;
1892     bool OffsetIsScalable;
1893     unsigned Width;
1894     if (TII->getMemOperandsWithOffsetWidth(MI, BaseOps, Offset,
1895                                            OffsetIsScalable, Width, TRI)) {
1896       MemOpRecords.push_back(MemOpInfo(&SU, BaseOps, Offset, Width));
1897
1898       LLVM_DEBUG(dbgs() << "Num BaseOps: " << BaseOps.size() << ", Offset: "
1899                         << Offset << ", OffsetIsScalable: " << OffsetIsScalable
1900                         << ", Width: " << Width << "\n");
1901     }
1902 #ifndef NDEBUG
1903     for (const auto *Op : BaseOps)
1904       assert(Op);
1905 #endif
1906   }
1907 }
1908
1909 bool BaseMemOpClusterMutation::groupMemOps(
1910     ArrayRef<MemOpInfo> MemOps, ScheduleDAGInstrs *DAG,
1911     DenseMap<unsigned, SmallVector<MemOpInfo, 32>> &Groups) {
1912   bool FastCluster =
1913       ForceFastCluster ||
1914       MemOps.size() * DAG->SUnits.size() / 1000 > FastClusterThreshold;
1915
1916   for (const auto &MemOp : MemOps) {
1917     unsigned ChainPredID = DAG->SUnits.size();
1918     if (FastCluster) {
1919       for (const SDep &Pred : MemOp.SU->Preds) {
1920         // We only want to cluster mem ops that share the same ctrl (non-data)
1921         // pred, so that they have no ctrl dependency on each other. For store
1922         // instrs, however, we can still cluster them if the pred is a load instr.
1923         if ((Pred.isCtrl() &&
1924              (IsLoad ||
1925               (Pred.getSUnit() && Pred.getSUnit()->getInstr()->mayStore()))) &&
1926             !Pred.isArtificial()) {
1927           ChainPredID = Pred.getSUnit()->NodeNum;
1928           break;
1929         }
1930       }
1931     } else
1932       ChainPredID = 0;
1933
1934     Groups[ChainPredID].push_back(MemOp);
1935   }
1936   return FastCluster;
1937 }
1938
1939 /// Callback from DAG postProcessing to create cluster edges for loads/stores.
1940 void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAG) {
1941   // Collect all the clusterable loads/stores.
1942   SmallVector<MemOpInfo, 32> MemOpRecords;
1943   collectMemOpRecords(DAG->SUnits, MemOpRecords);
1944
1945   if (MemOpRecords.size() < 2)
1946     return;
1947
1948   // If the DAG is too complex, use a heuristic to put loads/stores without
1949   // dependencies into the same group, to avoid a compile-time blowup. Note
1950   // that some fusion pairs may be lost this way.
1951   DenseMap<unsigned, SmallVector<MemOpInfo, 32>> Groups;
1952   bool FastCluster = groupMemOps(MemOpRecords, DAG, Groups);
1953
1954   for (auto &Group : Groups) {
1955     // Sort the loads/stores so that we can stop clustering as early as
1956     // possible.
1957     llvm::sort(Group.second);
1958
1959     // Try to cluster all the neighboring loads/stores.
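    // For example (invented numbers): with 200 collected mem ops in a DAG of
    // 6000 SUnits, 200 * 6000 / 1000 = 1200 exceeds the default
    // fast-cluster-threshold of 1000, so groupMemOps() above grouped the ops
    // by their ctrl pred and the per-pair reachability checks inside
    // clusterNeighboringMemOps are skipped.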
1960 clusterNeighboringMemOps(Group.second, FastCluster, DAG); 1961 } 1962 } 1963 1964 //===----------------------------------------------------------------------===// 1965 // CopyConstrain - DAG post-processing to encourage copy elimination. 1966 //===----------------------------------------------------------------------===// 1967 1968 namespace { 1969 1970 /// Post-process the DAG to create weak edges from all uses of a copy to 1971 /// the one use that defines the copy's source vreg, most likely an induction 1972 /// variable increment. 1973 class CopyConstrain : public ScheduleDAGMutation { 1974 // Transient state. 1975 SlotIndex RegionBeginIdx; 1976 1977 // RegionEndIdx is the slot index of the last non-debug instruction in the 1978 // scheduling region. So we may have RegionBeginIdx == RegionEndIdx. 1979 SlotIndex RegionEndIdx; 1980 1981 public: 1982 CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {} 1983 1984 void apply(ScheduleDAGInstrs *DAGInstrs) override; 1985 1986 protected: 1987 void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG); 1988 }; 1989 1990 } // end anonymous namespace 1991 1992 namespace llvm { 1993 1994 std::unique_ptr<ScheduleDAGMutation> 1995 createCopyConstrainDAGMutation(const TargetInstrInfo *TII, 1996 const TargetRegisterInfo *TRI) { 1997 return std::make_unique<CopyConstrain>(TII, TRI); 1998 } 1999 2000 } // end namespace llvm 2001 2002 /// constrainLocalCopy handles two possibilities: 2003 /// 1) Local src: 2004 /// I0: = dst 2005 /// I1: src = ... 2006 /// I2: = dst 2007 /// I3: dst = src (copy) 2008 /// (create pred->succ edges I0->I1, I2->I1) 2009 /// 2010 /// 2) Local copy: 2011 /// I0: dst = src (copy) 2012 /// I1: = dst 2013 /// I2: src = ... 2014 /// I3: = dst 2015 /// (create pred->succ edges I1->I2, I3->I2) 2016 /// 2017 /// Although the MachineScheduler is currently constrained to single blocks, 2018 /// this algorithm should handle extended blocks. An EBB is a set of 2019 /// contiguously numbered blocks such that the previous block in the EBB is 2020 /// always the single predecessor. 2021 void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) { 2022 LiveIntervals *LIS = DAG->getLIS(); 2023 MachineInstr *Copy = CopySU->getInstr(); 2024 2025 // Check for pure vreg copies. 2026 const MachineOperand &SrcOp = Copy->getOperand(1); 2027 Register SrcReg = SrcOp.getReg(); 2028 if (!SrcReg.isVirtual() || !SrcOp.readsReg()) 2029 return; 2030 2031 const MachineOperand &DstOp = Copy->getOperand(0); 2032 Register DstReg = DstOp.getReg(); 2033 if (!DstReg.isVirtual() || DstOp.isDead()) 2034 return; 2035 2036 // Check if either the dest or source is local. If it's live across a back 2037 // edge, it's not local. Note that if both vregs are live across the back 2038 // edge, we cannot successfully contrain the copy without cyclic scheduling. 2039 // If both the copy's source and dest are local live intervals, then we 2040 // should treat the dest as the global for the purpose of adding 2041 // constraints. This adds edges from source's other uses to the copy. 
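  // The code below first assumes the "local src" case (LocalReg = src); if
  // src turns out not to be region-local, it retries with dst as the local
  // vreg, which corresponds to the "local copy" case in the function comment.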
2042 unsigned LocalReg = SrcReg; 2043 unsigned GlobalReg = DstReg; 2044 LiveInterval *LocalLI = &LIS->getInterval(LocalReg); 2045 if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) { 2046 LocalReg = DstReg; 2047 GlobalReg = SrcReg; 2048 LocalLI = &LIS->getInterval(LocalReg); 2049 if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) 2050 return; 2051 } 2052 LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg); 2053 2054 // Find the global segment after the start of the local LI. 2055 LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex()); 2056 // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a 2057 // local live range. We could create edges from other global uses to the local 2058 // start, but the coalescer should have already eliminated these cases, so 2059 // don't bother dealing with it. 2060 if (GlobalSegment == GlobalLI->end()) 2061 return; 2062 2063 // If GlobalSegment is killed at the LocalLI->start, the call to find() 2064 // returned the next global segment. But if GlobalSegment overlaps with 2065 // LocalLI->start, then advance to the next segment. If a hole in GlobalLI 2066 // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole. 2067 if (GlobalSegment->contains(LocalLI->beginIndex())) 2068 ++GlobalSegment; 2069 2070 if (GlobalSegment == GlobalLI->end()) 2071 return; 2072 2073 // Check if GlobalLI contains a hole in the vicinity of LocalLI. 2074 if (GlobalSegment != GlobalLI->begin()) { 2075 // Two address defs have no hole. 2076 if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end, 2077 GlobalSegment->start)) { 2078 return; 2079 } 2080 // If the prior global segment may be defined by the same two-address 2081 // instruction that also defines LocalLI, then can't make a hole here. 2082 if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start, 2083 LocalLI->beginIndex())) { 2084 return; 2085 } 2086 // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise 2087 // it would be a disconnected component in the live range. 2088 assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() && 2089 "Disconnected LRG within the scheduling region."); 2090 } 2091 MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start); 2092 if (!GlobalDef) 2093 return; 2094 2095 SUnit *GlobalSU = DAG->getSUnit(GlobalDef); 2096 if (!GlobalSU) 2097 return; 2098 2099 // GlobalDef is the bottom of the GlobalLI hole. Open the hole by 2100 // constraining the uses of the last local def to precede GlobalDef. 2101 SmallVector<SUnit*,8> LocalUses; 2102 const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex()); 2103 MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def); 2104 SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef); 2105 for (const SDep &Succ : LastLocalSU->Succs) { 2106 if (Succ.getKind() != SDep::Data || Succ.getReg() != LocalReg) 2107 continue; 2108 if (Succ.getSUnit() == GlobalSU) 2109 continue; 2110 if (!DAG->canAddEdge(GlobalSU, Succ.getSUnit())) 2111 return; 2112 LocalUses.push_back(Succ.getSUnit()); 2113 } 2114 // Open the top of the GlobalLI hole by constraining any earlier global uses 2115 // to precede the start of LocalLI. 
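  // In the "local src" example from the function comment, these are the weak
  // edges I0 -> I1 and I2 -> I1: earlier uses of the global dst are encouraged
  // to issue before the first def of the local src, which shortens the overlap
  // between the two live ranges and helps the copy get eliminated.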
2116 SmallVector<SUnit*,8> GlobalUses; 2117 MachineInstr *FirstLocalDef = 2118 LIS->getInstructionFromIndex(LocalLI->beginIndex()); 2119 SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef); 2120 for (const SDep &Pred : GlobalSU->Preds) { 2121 if (Pred.getKind() != SDep::Anti || Pred.getReg() != GlobalReg) 2122 continue; 2123 if (Pred.getSUnit() == FirstLocalSU) 2124 continue; 2125 if (!DAG->canAddEdge(FirstLocalSU, Pred.getSUnit())) 2126 return; 2127 GlobalUses.push_back(Pred.getSUnit()); 2128 } 2129 LLVM_DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n"); 2130 // Add the weak edges. 2131 for (SUnit *LU : LocalUses) { 2132 LLVM_DEBUG(dbgs() << " Local use SU(" << LU->NodeNum << ") -> SU(" 2133 << GlobalSU->NodeNum << ")\n"); 2134 DAG->addEdge(GlobalSU, SDep(LU, SDep::Weak)); 2135 } 2136 for (SUnit *GU : GlobalUses) { 2137 LLVM_DEBUG(dbgs() << " Global use SU(" << GU->NodeNum << ") -> SU(" 2138 << FirstLocalSU->NodeNum << ")\n"); 2139 DAG->addEdge(FirstLocalSU, SDep(GU, SDep::Weak)); 2140 } 2141 } 2142 2143 /// Callback from DAG postProcessing to create weak edges to encourage 2144 /// copy elimination. 2145 void CopyConstrain::apply(ScheduleDAGInstrs *DAGInstrs) { 2146 ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs); 2147 assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals"); 2148 2149 MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end()); 2150 if (FirstPos == DAG->end()) 2151 return; 2152 RegionBeginIdx = DAG->getLIS()->getInstructionIndex(*FirstPos); 2153 RegionEndIdx = DAG->getLIS()->getInstructionIndex( 2154 *priorNonDebug(DAG->end(), DAG->begin())); 2155 2156 for (SUnit &SU : DAG->SUnits) { 2157 if (!SU.getInstr()->isCopy()) 2158 continue; 2159 2160 constrainLocalCopy(&SU, static_cast<ScheduleDAGMILive*>(DAG)); 2161 } 2162 } 2163 2164 //===----------------------------------------------------------------------===// 2165 // MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler 2166 // and possibly other custom schedulers. 2167 //===----------------------------------------------------------------------===// 2168 2169 static const unsigned InvalidCycle = ~0U; 2170 2171 SchedBoundary::~SchedBoundary() { delete HazardRec; } 2172 2173 /// Given a Count of resource usage and a Latency value, return true if a 2174 /// SchedBoundary becomes resource limited. 2175 /// If we are checking after scheduling a node, we should return true when 2176 /// we just reach the resource limit. 2177 static bool checkResourceLimit(unsigned LFactor, unsigned Count, 2178 unsigned Latency, bool AfterSchedNode) { 2179 int ResCntFactor = (int)(Count - (Latency * LFactor)); 2180 if (AfterSchedNode) 2181 return ResCntFactor >= (int)LFactor; 2182 else 2183 return ResCntFactor > (int)LFactor; 2184 } 2185 2186 void SchedBoundary::reset() { 2187 // A new HazardRec is created for each DAG and owned by SchedBoundary. 2188 // Destroying and reconstructing it is very expensive though. So keep 2189 // invalid, placeholder HazardRecs. 
2190 if (HazardRec && HazardRec->isEnabled()) { 2191 delete HazardRec; 2192 HazardRec = nullptr; 2193 } 2194 Available.clear(); 2195 Pending.clear(); 2196 CheckPending = false; 2197 CurrCycle = 0; 2198 CurrMOps = 0; 2199 MinReadyCycle = std::numeric_limits<unsigned>::max(); 2200 ExpectedLatency = 0; 2201 DependentLatency = 0; 2202 RetiredMOps = 0; 2203 MaxExecutedResCount = 0; 2204 ZoneCritResIdx = 0; 2205 IsResourceLimited = false; 2206 ReservedCycles.clear(); 2207 ReservedResourceSegments.clear(); 2208 ReservedCyclesIndex.clear(); 2209 ResourceGroupSubUnitMasks.clear(); 2210 #if LLVM_ENABLE_ABI_BREAKING_CHECKS 2211 // Track the maximum number of stall cycles that could arise either from the 2212 // latency of a DAG edge or the number of cycles that a processor resource is 2213 // reserved (SchedBoundary::ReservedCycles). 2214 MaxObservedStall = 0; 2215 #endif 2216 // Reserve a zero-count for invalid CritResIdx. 2217 ExecutedResCounts.resize(1); 2218 assert(!ExecutedResCounts[0] && "nonzero count for bad resource"); 2219 } 2220 2221 void SchedRemainder:: 2222 init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) { 2223 reset(); 2224 if (!SchedModel->hasInstrSchedModel()) 2225 return; 2226 RemainingCounts.resize(SchedModel->getNumProcResourceKinds()); 2227 for (SUnit &SU : DAG->SUnits) { 2228 const MCSchedClassDesc *SC = DAG->getSchedClass(&SU); 2229 RemIssueCount += SchedModel->getNumMicroOps(SU.getInstr(), SC) 2230 * SchedModel->getMicroOpFactor(); 2231 for (TargetSchedModel::ProcResIter 2232 PI = SchedModel->getWriteProcResBegin(SC), 2233 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) { 2234 unsigned PIdx = PI->ProcResourceIdx; 2235 unsigned Factor = SchedModel->getResourceFactor(PIdx); 2236 assert(PI->Cycles >= PI->StartAtCycle); 2237 RemainingCounts[PIdx] += (Factor * (PI->Cycles - PI->StartAtCycle)); 2238 } 2239 } 2240 } 2241 2242 void SchedBoundary:: 2243 init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) { 2244 reset(); 2245 DAG = dag; 2246 SchedModel = smodel; 2247 Rem = rem; 2248 if (SchedModel->hasInstrSchedModel()) { 2249 unsigned ResourceCount = SchedModel->getNumProcResourceKinds(); 2250 ReservedCyclesIndex.resize(ResourceCount); 2251 ExecutedResCounts.resize(ResourceCount); 2252 ResourceGroupSubUnitMasks.resize(ResourceCount, APInt(ResourceCount, 0)); 2253 unsigned NumUnits = 0; 2254 2255 for (unsigned i = 0; i < ResourceCount; ++i) { 2256 ReservedCyclesIndex[i] = NumUnits; 2257 NumUnits += SchedModel->getProcResource(i)->NumUnits; 2258 if (isUnbufferedGroup(i)) { 2259 auto SubUnits = SchedModel->getProcResource(i)->SubUnitsIdxBegin; 2260 for (unsigned U = 0, UE = SchedModel->getProcResource(i)->NumUnits; 2261 U != UE; ++U) 2262 ResourceGroupSubUnitMasks[i].setBit(SubUnits[U]); 2263 } 2264 } 2265 2266 ReservedCycles.resize(NumUnits, InvalidCycle); 2267 } 2268 } 2269 2270 /// Compute the stall cycles based on this SUnit's ready time. Heuristics treat 2271 /// these "soft stalls" differently than the hard stall cycles based on CPU 2272 /// resources and computed by checkHazard(). A fully in-order model 2273 /// (MicroOpBufferSize==0) will not make use of this since instructions are not 2274 /// available for scheduling until they are ready. However, a weaker in-order 2275 /// model may use this for heuristics. For example, if a processor has in-order 2276 /// behavior when reading certain resources, this may come into play. 
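/// As a concrete case (invented numbers): an unbuffered SU whose ready cycle
/// is 7 while the zone is at CurrCycle 5 yields 2 soft-stall cycles for the
/// heuristics to weigh; buffered SUs always yield 0.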
2277 unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) { 2278 if (!SU->isUnbuffered) 2279 return 0; 2280 2281 unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle); 2282 if (ReadyCycle > CurrCycle) 2283 return ReadyCycle - CurrCycle; 2284 return 0; 2285 } 2286 2287 /// Compute the next cycle at which the given processor resource unit 2288 /// can be scheduled. 2289 unsigned SchedBoundary::getNextResourceCycleByInstance(unsigned InstanceIdx, 2290 unsigned Cycles, 2291 unsigned StartAtCycle) { 2292 if (SchedModel && SchedModel->enableIntervals()) { 2293 if (isTop()) 2294 return ReservedResourceSegments[InstanceIdx].getFirstAvailableAtFromTop( 2295 CurrCycle, StartAtCycle, Cycles); 2296 2297 return ReservedResourceSegments[InstanceIdx].getFirstAvailableAtFromBottom( 2298 CurrCycle, StartAtCycle, Cycles); 2299 } 2300 2301 unsigned NextUnreserved = ReservedCycles[InstanceIdx]; 2302 // If this resource has never been used, always return cycle zero. 2303 if (NextUnreserved == InvalidCycle) 2304 return 0; 2305 // For bottom-up scheduling add the cycles needed for the current operation. 2306 if (!isTop()) 2307 NextUnreserved += Cycles; 2308 return NextUnreserved; 2309 } 2310 2311 /// Compute the next cycle at which the given processor resource can be 2312 /// scheduled. Returns the next cycle and the index of the processor resource 2313 /// instance in the reserved cycles vector. 2314 std::pair<unsigned, unsigned> 2315 SchedBoundary::getNextResourceCycle(const MCSchedClassDesc *SC, unsigned PIdx, 2316 unsigned Cycles, unsigned StartAtCycle) { 2317 2318 unsigned MinNextUnreserved = InvalidCycle; 2319 unsigned InstanceIdx = 0; 2320 unsigned StartIndex = ReservedCyclesIndex[PIdx]; 2321 unsigned NumberOfInstances = SchedModel->getProcResource(PIdx)->NumUnits; 2322 assert(NumberOfInstances > 0 && 2323 "Cannot have zero instances of a ProcResource"); 2324 2325 if (isUnbufferedGroup(PIdx)) { 2326 // If any subunits are used by the instruction, report that the resource 2327 // group is available at 0, effectively removing the group record from 2328 // hazarding and basing the hazarding decisions on the subunit records. 2329 // Otherwise, choose the first available instance from among the subunits. 2330 // Specifications which assign cycles to both the subunits and the group or 2331 // which use an unbuffered group with buffered subunits will appear to 2332 // schedule strangely. In the first case, the additional cycles for the 2333 // group will be ignored. In the second, the group will be ignored 2334 // entirely. 
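    // Example with a made-up model: group "ALUPipes" has subunits ALU0 and
    // ALU1. A sched class that writes ALU0 explicitly makes the group report
    // cycle 0 here, so the hazarding is based on ALU0's own record; a class
    // that only names the group instead picks whichever of ALU0/ALU1 becomes
    // free first in the loop below.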
2335 for (const MCWriteProcResEntry &PE : 2336 make_range(SchedModel->getWriteProcResBegin(SC), 2337 SchedModel->getWriteProcResEnd(SC))) 2338 if (ResourceGroupSubUnitMasks[PIdx][PE.ProcResourceIdx]) 2339 return std::make_pair(0u, StartIndex); 2340 2341 auto SubUnits = SchedModel->getProcResource(PIdx)->SubUnitsIdxBegin; 2342 for (unsigned I = 0, End = NumberOfInstances; I < End; ++I) { 2343 unsigned NextUnreserved, NextInstanceIdx; 2344 std::tie(NextUnreserved, NextInstanceIdx) = 2345 getNextResourceCycle(SC, SubUnits[I], Cycles, StartAtCycle); 2346 if (MinNextUnreserved > NextUnreserved) { 2347 InstanceIdx = NextInstanceIdx; 2348 MinNextUnreserved = NextUnreserved; 2349 } 2350 } 2351 return std::make_pair(MinNextUnreserved, InstanceIdx); 2352 } 2353 2354 for (unsigned I = StartIndex, End = StartIndex + NumberOfInstances; I < End; 2355 ++I) { 2356 unsigned NextUnreserved = 2357 getNextResourceCycleByInstance(I, Cycles, StartAtCycle); 2358 if (MinNextUnreserved > NextUnreserved) { 2359 InstanceIdx = I; 2360 MinNextUnreserved = NextUnreserved; 2361 } 2362 } 2363 return std::make_pair(MinNextUnreserved, InstanceIdx); 2364 } 2365 2366 /// Does this SU have a hazard within the current instruction group. 2367 /// 2368 /// The scheduler supports two modes of hazard recognition. The first is the 2369 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that 2370 /// supports highly complicated in-order reservation tables 2371 /// (ScoreboardHazardRecognizer) and arbitrary target-specific logic. 2372 /// 2373 /// The second is a streamlined mechanism that checks for hazards based on 2374 /// simple counters that the scheduler itself maintains. It explicitly checks 2375 /// for instruction dispatch limitations, including the number of micro-ops that 2376 /// can dispatch per cycle. 2377 /// 2378 /// TODO: Also check whether the SU must start a new group. 2379 bool SchedBoundary::checkHazard(SUnit *SU) { 2380 if (HazardRec->isEnabled() 2381 && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) { 2382 return true; 2383 } 2384 2385 unsigned uops = SchedModel->getNumMicroOps(SU->getInstr()); 2386 if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) { 2387 LLVM_DEBUG(dbgs() << " SU(" << SU->NodeNum << ") uops=" 2388 << SchedModel->getNumMicroOps(SU->getInstr()) << '\n'); 2389 return true; 2390 } 2391 2392 if (CurrMOps > 0 && 2393 ((isTop() && SchedModel->mustBeginGroup(SU->getInstr())) || 2394 (!isTop() && SchedModel->mustEndGroup(SU->getInstr())))) { 2395 LLVM_DEBUG(dbgs() << " hazard: SU(" << SU->NodeNum << ") must " 2396 << (isTop() ? 
"begin" : "end") << " group\n"); 2397 return true; 2398 } 2399 2400 if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) { 2401 const MCSchedClassDesc *SC = DAG->getSchedClass(SU); 2402 for (const MCWriteProcResEntry &PE : 2403 make_range(SchedModel->getWriteProcResBegin(SC), 2404 SchedModel->getWriteProcResEnd(SC))) { 2405 unsigned ResIdx = PE.ProcResourceIdx; 2406 unsigned Cycles = PE.Cycles; 2407 unsigned StartAtCycle = PE.StartAtCycle; 2408 unsigned NRCycle, InstanceIdx; 2409 std::tie(NRCycle, InstanceIdx) = 2410 getNextResourceCycle(SC, ResIdx, Cycles, StartAtCycle); 2411 if (NRCycle > CurrCycle) { 2412 #if LLVM_ENABLE_ABI_BREAKING_CHECKS 2413 MaxObservedStall = std::max(Cycles, MaxObservedStall); 2414 #endif 2415 LLVM_DEBUG(dbgs() << " SU(" << SU->NodeNum << ") " 2416 << SchedModel->getResourceName(ResIdx) 2417 << '[' << InstanceIdx - ReservedCyclesIndex[ResIdx] << ']' 2418 << "=" << NRCycle << "c\n"); 2419 return true; 2420 } 2421 } 2422 } 2423 return false; 2424 } 2425 2426 // Find the unscheduled node in ReadySUs with the highest latency. 2427 unsigned SchedBoundary:: 2428 findMaxLatency(ArrayRef<SUnit*> ReadySUs) { 2429 SUnit *LateSU = nullptr; 2430 unsigned RemLatency = 0; 2431 for (SUnit *SU : ReadySUs) { 2432 unsigned L = getUnscheduledLatency(SU); 2433 if (L > RemLatency) { 2434 RemLatency = L; 2435 LateSU = SU; 2436 } 2437 } 2438 if (LateSU) { 2439 LLVM_DEBUG(dbgs() << Available.getName() << " RemLatency SU(" 2440 << LateSU->NodeNum << ") " << RemLatency << "c\n"); 2441 } 2442 return RemLatency; 2443 } 2444 2445 // Count resources in this zone and the remaining unscheduled 2446 // instruction. Return the max count, scaled. Set OtherCritIdx to the critical 2447 // resource index, or zero if the zone is issue limited. 2448 unsigned SchedBoundary:: 2449 getOtherResourceCount(unsigned &OtherCritIdx) { 2450 OtherCritIdx = 0; 2451 if (!SchedModel->hasInstrSchedModel()) 2452 return 0; 2453 2454 unsigned OtherCritCount = Rem->RemIssueCount 2455 + (RetiredMOps * SchedModel->getMicroOpFactor()); 2456 LLVM_DEBUG(dbgs() << " " << Available.getName() << " + Remain MOps: " 2457 << OtherCritCount / SchedModel->getMicroOpFactor() << '\n'); 2458 for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds(); 2459 PIdx != PEnd; ++PIdx) { 2460 unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx]; 2461 if (OtherCount > OtherCritCount) { 2462 OtherCritCount = OtherCount; 2463 OtherCritIdx = PIdx; 2464 } 2465 } 2466 if (OtherCritIdx) { 2467 LLVM_DEBUG( 2468 dbgs() << " " << Available.getName() << " + Remain CritRes: " 2469 << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx) 2470 << " " << SchedModel->getResourceName(OtherCritIdx) << "\n"); 2471 } 2472 return OtherCritCount; 2473 } 2474 2475 void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle, bool InPQueue, 2476 unsigned Idx) { 2477 assert(SU->getInstr() && "Scheduled SUnit must have instr"); 2478 2479 #if LLVM_ENABLE_ABI_BREAKING_CHECKS 2480 // ReadyCycle was been bumped up to the CurrCycle when this node was 2481 // scheduled, but CurrCycle may have been eagerly advanced immediately after 2482 // scheduling, so may now be greater than ReadyCycle. 2483 if (ReadyCycle > CurrCycle) 2484 MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall); 2485 #endif 2486 2487 if (ReadyCycle < MinReadyCycle) 2488 MinReadyCycle = ReadyCycle; 2489 2490 // Check for interlocks first. 
For the purpose of other heuristics, an 2491 // instruction that cannot issue appears as if it's not in the ReadyQueue. 2492 bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0; 2493 bool HazardDetected = (!IsBuffered && ReadyCycle > CurrCycle) || 2494 checkHazard(SU) || (Available.size() >= ReadyListLimit); 2495 2496 if (!HazardDetected) { 2497 Available.push(SU); 2498 2499 if (InPQueue) 2500 Pending.remove(Pending.begin() + Idx); 2501 return; 2502 } 2503 2504 if (!InPQueue) 2505 Pending.push(SU); 2506 } 2507 2508 /// Move the boundary of scheduled code by one cycle. 2509 void SchedBoundary::bumpCycle(unsigned NextCycle) { 2510 if (SchedModel->getMicroOpBufferSize() == 0) { 2511 assert(MinReadyCycle < std::numeric_limits<unsigned>::max() && 2512 "MinReadyCycle uninitialized"); 2513 if (MinReadyCycle > NextCycle) 2514 NextCycle = MinReadyCycle; 2515 } 2516 // Update the current micro-ops, which will issue in the next cycle. 2517 unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle); 2518 CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps; 2519 2520 // Decrement DependentLatency based on the next cycle. 2521 if ((NextCycle - CurrCycle) > DependentLatency) 2522 DependentLatency = 0; 2523 else 2524 DependentLatency -= (NextCycle - CurrCycle); 2525 2526 if (!HazardRec->isEnabled()) { 2527 // Bypass HazardRec virtual calls. 2528 CurrCycle = NextCycle; 2529 } else { 2530 // Bypass getHazardType calls in case of long latency. 2531 for (; CurrCycle != NextCycle; ++CurrCycle) { 2532 if (isTop()) 2533 HazardRec->AdvanceCycle(); 2534 else 2535 HazardRec->RecedeCycle(); 2536 } 2537 } 2538 CheckPending = true; 2539 IsResourceLimited = 2540 checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(), 2541 getScheduledLatency(), true); 2542 2543 LLVM_DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() 2544 << '\n'); 2545 } 2546 2547 void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) { 2548 ExecutedResCounts[PIdx] += Count; 2549 if (ExecutedResCounts[PIdx] > MaxExecutedResCount) 2550 MaxExecutedResCount = ExecutedResCounts[PIdx]; 2551 } 2552 2553 /// Add the given processor resource to this scheduled zone. 2554 /// 2555 /// \param Cycles indicates the number of consecutive (non-pipelined) cycles 2556 /// during which this resource is consumed. 2557 /// 2558 /// \return the next cycle at which the instruction may execute without 2559 /// oversubscribing resources. 2560 unsigned SchedBoundary::countResource(const MCSchedClassDesc *SC, unsigned PIdx, 2561 unsigned Cycles, unsigned NextCycle, 2562 unsigned StartAtCycle) { 2563 unsigned Factor = SchedModel->getResourceFactor(PIdx); 2564 unsigned Count = Factor * (Cycles - StartAtCycle); 2565 LLVM_DEBUG(dbgs() << " " << SchedModel->getResourceName(PIdx) << " +" 2566 << Cycles << "x" << Factor << "u\n"); 2567 2568 // Update Executed resources counts. 2569 incExecutedResources(PIdx, Count); 2570 assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted"); 2571 Rem->RemainingCounts[PIdx] -= Count; 2572 2573 // Check if this resource exceeds the current critical resource. If so, it 2574 // becomes the critical resource. 
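  // Worked example (invented numbers): a resource with factor 2 used for 3
  // cycles starting at cycle 0 contributes Count = 2 * (3 - 0) = 6 factor
  // units; if that pushes getResourceCount(PIdx) past getCriticalCount(), the
  // check below promotes PIdx to the zone's critical resource.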
2575 if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) { 2576 ZoneCritResIdx = PIdx; 2577 LLVM_DEBUG(dbgs() << " *** Critical resource " 2578 << SchedModel->getResourceName(PIdx) << ": " 2579 << getResourceCount(PIdx) / SchedModel->getLatencyFactor() 2580 << "c\n"); 2581 } 2582 // For reserved resources, record the highest cycle using the resource. 2583 unsigned NextAvailable, InstanceIdx; 2584 std::tie(NextAvailable, InstanceIdx) = 2585 getNextResourceCycle(SC, PIdx, Cycles, StartAtCycle); 2586 if (NextAvailable > CurrCycle) { 2587 LLVM_DEBUG(dbgs() << " Resource conflict: " 2588 << SchedModel->getResourceName(PIdx) 2589 << '[' << InstanceIdx - ReservedCyclesIndex[PIdx] << ']' 2590 << " reserved until @" << NextAvailable << "\n"); 2591 } 2592 return NextAvailable; 2593 } 2594 2595 /// Move the boundary of scheduled code by one SUnit. 2596 void SchedBoundary::bumpNode(SUnit *SU) { 2597 // Update the reservation table. 2598 if (HazardRec->isEnabled()) { 2599 if (!isTop() && SU->isCall) { 2600 // Calls are scheduled with their preceding instructions. For bottom-up 2601 // scheduling, clear the pipeline state before emitting. 2602 HazardRec->Reset(); 2603 } 2604 HazardRec->EmitInstruction(SU); 2605 // Scheduling an instruction may have made pending instructions available. 2606 CheckPending = true; 2607 } 2608 // checkHazard should prevent scheduling multiple instructions per cycle that 2609 // exceed the issue width. 2610 const MCSchedClassDesc *SC = DAG->getSchedClass(SU); 2611 unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr()); 2612 assert( 2613 (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) && 2614 "Cannot schedule this instruction's MicroOps in the current cycle."); 2615 2616 unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle); 2617 LLVM_DEBUG(dbgs() << " Ready @" << ReadyCycle << "c\n"); 2618 2619 unsigned NextCycle = CurrCycle; 2620 switch (SchedModel->getMicroOpBufferSize()) { 2621 case 0: 2622 assert(ReadyCycle <= CurrCycle && "Broken PendingQueue"); 2623 break; 2624 case 1: 2625 if (ReadyCycle > NextCycle) { 2626 NextCycle = ReadyCycle; 2627 LLVM_DEBUG(dbgs() << " *** Stall until: " << ReadyCycle << "\n"); 2628 } 2629 break; 2630 default: 2631 // We don't currently model the OOO reorder buffer, so consider all 2632 // scheduled MOps to be "retired". We do loosely model in-order resource 2633 // latency. If this instruction uses an in-order resource, account for any 2634 // likely stall cycles. 2635 if (SU->isUnbuffered && ReadyCycle > NextCycle) 2636 NextCycle = ReadyCycle; 2637 break; 2638 } 2639 RetiredMOps += IncMOps; 2640 2641 // Update resource counts and critical resource. 2642 if (SchedModel->hasInstrSchedModel()) { 2643 unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor(); 2644 assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted"); 2645 Rem->RemIssueCount -= DecRemIssue; 2646 if (ZoneCritResIdx) { 2647 // Scale scheduled micro-ops for comparing with the critical resource. 2648 unsigned ScaledMOps = 2649 RetiredMOps * SchedModel->getMicroOpFactor(); 2650 2651 // If scaled micro-ops are now more than the previous critical resource by 2652 // a full cycle, then micro-ops issue becomes critical. 
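      // e.g. (invented numbers): MicroOpFactor 1 and RetiredMOps 10 give
      // ScaledMOps = 10; with the critical resource count at 8 and
      // LatencyFactor 2, 10 - 8 >= 2 holds, so ZoneCritResIdx is reset to 0
      // below and micro-op issue is treated as the critical "resource".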
2653 if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx)) 2654 >= (int)SchedModel->getLatencyFactor()) { 2655 ZoneCritResIdx = 0; 2656 LLVM_DEBUG(dbgs() << " *** Critical resource NumMicroOps: " 2657 << ScaledMOps / SchedModel->getLatencyFactor() 2658 << "c\n"); 2659 } 2660 } 2661 for (TargetSchedModel::ProcResIter 2662 PI = SchedModel->getWriteProcResBegin(SC), 2663 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) { 2664 unsigned RCycle = countResource(SC, PI->ProcResourceIdx, PI->Cycles, 2665 NextCycle, PI->StartAtCycle); 2666 if (RCycle > NextCycle) 2667 NextCycle = RCycle; 2668 } 2669 if (SU->hasReservedResource) { 2670 // For reserved resources, record the highest cycle using the resource. 2671 // For top-down scheduling, this is the cycle in which we schedule this 2672 // instruction plus the number of cycles the operations reserves the 2673 // resource. For bottom-up is it simply the instruction's cycle. 2674 for (TargetSchedModel::ProcResIter 2675 PI = SchedModel->getWriteProcResBegin(SC), 2676 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) { 2677 unsigned PIdx = PI->ProcResourceIdx; 2678 if (SchedModel->getProcResource(PIdx)->BufferSize == 0) { 2679 2680 if (SchedModel && SchedModel->enableIntervals()) { 2681 unsigned ReservedUntil, InstanceIdx; 2682 std::tie(ReservedUntil, InstanceIdx) = 2683 getNextResourceCycle(SC, PIdx, PI->Cycles, PI->StartAtCycle); 2684 if (isTop()) { 2685 ReservedResourceSegments[InstanceIdx].add( 2686 ResourceSegments::getResourceIntervalTop( 2687 NextCycle, PI->StartAtCycle, PI->Cycles), 2688 MIResourceCutOff); 2689 } else { 2690 ReservedResourceSegments[InstanceIdx].add( 2691 ResourceSegments::getResourceIntervalBottom( 2692 NextCycle, PI->StartAtCycle, PI->Cycles), 2693 MIResourceCutOff); 2694 } 2695 } else { 2696 2697 unsigned ReservedUntil, InstanceIdx; 2698 std::tie(ReservedUntil, InstanceIdx) = 2699 getNextResourceCycle(SC, PIdx, 0, PI->StartAtCycle); 2700 if (isTop()) { 2701 ReservedCycles[InstanceIdx] = 2702 std::max(ReservedUntil, NextCycle + PI->Cycles); 2703 } else 2704 ReservedCycles[InstanceIdx] = NextCycle; 2705 } 2706 } 2707 } 2708 } 2709 } 2710 // Update ExpectedLatency and DependentLatency. 2711 unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency; 2712 unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency; 2713 if (SU->getDepth() > TopLatency) { 2714 TopLatency = SU->getDepth(); 2715 LLVM_DEBUG(dbgs() << " " << Available.getName() << " TopLatency SU(" 2716 << SU->NodeNum << ") " << TopLatency << "c\n"); 2717 } 2718 if (SU->getHeight() > BotLatency) { 2719 BotLatency = SU->getHeight(); 2720 LLVM_DEBUG(dbgs() << " " << Available.getName() << " BotLatency SU(" 2721 << SU->NodeNum << ") " << BotLatency << "c\n"); 2722 } 2723 // If we stall for any reason, bump the cycle. 2724 if (NextCycle > CurrCycle) 2725 bumpCycle(NextCycle); 2726 else 2727 // After updating ZoneCritResIdx and ExpectedLatency, check if we're 2728 // resource limited. If a stall occurred, bumpCycle does this. 2729 IsResourceLimited = 2730 checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(), 2731 getScheduledLatency(), true); 2732 2733 // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle 2734 // resets CurrMOps. Loop to handle instructions with more MOps than issue in 2735 // one cycle. Since we commonly reach the max MOps here, opportunistically 2736 // bump the cycle to avoid uselessly checking everything in the readyQ. 
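  // e.g. (invented numbers): with IssueWidth 4, CurrMOps 3 and IncMOps 3, the
  // increment below leaves CurrMOps at 6; the loop at the end then calls
  // bumpCycle() once, after which CurrMOps is 6 - 4 = 2 for the new cycle.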
2737 CurrMOps += IncMOps; 2738 2739 // Bump the cycle count for issue group constraints. 2740 // This must be done after NextCycle has been adjust for all other stalls. 2741 // Calling bumpCycle(X) will reduce CurrMOps by one issue group and set 2742 // currCycle to X. 2743 if ((isTop() && SchedModel->mustEndGroup(SU->getInstr())) || 2744 (!isTop() && SchedModel->mustBeginGroup(SU->getInstr()))) { 2745 LLVM_DEBUG(dbgs() << " Bump cycle to " << (isTop() ? "end" : "begin") 2746 << " group\n"); 2747 bumpCycle(++NextCycle); 2748 } 2749 2750 while (CurrMOps >= SchedModel->getIssueWidth()) { 2751 LLVM_DEBUG(dbgs() << " *** Max MOps " << CurrMOps << " at cycle " 2752 << CurrCycle << '\n'); 2753 bumpCycle(++NextCycle); 2754 } 2755 LLVM_DEBUG(dumpScheduledState()); 2756 } 2757 2758 /// Release pending ready nodes in to the available queue. This makes them 2759 /// visible to heuristics. 2760 void SchedBoundary::releasePending() { 2761 // If the available queue is empty, it is safe to reset MinReadyCycle. 2762 if (Available.empty()) 2763 MinReadyCycle = std::numeric_limits<unsigned>::max(); 2764 2765 // Check to see if any of the pending instructions are ready to issue. If 2766 // so, add them to the available queue. 2767 for (unsigned I = 0, E = Pending.size(); I < E; ++I) { 2768 SUnit *SU = *(Pending.begin() + I); 2769 unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle; 2770 2771 if (ReadyCycle < MinReadyCycle) 2772 MinReadyCycle = ReadyCycle; 2773 2774 if (Available.size() >= ReadyListLimit) 2775 break; 2776 2777 releaseNode(SU, ReadyCycle, true, I); 2778 if (E != Pending.size()) { 2779 --I; 2780 --E; 2781 } 2782 } 2783 CheckPending = false; 2784 } 2785 2786 /// Remove SU from the ready set for this boundary. 2787 void SchedBoundary::removeReady(SUnit *SU) { 2788 if (Available.isInQueue(SU)) 2789 Available.remove(Available.find(SU)); 2790 else { 2791 assert(Pending.isInQueue(SU) && "bad ready count"); 2792 Pending.remove(Pending.find(SU)); 2793 } 2794 } 2795 2796 /// If this queue only has one ready candidate, return it. As a side effect, 2797 /// defer any nodes that now hit a hazard, and advance the cycle until at least 2798 /// one node is ready. If multiple instructions are ready, return NULL. 2799 SUnit *SchedBoundary::pickOnlyChoice() { 2800 if (CheckPending) 2801 releasePending(); 2802 2803 // Defer any ready instrs that now have a hazard. 2804 for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) { 2805 if (checkHazard(*I)) { 2806 Pending.push(*I); 2807 I = Available.remove(I); 2808 continue; 2809 } 2810 ++I; 2811 } 2812 for (unsigned i = 0; Available.empty(); ++i) { 2813 // FIXME: Re-enable assert once PR20057 is resolved. 2814 // assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) && 2815 // "permanent hazard"); 2816 (void)i; 2817 bumpCycle(CurrCycle + 1); 2818 releasePending(); 2819 } 2820 2821 LLVM_DEBUG(Pending.dump()); 2822 LLVM_DEBUG(Available.dump()); 2823 2824 if (Available.size() == 1) 2825 return *Available.begin(); 2826 return nullptr; 2827 } 2828 2829 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 2830 2831 /// Dump the content of the \ref ReservedCycles vector for the 2832 /// resources that are used in the basic block. 
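/// Each line of output has the form "<ResourceName>(<UnitIdx>) = <Cycle>",
/// or "{ }" / an interval list when resource intervals are enabled; resource
/// names are target specific.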
2833 /// 2834 LLVM_DUMP_METHOD void SchedBoundary::dumpReservedCycles() const { 2835 if (!SchedModel->hasInstrSchedModel()) 2836 return; 2837 2838 unsigned ResourceCount = SchedModel->getNumProcResourceKinds(); 2839 unsigned StartIdx = 0; 2840 2841 for (unsigned ResIdx = 0; ResIdx < ResourceCount; ++ResIdx) { 2842 const unsigned NumUnits = SchedModel->getProcResource(ResIdx)->NumUnits; 2843 std::string ResName = SchedModel->getResourceName(ResIdx); 2844 for (unsigned UnitIdx = 0; UnitIdx < NumUnits; ++UnitIdx) { 2845 dbgs() << ResName << "(" << UnitIdx << ") = "; 2846 if (SchedModel && SchedModel->enableIntervals()) { 2847 if (ReservedResourceSegments.count(StartIdx + UnitIdx)) 2848 dbgs() << ReservedResourceSegments.at(StartIdx + UnitIdx); 2849 else 2850 dbgs() << "{ }\n"; 2851 } else 2852 dbgs() << ReservedCycles[StartIdx + UnitIdx] << "\n"; 2853 } 2854 StartIdx += NumUnits; 2855 } 2856 } 2857 2858 // This is useful information to dump after bumpNode. 2859 // Note that the Queue contents are more useful before pickNodeFromQueue. 2860 LLVM_DUMP_METHOD void SchedBoundary::dumpScheduledState() const { 2861 unsigned ResFactor; 2862 unsigned ResCount; 2863 if (ZoneCritResIdx) { 2864 ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx); 2865 ResCount = getResourceCount(ZoneCritResIdx); 2866 } else { 2867 ResFactor = SchedModel->getMicroOpFactor(); 2868 ResCount = RetiredMOps * ResFactor; 2869 } 2870 unsigned LFactor = SchedModel->getLatencyFactor(); 2871 dbgs() << Available.getName() << " @" << CurrCycle << "c\n" 2872 << " Retired: " << RetiredMOps; 2873 dbgs() << "\n Executed: " << getExecutedCount() / LFactor << "c"; 2874 dbgs() << "\n Critical: " << ResCount / LFactor << "c, " 2875 << ResCount / ResFactor << " " 2876 << SchedModel->getResourceName(ZoneCritResIdx) 2877 << "\n ExpectedLatency: " << ExpectedLatency << "c\n" 2878 << (IsResourceLimited ? " - Resource" : " - Latency") 2879 << " limited.\n"; 2880 if (MISchedDumpReservedCycles) 2881 dumpReservedCycles(); 2882 } 2883 #endif 2884 2885 //===----------------------------------------------------------------------===// 2886 // GenericScheduler - Generic implementation of MachineSchedStrategy. 2887 //===----------------------------------------------------------------------===// 2888 2889 void GenericSchedulerBase::SchedCandidate:: 2890 initResourceDelta(const ScheduleDAGMI *DAG, 2891 const TargetSchedModel *SchedModel) { 2892 if (!Policy.ReduceResIdx && !Policy.DemandResIdx) 2893 return; 2894 2895 const MCSchedClassDesc *SC = DAG->getSchedClass(SU); 2896 for (TargetSchedModel::ProcResIter 2897 PI = SchedModel->getWriteProcResBegin(SC), 2898 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) { 2899 if (PI->ProcResourceIdx == Policy.ReduceResIdx) 2900 ResDelta.CritResources += PI->Cycles; 2901 if (PI->ProcResourceIdx == Policy.DemandResIdx) 2902 ResDelta.DemandedResources += PI->Cycles; 2903 } 2904 } 2905 2906 /// Compute remaining latency. We need this both to determine whether the 2907 /// overall schedule has become latency-limited and whether the instructions 2908 /// outside this zone are resource or latency limited. 
2909 /// 2910 /// The "dependent" latency is updated incrementally during scheduling as the 2911 /// max height/depth of scheduled nodes minus the cycles since it was 2912 /// scheduled: 2913 /// DLat = max (N.depth - (CurrCycle - N.ReadyCycle) for N in Zone 2914 /// 2915 /// The "independent" latency is the max ready queue depth: 2916 /// ILat = max N.depth for N in Available|Pending 2917 /// 2918 /// RemainingLatency is the greater of independent and dependent latency. 2919 /// 2920 /// These computations are expensive, especially in DAGs with many edges, so 2921 /// only do them if necessary. 2922 static unsigned computeRemLatency(SchedBoundary &CurrZone) { 2923 unsigned RemLatency = CurrZone.getDependentLatency(); 2924 RemLatency = std::max(RemLatency, 2925 CurrZone.findMaxLatency(CurrZone.Available.elements())); 2926 RemLatency = std::max(RemLatency, 2927 CurrZone.findMaxLatency(CurrZone.Pending.elements())); 2928 return RemLatency; 2929 } 2930 2931 /// Returns true if the current cycle plus remaning latency is greater than 2932 /// the critical path in the scheduling region. 2933 bool GenericSchedulerBase::shouldReduceLatency(const CandPolicy &Policy, 2934 SchedBoundary &CurrZone, 2935 bool ComputeRemLatency, 2936 unsigned &RemLatency) const { 2937 // The current cycle is already greater than the critical path, so we are 2938 // already latency limited and don't need to compute the remaining latency. 2939 if (CurrZone.getCurrCycle() > Rem.CriticalPath) 2940 return true; 2941 2942 // If we haven't scheduled anything yet, then we aren't latency limited. 2943 if (CurrZone.getCurrCycle() == 0) 2944 return false; 2945 2946 if (ComputeRemLatency) 2947 RemLatency = computeRemLatency(CurrZone); 2948 2949 return RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath; 2950 } 2951 2952 /// Set the CandPolicy given a scheduling zone given the current resources and 2953 /// latencies inside and outside the zone. 2954 void GenericSchedulerBase::setPolicy(CandPolicy &Policy, bool IsPostRA, 2955 SchedBoundary &CurrZone, 2956 SchedBoundary *OtherZone) { 2957 // Apply preemptive heuristics based on the total latency and resources 2958 // inside and outside this zone. Potential stalls should be considered before 2959 // following this policy. 2960 2961 // Compute the critical resource outside the zone. 2962 unsigned OtherCritIdx = 0; 2963 unsigned OtherCount = 2964 OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0; 2965 2966 bool OtherResLimited = false; 2967 unsigned RemLatency = 0; 2968 bool RemLatencyComputed = false; 2969 if (SchedModel->hasInstrSchedModel() && OtherCount != 0) { 2970 RemLatency = computeRemLatency(CurrZone); 2971 RemLatencyComputed = true; 2972 OtherResLimited = checkResourceLimit(SchedModel->getLatencyFactor(), 2973 OtherCount, RemLatency, false); 2974 } 2975 2976 // Schedule aggressively for latency in PostRA mode. We don't check for 2977 // acyclic latency during PostRA, and highly out-of-order processors will 2978 // skip PostRA scheduling. 2979 if (!OtherResLimited && 2980 (IsPostRA || shouldReduceLatency(Policy, CurrZone, !RemLatencyComputed, 2981 RemLatency))) { 2982 Policy.ReduceLatency |= true; 2983 LLVM_DEBUG(dbgs() << " " << CurrZone.Available.getName() 2984 << " RemainingLatency " << RemLatency << " + " 2985 << CurrZone.getCurrCycle() << "c > CritPath " 2986 << Rem.CriticalPath << "\n"); 2987 } 2988 // If the same resource is limiting inside and outside the zone, do nothing. 
2989 if (CurrZone.getZoneCritResIdx() == OtherCritIdx) 2990 return; 2991 2992 LLVM_DEBUG(if (CurrZone.isResourceLimited()) { 2993 dbgs() << " " << CurrZone.Available.getName() << " ResourceLimited: " 2994 << SchedModel->getResourceName(CurrZone.getZoneCritResIdx()) << "\n"; 2995 } if (OtherResLimited) dbgs() 2996 << " RemainingLimit: " 2997 << SchedModel->getResourceName(OtherCritIdx) << "\n"; 2998 if (!CurrZone.isResourceLimited() && !OtherResLimited) dbgs() 2999 << " Latency limited both directions.\n"); 3000 3001 if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx) 3002 Policy.ReduceResIdx = CurrZone.getZoneCritResIdx(); 3003 3004 if (OtherResLimited) 3005 Policy.DemandResIdx = OtherCritIdx; 3006 } 3007 3008 #ifndef NDEBUG 3009 const char *GenericSchedulerBase::getReasonStr( 3010 GenericSchedulerBase::CandReason Reason) { 3011 switch (Reason) { 3012 case NoCand: return "NOCAND "; 3013 case Only1: return "ONLY1 "; 3014 case PhysReg: return "PHYS-REG "; 3015 case RegExcess: return "REG-EXCESS"; 3016 case RegCritical: return "REG-CRIT "; 3017 case Stall: return "STALL "; 3018 case Cluster: return "CLUSTER "; 3019 case Weak: return "WEAK "; 3020 case RegMax: return "REG-MAX "; 3021 case ResourceReduce: return "RES-REDUCE"; 3022 case ResourceDemand: return "RES-DEMAND"; 3023 case TopDepthReduce: return "TOP-DEPTH "; 3024 case TopPathReduce: return "TOP-PATH "; 3025 case BotHeightReduce:return "BOT-HEIGHT"; 3026 case BotPathReduce: return "BOT-PATH "; 3027 case NextDefUse: return "DEF-USE "; 3028 case NodeOrder: return "ORDER "; 3029 }; 3030 llvm_unreachable("Unknown reason!"); 3031 } 3032 3033 void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) { 3034 PressureChange P; 3035 unsigned ResIdx = 0; 3036 unsigned Latency = 0; 3037 switch (Cand.Reason) { 3038 default: 3039 break; 3040 case RegExcess: 3041 P = Cand.RPDelta.Excess; 3042 break; 3043 case RegCritical: 3044 P = Cand.RPDelta.CriticalMax; 3045 break; 3046 case RegMax: 3047 P = Cand.RPDelta.CurrentMax; 3048 break; 3049 case ResourceReduce: 3050 ResIdx = Cand.Policy.ReduceResIdx; 3051 break; 3052 case ResourceDemand: 3053 ResIdx = Cand.Policy.DemandResIdx; 3054 break; 3055 case TopDepthReduce: 3056 Latency = Cand.SU->getDepth(); 3057 break; 3058 case TopPathReduce: 3059 Latency = Cand.SU->getHeight(); 3060 break; 3061 case BotHeightReduce: 3062 Latency = Cand.SU->getHeight(); 3063 break; 3064 case BotPathReduce: 3065 Latency = Cand.SU->getDepth(); 3066 break; 3067 } 3068 dbgs() << " Cand SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason); 3069 if (P.isValid()) 3070 dbgs() << " " << TRI->getRegPressureSetName(P.getPSet()) 3071 << ":" << P.getUnitInc() << " "; 3072 else 3073 dbgs() << " "; 3074 if (ResIdx) 3075 dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " "; 3076 else 3077 dbgs() << " "; 3078 if (Latency) 3079 dbgs() << " " << Latency << " cycles "; 3080 else 3081 dbgs() << " "; 3082 dbgs() << '\n'; 3083 } 3084 #endif 3085 3086 namespace llvm { 3087 /// Return true if this heuristic determines order. 3088 /// TODO: Consider refactor return type of these functions as integer or enum, 3089 /// as we may need to differentiate whether TryCand is better than Cand. 
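/// A strategy's candidate comparison typically chains these helpers and stops
/// as soon as one of them sets a reason, e.g. (sketch only, not the exact
/// GenericScheduler code; NextClusterSU is a hypothetical local):
///   if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU,
///                  TryCand, Cand, GenericSchedulerBase::Cluster))
///     return;
///   if (tryLess(TryCand.SU->NodeNum, Cand.SU->NodeNum, TryCand, Cand,
///               GenericSchedulerBase::NodeOrder))
///     return;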
3090 bool tryLess(int TryVal, int CandVal, 3091 GenericSchedulerBase::SchedCandidate &TryCand, 3092 GenericSchedulerBase::SchedCandidate &Cand, 3093 GenericSchedulerBase::CandReason Reason) { 3094 if (TryVal < CandVal) { 3095 TryCand.Reason = Reason; 3096 return true; 3097 } 3098 if (TryVal > CandVal) { 3099 if (Cand.Reason > Reason) 3100 Cand.Reason = Reason; 3101 return true; 3102 } 3103 return false; 3104 } 3105 3106 bool tryGreater(int TryVal, int CandVal, 3107 GenericSchedulerBase::SchedCandidate &TryCand, 3108 GenericSchedulerBase::SchedCandidate &Cand, 3109 GenericSchedulerBase::CandReason Reason) { 3110 if (TryVal > CandVal) { 3111 TryCand.Reason = Reason; 3112 return true; 3113 } 3114 if (TryVal < CandVal) { 3115 if (Cand.Reason > Reason) 3116 Cand.Reason = Reason; 3117 return true; 3118 } 3119 return false; 3120 } 3121 3122 bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand, 3123 GenericSchedulerBase::SchedCandidate &Cand, 3124 SchedBoundary &Zone) { 3125 if (Zone.isTop()) { 3126 // Prefer the candidate with the lesser depth, but only if one of them has 3127 // depth greater than the total latency scheduled so far, otherwise either 3128 // of them could be scheduled now with no stall. 3129 if (std::max(TryCand.SU->getDepth(), Cand.SU->getDepth()) > 3130 Zone.getScheduledLatency()) { 3131 if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(), 3132 TryCand, Cand, GenericSchedulerBase::TopDepthReduce)) 3133 return true; 3134 } 3135 if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(), 3136 TryCand, Cand, GenericSchedulerBase::TopPathReduce)) 3137 return true; 3138 } else { 3139 // Prefer the candidate with the lesser height, but only if one of them has 3140 // height greater than the total latency scheduled so far, otherwise either 3141 // of them could be scheduled now with no stall. 3142 if (std::max(TryCand.SU->getHeight(), Cand.SU->getHeight()) > 3143 Zone.getScheduledLatency()) { 3144 if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(), 3145 TryCand, Cand, GenericSchedulerBase::BotHeightReduce)) 3146 return true; 3147 } 3148 if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(), 3149 TryCand, Cand, GenericSchedulerBase::BotPathReduce)) 3150 return true; 3151 } 3152 return false; 3153 } 3154 } // end namespace llvm 3155 3156 static void tracePick(GenericSchedulerBase::CandReason Reason, bool IsTop) { 3157 LLVM_DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ") 3158 << GenericSchedulerBase::getReasonStr(Reason) << '\n'); 3159 } 3160 3161 static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand) { 3162 tracePick(Cand.Reason, Cand.AtTop); 3163 } 3164 3165 void GenericScheduler::initialize(ScheduleDAGMI *dag) { 3166 assert(dag->hasVRegLiveness() && 3167 "(PreRA)GenericScheduler needs vreg liveness"); 3168 DAG = static_cast<ScheduleDAGMILive*>(dag); 3169 SchedModel = DAG->getSchedModel(); 3170 TRI = DAG->TRI; 3171 3172 if (RegionPolicy.ComputeDFSResult) 3173 DAG->computeDFSResult(); 3174 3175 Rem.init(DAG, SchedModel); 3176 Top.init(DAG, SchedModel, &Rem); 3177 Bot.init(DAG, SchedModel, &Rem); 3178 3179 // Initialize resource counts. 3180 3181 // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or 3182 // are disabled, then these HazardRecs will be disabled. 
3183 const InstrItineraryData *Itin = SchedModel->getInstrItineraries(); 3184 if (!Top.HazardRec) { 3185 Top.HazardRec = 3186 DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer( 3187 Itin, DAG); 3188 } 3189 if (!Bot.HazardRec) { 3190 Bot.HazardRec = 3191 DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer( 3192 Itin, DAG); 3193 } 3194 TopCand.SU = nullptr; 3195 BotCand.SU = nullptr; 3196 } 3197 3198 /// Initialize the per-region scheduling policy. 3199 void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin, 3200 MachineBasicBlock::iterator End, 3201 unsigned NumRegionInstrs) { 3202 const MachineFunction &MF = *Begin->getMF(); 3203 const TargetLowering *TLI = MF.getSubtarget().getTargetLowering(); 3204 3205 // Avoid setting up the register pressure tracker for small regions to save 3206 // compile time. As a rough heuristic, only track pressure when the number of 3207 // schedulable instructions exceeds half the integer register file. 3208 RegionPolicy.ShouldTrackPressure = true; 3209 for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) { 3210 MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT; 3211 if (TLI->isTypeLegal(LegalIntVT)) { 3212 unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs( 3213 TLI->getRegClassFor(LegalIntVT)); 3214 RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2); 3215 } 3216 } 3217 3218 // For generic targets, we default to bottom-up, because it's simpler and more 3219 // compile-time optimizations have been implemented in that direction. 3220 RegionPolicy.OnlyBottomUp = true; 3221 3222 // Allow the subtarget to override default policy. 3223 MF.getSubtarget().overrideSchedPolicy(RegionPolicy, NumRegionInstrs); 3224 3225 // After subtarget overrides, apply command line options. 3226 if (!EnableRegPressure) { 3227 RegionPolicy.ShouldTrackPressure = false; 3228 RegionPolicy.ShouldTrackLaneMasks = false; 3229 } 3230 3231 // Check -misched-topdown/bottomup can force or unforce scheduling direction. 3232 // e.g. -misched-bottomup=false allows scheduling in both directions. 3233 assert((!ForceTopDown || !ForceBottomUp) && 3234 "-misched-topdown incompatible with -misched-bottomup"); 3235 if (ForceBottomUp.getNumOccurrences() > 0) { 3236 RegionPolicy.OnlyBottomUp = ForceBottomUp; 3237 if (RegionPolicy.OnlyBottomUp) 3238 RegionPolicy.OnlyTopDown = false; 3239 } 3240 if (ForceTopDown.getNumOccurrences() > 0) { 3241 RegionPolicy.OnlyTopDown = ForceTopDown; 3242 if (RegionPolicy.OnlyTopDown) 3243 RegionPolicy.OnlyBottomUp = false; 3244 } 3245 } 3246 3247 void GenericScheduler::dumpPolicy() const { 3248 // Cannot completely remove virtual function even in release mode. 3249 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 3250 dbgs() << "GenericScheduler RegionPolicy: " 3251 << " ShouldTrackPressure=" << RegionPolicy.ShouldTrackPressure 3252 << " OnlyTopDown=" << RegionPolicy.OnlyTopDown 3253 << " OnlyBottomUp=" << RegionPolicy.OnlyBottomUp 3254 << "\n"; 3255 #endif 3256 } 3257 3258 /// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic 3259 /// critical path by more cycles than it takes to drain the instruction buffer. 3260 /// We estimate an upper bounds on in-flight instructions as: 3261 /// 3262 /// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height ) 3263 /// InFlightIterations = AcyclicPath / CyclesPerIteration 3264 /// InFlightResources = InFlightIterations * LoopResources 3265 /// 3266 /// TODO: Check execution resources in addition to IssueCount. 
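/// As a worked example with hypothetical, unscaled numbers: if CyclicCritPath
/// is 10 cycles, the acyclic CriticalPath is 40 cycles, and each iteration
/// issues 8 micro-ops, then roughly 40 / 10 = 4 iterations, i.e. about
/// 4 * 8 = 32 micro-ops, must be in flight to hide the acyclic path. If the
/// subtarget's micro-op buffer holds fewer micro-ops than that, the region is
/// marked IsAcyclicLatencyLimited and latency is prioritized aggressively.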
3267 void GenericScheduler::checkAcyclicLatency() { 3268 if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath) 3269 return; 3270 3271 // Scaled number of cycles per loop iteration. 3272 unsigned IterCount = 3273 std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(), 3274 Rem.RemIssueCount); 3275 // Scaled acyclic critical path. 3276 unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor(); 3277 // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop 3278 unsigned InFlightCount = 3279 (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount; 3280 unsigned BufferLimit = 3281 SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor(); 3282 3283 Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit; 3284 3285 LLVM_DEBUG( 3286 dbgs() << "IssueCycles=" 3287 << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c " 3288 << "IterCycles=" << IterCount / SchedModel->getLatencyFactor() 3289 << "c NumIters=" << (AcyclicCount + IterCount - 1) / IterCount 3290 << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor() 3291 << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n"; 3292 if (Rem.IsAcyclicLatencyLimited) dbgs() << " ACYCLIC LATENCY LIMIT\n"); 3293 } 3294 3295 void GenericScheduler::registerRoots() { 3296 Rem.CriticalPath = DAG->ExitSU.getDepth(); 3297 3298 // Some roots may not feed into ExitSU. Check all of them in case. 3299 for (const SUnit *SU : Bot.Available) { 3300 if (SU->getDepth() > Rem.CriticalPath) 3301 Rem.CriticalPath = SU->getDepth(); 3302 } 3303 LLVM_DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n'); 3304 if (DumpCriticalPathLength) { 3305 errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n"; 3306 } 3307 3308 if (EnableCyclicPath && SchedModel->getMicroOpBufferSize() > 0) { 3309 Rem.CyclicCritPath = DAG->computeCyclicCriticalPath(); 3310 checkAcyclicLatency(); 3311 } 3312 } 3313 3314 namespace llvm { 3315 bool tryPressure(const PressureChange &TryP, 3316 const PressureChange &CandP, 3317 GenericSchedulerBase::SchedCandidate &TryCand, 3318 GenericSchedulerBase::SchedCandidate &Cand, 3319 GenericSchedulerBase::CandReason Reason, 3320 const TargetRegisterInfo *TRI, 3321 const MachineFunction &MF) { 3322 // If one candidate decreases and the other increases, go with it. 3323 // Invalid candidates have UnitInc==0. 3324 if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand, 3325 Reason)) { 3326 return true; 3327 } 3328 // Do not compare the magnitude of pressure changes between top and bottom 3329 // boundary. 3330 if (Cand.AtTop != TryCand.AtTop) 3331 return false; 3332 3333 // If both candidates affect the same set in the same boundary, go with the 3334 // smallest increase. 3335 unsigned TryPSet = TryP.getPSetOrMax(); 3336 unsigned CandPSet = CandP.getPSetOrMax(); 3337 if (TryPSet == CandPSet) { 3338 return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand, 3339 Reason); 3340 } 3341 3342 int TryRank = TryP.isValid() ? TRI->getRegPressureSetScore(MF, TryPSet) : 3343 std::numeric_limits<int>::max(); 3344 3345 int CandRank = CandP.isValid() ? TRI->getRegPressureSetScore(MF, CandPSet) : 3346 std::numeric_limits<int>::max(); 3347 3348 // If the candidates are decreasing pressure, reverse priority. 3349 if (TryP.getUnitInc() < 0) 3350 std::swap(TryRank, CandRank); 3351 return tryGreater(TryRank, CandRank, TryCand, Cand, Reason); 3352 } 3353 3354 unsigned getWeakLeft(const SUnit *SU, bool isTop) { 3355 return (isTop) ? 
SU->WeakPredsLeft : SU->WeakSuccsLeft; 3356 } 3357 3358 /// Minimize physical register live ranges. Regalloc wants them adjacent to 3359 /// their physreg def/use. 3360 /// 3361 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf 3362 /// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled 3363 /// with the operation that produces or consumes the physreg. We'll do this when 3364 /// regalloc has support for parallel copies. 3365 int biasPhysReg(const SUnit *SU, bool isTop) { 3366 const MachineInstr *MI = SU->getInstr(); 3367 3368 if (MI->isCopy()) { 3369 unsigned ScheduledOper = isTop ? 1 : 0; 3370 unsigned UnscheduledOper = isTop ? 0 : 1; 3371 // If we have already scheduled the physreg produce/consumer, immediately 3372 // schedule the copy. 3373 if (MI->getOperand(ScheduledOper).getReg().isPhysical()) 3374 return 1; 3375 // If the physreg is at the boundary, defer it. Otherwise schedule it 3376 // immediately to free the dependent. We can hoist the copy later. 3377 bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft; 3378 if (MI->getOperand(UnscheduledOper).getReg().isPhysical()) 3379 return AtBoundary ? -1 : 1; 3380 } 3381 3382 if (MI->isMoveImmediate()) { 3383 // If we have a move immediate and all successors have been assigned, bias 3384 // towards scheduling this later. Make sure all register defs are to 3385 // physical registers. 3386 bool DoBias = true; 3387 for (const MachineOperand &Op : MI->defs()) { 3388 if (Op.isReg() && !Op.getReg().isPhysical()) { 3389 DoBias = false; 3390 break; 3391 } 3392 } 3393 3394 if (DoBias) 3395 return isTop ? -1 : 1; 3396 } 3397 3398 return 0; 3399 } 3400 } // end namespace llvm 3401 3402 void GenericScheduler::initCandidate(SchedCandidate &Cand, SUnit *SU, 3403 bool AtTop, 3404 const RegPressureTracker &RPTracker, 3405 RegPressureTracker &TempTracker) { 3406 Cand.SU = SU; 3407 Cand.AtTop = AtTop; 3408 if (DAG->isTrackingPressure()) { 3409 if (AtTop) { 3410 TempTracker.getMaxDownwardPressureDelta( 3411 Cand.SU->getInstr(), 3412 Cand.RPDelta, 3413 DAG->getRegionCriticalPSets(), 3414 DAG->getRegPressure().MaxSetPressure); 3415 } else { 3416 if (VerifyScheduling) { 3417 TempTracker.getMaxUpwardPressureDelta( 3418 Cand.SU->getInstr(), 3419 &DAG->getPressureDiff(Cand.SU), 3420 Cand.RPDelta, 3421 DAG->getRegionCriticalPSets(), 3422 DAG->getRegPressure().MaxSetPressure); 3423 } else { 3424 RPTracker.getUpwardPressureDelta( 3425 Cand.SU->getInstr(), 3426 DAG->getPressureDiff(Cand.SU), 3427 Cand.RPDelta, 3428 DAG->getRegionCriticalPSets(), 3429 DAG->getRegPressure().MaxSetPressure); 3430 } 3431 } 3432 } 3433 LLVM_DEBUG(if (Cand.RPDelta.Excess.isValid()) dbgs() 3434 << " Try SU(" << Cand.SU->NodeNum << ") " 3435 << TRI->getRegPressureSetName(Cand.RPDelta.Excess.getPSet()) << ":" 3436 << Cand.RPDelta.Excess.getUnitInc() << "\n"); 3437 } 3438 3439 /// Apply a set of heuristics to a new candidate. Heuristics are currently 3440 /// hierarchical. This may be more efficient than a graduated cost model because 3441 /// we don't need to evaluate all aspects of the model for each node in the 3442 /// queue. But it's really done to make the heuristics easier to debug and 3443 /// statistically analyze. 3444 /// 3445 /// \param Cand provides the policy and current best candidate. 3446 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized. 3447 /// \param Zone describes the scheduled zone that we are extending, or nullptr 3448 /// if Cand is from a different zone than TryCand. 
3449 /// \return \c true if TryCand is better than Cand (Reason is NOT NoCand)
3450 bool GenericScheduler::tryCandidate(SchedCandidate &Cand,
3451                                     SchedCandidate &TryCand,
3452                                     SchedBoundary *Zone) const {
3453   // Initialize the candidate if needed.
3454   if (!Cand.isValid()) {
3455     TryCand.Reason = NodeOrder;
3456     return true;
3457   }
3458
3459   // Bias PhysReg Defs and copies to their uses and defs, respectively.
3460   if (tryGreater(biasPhysReg(TryCand.SU, TryCand.AtTop),
3461                  biasPhysReg(Cand.SU, Cand.AtTop), TryCand, Cand, PhysReg))
3462     return TryCand.Reason != NoCand;
3463
3464   // Avoid exceeding the target's limit.
3465   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
3466                                                Cand.RPDelta.Excess,
3467                                                TryCand, Cand, RegExcess, TRI,
3468                                                DAG->MF))
3469     return TryCand.Reason != NoCand;
3470
3471   // Avoid increasing the max critical pressure in the scheduled region.
3472   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
3473                                                Cand.RPDelta.CriticalMax,
3474                                                TryCand, Cand, RegCritical, TRI,
3475                                                DAG->MF))
3476     return TryCand.Reason != NoCand;
3477
3478   // We only compare a subset of features when comparing nodes between the
3479   // Top and Bottom boundary. Some properties are simply incomparable; in many
3480   // other instances we should only override the other boundary if something
3481   // is a clear good pick on one boundary. Skip heuristics that are more
3482   // "tie-breaking" in nature.
3483   bool SameBoundary = Zone != nullptr;
3484   if (SameBoundary) {
3485     // For loops that are acyclic path limited, aggressively schedule for
3486     // latency. Within a single cycle, whenever CurrMOps > 0, allow normal
3487     // heuristics to take precedence.
3488     if (Rem.IsAcyclicLatencyLimited && !Zone->getCurrMOps() &&
3489         tryLatency(TryCand, Cand, *Zone))
3490       return TryCand.Reason != NoCand;
3491
3492     // Prioritize instructions that read unbuffered resources by stall cycles.
3493     if (tryLess(Zone->getLatencyStallCycles(TryCand.SU),
3494                 Zone->getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
3495       return TryCand.Reason != NoCand;
3496   }
3497
3498   // Keep clustered nodes together to encourage downstream peephole
3499   // optimizations which may reduce resource requirements.
3500   //
3501   // This is a best effort to set things up for a post-RA pass. Optimizations
3502   // like generating loads of multiple registers should ideally be done within
3503   // the scheduler pass by combining the loads during DAG postprocessing.
3504   const SUnit *CandNextClusterSU =
3505       Cand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
3506   const SUnit *TryCandNextClusterSU =
3507       TryCand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
3508   if (tryGreater(TryCand.SU == TryCandNextClusterSU,
3509                  Cand.SU == CandNextClusterSU,
3510                  TryCand, Cand, Cluster))
3511     return TryCand.Reason != NoCand;
3512
3513   if (SameBoundary) {
3514     // Weak edges are for clustering and other constraints.
3515     if (tryLess(getWeakLeft(TryCand.SU, TryCand.AtTop),
3516                 getWeakLeft(Cand.SU, Cand.AtTop),
3517                 TryCand, Cand, Weak))
3518       return TryCand.Reason != NoCand;
3519   }
3520
3521   // Avoid increasing the max pressure of the entire region.
3522   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
3523                                                Cand.RPDelta.CurrentMax,
3524                                                TryCand, Cand, RegMax, TRI,
3525                                                DAG->MF))
3526     return TryCand.Reason != NoCand;
3527
3528   if (SameBoundary) {
3529     // Avoid critical resource consumption and balance the schedule.
3530 TryCand.initResourceDelta(DAG, SchedModel); 3531 if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources, 3532 TryCand, Cand, ResourceReduce)) 3533 return TryCand.Reason != NoCand; 3534 if (tryGreater(TryCand.ResDelta.DemandedResources, 3535 Cand.ResDelta.DemandedResources, 3536 TryCand, Cand, ResourceDemand)) 3537 return TryCand.Reason != NoCand; 3538 3539 // Avoid serializing long latency dependence chains. 3540 // For acyclic path limited loops, latency was already checked above. 3541 if (!RegionPolicy.DisableLatencyHeuristic && TryCand.Policy.ReduceLatency && 3542 !Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, *Zone)) 3543 return TryCand.Reason != NoCand; 3544 3545 // Fall through to original instruction order. 3546 if ((Zone->isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum) 3547 || (!Zone->isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) { 3548 TryCand.Reason = NodeOrder; 3549 return true; 3550 } 3551 } 3552 3553 return false; 3554 } 3555 3556 /// Pick the best candidate from the queue. 3557 /// 3558 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during 3559 /// DAG building. To adjust for the current scheduling location we need to 3560 /// maintain the number of vreg uses remaining to be top-scheduled. 3561 void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone, 3562 const CandPolicy &ZonePolicy, 3563 const RegPressureTracker &RPTracker, 3564 SchedCandidate &Cand) { 3565 // getMaxPressureDelta temporarily modifies the tracker. 3566 RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker); 3567 3568 ReadyQueue &Q = Zone.Available; 3569 for (SUnit *SU : Q) { 3570 3571 SchedCandidate TryCand(ZonePolicy); 3572 initCandidate(TryCand, SU, Zone.isTop(), RPTracker, TempTracker); 3573 // Pass SchedBoundary only when comparing nodes from the same boundary. 3574 SchedBoundary *ZoneArg = Cand.AtTop == TryCand.AtTop ? &Zone : nullptr; 3575 if (tryCandidate(Cand, TryCand, ZoneArg)) { 3576 // Initialize resource delta if needed in case future heuristics query it. 3577 if (TryCand.ResDelta == SchedResourceDelta()) 3578 TryCand.initResourceDelta(DAG, SchedModel); 3579 Cand.setBest(TryCand); 3580 LLVM_DEBUG(traceCandidate(Cand)); 3581 } 3582 } 3583 } 3584 3585 /// Pick the best candidate node from either the top or bottom queue. 3586 SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) { 3587 // Schedule as far as possible in the direction of no choice. This is most 3588 // efficient, but also provides the best heuristics for CriticalPSets. 3589 if (SUnit *SU = Bot.pickOnlyChoice()) { 3590 IsTopNode = false; 3591 tracePick(Only1, false); 3592 return SU; 3593 } 3594 if (SUnit *SU = Top.pickOnlyChoice()) { 3595 IsTopNode = true; 3596 tracePick(Only1, true); 3597 return SU; 3598 } 3599 // Set the bottom-up policy based on the state of the current bottom zone and 3600 // the instructions outside the zone, including the top zone. 3601 CandPolicy BotPolicy; 3602 setPolicy(BotPolicy, /*IsPostRA=*/false, Bot, &Top); 3603 // Set the top-down policy based on the state of the current top zone and 3604 // the instructions outside the zone, including the bottom zone. 3605 CandPolicy TopPolicy; 3606 setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot); 3607 3608 // See if BotCand is still valid (because we previously scheduled from Top). 
3609 LLVM_DEBUG(dbgs() << "Picking from Bot:\n"); 3610 if (!BotCand.isValid() || BotCand.SU->isScheduled || 3611 BotCand.Policy != BotPolicy) { 3612 BotCand.reset(CandPolicy()); 3613 pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand); 3614 assert(BotCand.Reason != NoCand && "failed to find the first candidate"); 3615 } else { 3616 LLVM_DEBUG(traceCandidate(BotCand)); 3617 #ifndef NDEBUG 3618 if (VerifyScheduling) { 3619 SchedCandidate TCand; 3620 TCand.reset(CandPolicy()); 3621 pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), TCand); 3622 assert(TCand.SU == BotCand.SU && 3623 "Last pick result should correspond to re-picking right now"); 3624 } 3625 #endif 3626 } 3627 3628 // Check if the top Q has a better candidate. 3629 LLVM_DEBUG(dbgs() << "Picking from Top:\n"); 3630 if (!TopCand.isValid() || TopCand.SU->isScheduled || 3631 TopCand.Policy != TopPolicy) { 3632 TopCand.reset(CandPolicy()); 3633 pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand); 3634 assert(TopCand.Reason != NoCand && "failed to find the first candidate"); 3635 } else { 3636 LLVM_DEBUG(traceCandidate(TopCand)); 3637 #ifndef NDEBUG 3638 if (VerifyScheduling) { 3639 SchedCandidate TCand; 3640 TCand.reset(CandPolicy()); 3641 pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TCand); 3642 assert(TCand.SU == TopCand.SU && 3643 "Last pick result should correspond to re-picking right now"); 3644 } 3645 #endif 3646 } 3647 3648 // Pick best from BotCand and TopCand. 3649 assert(BotCand.isValid()); 3650 assert(TopCand.isValid()); 3651 SchedCandidate Cand = BotCand; 3652 TopCand.Reason = NoCand; 3653 if (tryCandidate(Cand, TopCand, nullptr)) { 3654 Cand.setBest(TopCand); 3655 LLVM_DEBUG(traceCandidate(Cand)); 3656 } 3657 3658 IsTopNode = Cand.AtTop; 3659 tracePick(Cand); 3660 return Cand.SU; 3661 } 3662 3663 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy. 3664 SUnit *GenericScheduler::pickNode(bool &IsTopNode) { 3665 if (DAG->top() == DAG->bottom()) { 3666 assert(Top.Available.empty() && Top.Pending.empty() && 3667 Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage"); 3668 return nullptr; 3669 } 3670 SUnit *SU; 3671 do { 3672 if (RegionPolicy.OnlyTopDown) { 3673 SU = Top.pickOnlyChoice(); 3674 if (!SU) { 3675 CandPolicy NoPolicy; 3676 TopCand.reset(NoPolicy); 3677 pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand); 3678 assert(TopCand.Reason != NoCand && "failed to find a candidate"); 3679 tracePick(TopCand); 3680 SU = TopCand.SU; 3681 } 3682 IsTopNode = true; 3683 } else if (RegionPolicy.OnlyBottomUp) { 3684 SU = Bot.pickOnlyChoice(); 3685 if (!SU) { 3686 CandPolicy NoPolicy; 3687 BotCand.reset(NoPolicy); 3688 pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand); 3689 assert(BotCand.Reason != NoCand && "failed to find a candidate"); 3690 tracePick(BotCand); 3691 SU = BotCand.SU; 3692 } 3693 IsTopNode = false; 3694 } else { 3695 SU = pickNodeBidirectional(IsTopNode); 3696 } 3697 } while (SU->isScheduled); 3698 3699 if (SU->isTopReady()) 3700 Top.removeReady(SU); 3701 if (SU->isBottomReady()) 3702 Bot.removeReady(SU); 3703 3704 LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " 3705 << *SU->getInstr()); 3706 return SU; 3707 } 3708 3709 void GenericScheduler::reschedulePhysReg(SUnit *SU, bool isTop) { 3710 MachineBasicBlock::iterator InsertPos = SU->getInstr(); 3711 if (!isTop) 3712 ++InsertPos; 3713 SmallVectorImpl<SDep> &Deps = isTop ? 
SU->Preds : SU->Succs; 3714 3715 // Find already scheduled copies with a single physreg dependence and move 3716 // them just above the scheduled instruction. 3717 for (SDep &Dep : Deps) { 3718 if (Dep.getKind() != SDep::Data || 3719 !Register::isPhysicalRegister(Dep.getReg())) 3720 continue; 3721 SUnit *DepSU = Dep.getSUnit(); 3722 if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1) 3723 continue; 3724 MachineInstr *Copy = DepSU->getInstr(); 3725 if (!Copy->isCopy() && !Copy->isMoveImmediate()) 3726 continue; 3727 LLVM_DEBUG(dbgs() << " Rescheduling physreg copy "; 3728 DAG->dumpNode(*Dep.getSUnit())); 3729 DAG->moveInstruction(Copy, InsertPos); 3730 } 3731 } 3732 3733 /// Update the scheduler's state after scheduling a node. This is the same node 3734 /// that was just returned by pickNode(). However, ScheduleDAGMILive needs to 3735 /// update it's state based on the current cycle before MachineSchedStrategy 3736 /// does. 3737 /// 3738 /// FIXME: Eventually, we may bundle physreg copies rather than rescheduling 3739 /// them here. See comments in biasPhysReg. 3740 void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) { 3741 if (IsTopNode) { 3742 SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle()); 3743 Top.bumpNode(SU); 3744 if (SU->hasPhysRegUses) 3745 reschedulePhysReg(SU, true); 3746 } else { 3747 SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle()); 3748 Bot.bumpNode(SU); 3749 if (SU->hasPhysRegDefs) 3750 reschedulePhysReg(SU, false); 3751 } 3752 } 3753 3754 /// Create the standard converging machine scheduler. This will be used as the 3755 /// default scheduler if the target does not set a default. 3756 ScheduleDAGMILive *llvm::createGenericSchedLive(MachineSchedContext *C) { 3757 ScheduleDAGMILive *DAG = 3758 new ScheduleDAGMILive(C, std::make_unique<GenericScheduler>(C)); 3759 // Register DAG post-processors. 3760 // 3761 // FIXME: extend the mutation API to allow earlier mutations to instantiate 3762 // data and pass it to later mutations. Have a single mutation that gathers 3763 // the interesting nodes in one pass. 3764 DAG->addMutation(createCopyConstrainDAGMutation(DAG->TII, DAG->TRI)); 3765 return DAG; 3766 } 3767 3768 static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) { 3769 return createGenericSchedLive(C); 3770 } 3771 3772 static MachineSchedRegistry 3773 GenericSchedRegistry("converge", "Standard converging scheduler.", 3774 createConvergingSched); 3775 3776 //===----------------------------------------------------------------------===// 3777 // PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy. 3778 //===----------------------------------------------------------------------===// 3779 3780 void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) { 3781 DAG = Dag; 3782 SchedModel = DAG->getSchedModel(); 3783 TRI = DAG->TRI; 3784 3785 Rem.init(DAG, SchedModel); 3786 Top.init(DAG, SchedModel, &Rem); 3787 BotRoots.clear(); 3788 3789 // Initialize the HazardRecognizers. If itineraries don't exist, are empty, 3790 // or are disabled, then these HazardRecs will be disabled. 3791 const InstrItineraryData *Itin = SchedModel->getInstrItineraries(); 3792 if (!Top.HazardRec) { 3793 Top.HazardRec = 3794 DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer( 3795 Itin, DAG); 3796 } 3797 } 3798 3799 void PostGenericScheduler::registerRoots() { 3800 Rem.CriticalPath = DAG->ExitSU.getDepth(); 3801 3802 // Some roots may not feed into ExitSU. Check all of them in case. 
3803 for (const SUnit *SU : BotRoots) { 3804 if (SU->getDepth() > Rem.CriticalPath) 3805 Rem.CriticalPath = SU->getDepth(); 3806 } 3807 LLVM_DEBUG(dbgs() << "Critical Path: (PGS-RR) " << Rem.CriticalPath << '\n'); 3808 if (DumpCriticalPathLength) { 3809 errs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << " \n"; 3810 } 3811 } 3812 3813 /// Apply a set of heuristics to a new candidate for PostRA scheduling. 3814 /// 3815 /// \param Cand provides the policy and current best candidate. 3816 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized. 3817 /// \return \c true if TryCand is better than Cand (Reason is NOT NoCand) 3818 bool PostGenericScheduler::tryCandidate(SchedCandidate &Cand, 3819 SchedCandidate &TryCand) { 3820 // Initialize the candidate if needed. 3821 if (!Cand.isValid()) { 3822 TryCand.Reason = NodeOrder; 3823 return true; 3824 } 3825 3826 // Prioritize instructions that read unbuffered resources by stall cycles. 3827 if (tryLess(Top.getLatencyStallCycles(TryCand.SU), 3828 Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall)) 3829 return TryCand.Reason != NoCand; 3830 3831 // Keep clustered nodes together. 3832 if (tryGreater(TryCand.SU == DAG->getNextClusterSucc(), 3833 Cand.SU == DAG->getNextClusterSucc(), 3834 TryCand, Cand, Cluster)) 3835 return TryCand.Reason != NoCand; 3836 3837 // Avoid critical resource consumption and balance the schedule. 3838 if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources, 3839 TryCand, Cand, ResourceReduce)) 3840 return TryCand.Reason != NoCand; 3841 if (tryGreater(TryCand.ResDelta.DemandedResources, 3842 Cand.ResDelta.DemandedResources, 3843 TryCand, Cand, ResourceDemand)) 3844 return TryCand.Reason != NoCand; 3845 3846 // Avoid serializing long latency dependence chains. 3847 if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) { 3848 return TryCand.Reason != NoCand; 3849 } 3850 3851 // Fall through to original instruction order. 3852 if (TryCand.SU->NodeNum < Cand.SU->NodeNum) { 3853 TryCand.Reason = NodeOrder; 3854 return true; 3855 } 3856 3857 return false; 3858 } 3859 3860 void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) { 3861 ReadyQueue &Q = Top.Available; 3862 for (SUnit *SU : Q) { 3863 SchedCandidate TryCand(Cand.Policy); 3864 TryCand.SU = SU; 3865 TryCand.AtTop = true; 3866 TryCand.initResourceDelta(DAG, SchedModel); 3867 if (tryCandidate(Cand, TryCand)) { 3868 Cand.setBest(TryCand); 3869 LLVM_DEBUG(traceCandidate(Cand)); 3870 } 3871 } 3872 } 3873 3874 /// Pick the next node to schedule. 3875 SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) { 3876 if (DAG->top() == DAG->bottom()) { 3877 assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage"); 3878 return nullptr; 3879 } 3880 SUnit *SU; 3881 do { 3882 SU = Top.pickOnlyChoice(); 3883 if (SU) { 3884 tracePick(Only1, true); 3885 } else { 3886 CandPolicy NoPolicy; 3887 SchedCandidate TopCand(NoPolicy); 3888 // Set the top-down policy based on the state of the current top zone and 3889 // the instructions outside the zone, including the bottom zone. 
3890 setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr); 3891 pickNodeFromQueue(TopCand); 3892 assert(TopCand.Reason != NoCand && "failed to find a candidate"); 3893 tracePick(TopCand); 3894 SU = TopCand.SU; 3895 } 3896 } while (SU->isScheduled); 3897 3898 IsTopNode = true; 3899 Top.removeReady(SU); 3900 3901 LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " 3902 << *SU->getInstr()); 3903 return SU; 3904 } 3905 3906 /// Called after ScheduleDAGMI has scheduled an instruction and updated 3907 /// scheduled/remaining flags in the DAG nodes. 3908 void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) { 3909 SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle()); 3910 Top.bumpNode(SU); 3911 } 3912 3913 ScheduleDAGMI *llvm::createGenericSchedPostRA(MachineSchedContext *C) { 3914 return new ScheduleDAGMI(C, std::make_unique<PostGenericScheduler>(C), 3915 /*RemoveKillFlags=*/true); 3916 } 3917 3918 //===----------------------------------------------------------------------===// 3919 // ILP Scheduler. Currently for experimental analysis of heuristics. 3920 //===----------------------------------------------------------------------===// 3921 3922 namespace { 3923 3924 /// Order nodes by the ILP metric. 3925 struct ILPOrder { 3926 const SchedDFSResult *DFSResult = nullptr; 3927 const BitVector *ScheduledTrees = nullptr; 3928 bool MaximizeILP; 3929 3930 ILPOrder(bool MaxILP) : MaximizeILP(MaxILP) {} 3931 3932 /// Apply a less-than relation on node priority. 3933 /// 3934 /// (Return true if A comes after B in the Q.) 3935 bool operator()(const SUnit *A, const SUnit *B) const { 3936 unsigned SchedTreeA = DFSResult->getSubtreeID(A); 3937 unsigned SchedTreeB = DFSResult->getSubtreeID(B); 3938 if (SchedTreeA != SchedTreeB) { 3939 // Unscheduled trees have lower priority. 3940 if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB)) 3941 return ScheduledTrees->test(SchedTreeB); 3942 3943 // Trees with shallower connections have have lower priority. 3944 if (DFSResult->getSubtreeLevel(SchedTreeA) 3945 != DFSResult->getSubtreeLevel(SchedTreeB)) { 3946 return DFSResult->getSubtreeLevel(SchedTreeA) 3947 < DFSResult->getSubtreeLevel(SchedTreeB); 3948 } 3949 } 3950 if (MaximizeILP) 3951 return DFSResult->getILP(A) < DFSResult->getILP(B); 3952 else 3953 return DFSResult->getILP(A) > DFSResult->getILP(B); 3954 } 3955 }; 3956 3957 /// Schedule based on the ILP metric. 3958 class ILPScheduler : public MachineSchedStrategy { 3959 ScheduleDAGMILive *DAG = nullptr; 3960 ILPOrder Cmp; 3961 3962 std::vector<SUnit*> ReadyQ; 3963 3964 public: 3965 ILPScheduler(bool MaximizeILP) : Cmp(MaximizeILP) {} 3966 3967 void initialize(ScheduleDAGMI *dag) override { 3968 assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness"); 3969 DAG = static_cast<ScheduleDAGMILive*>(dag); 3970 DAG->computeDFSResult(); 3971 Cmp.DFSResult = DAG->getDFSResult(); 3972 Cmp.ScheduledTrees = &DAG->getScheduledTrees(); 3973 ReadyQ.clear(); 3974 } 3975 3976 void registerRoots() override { 3977 // Restore the heap in ReadyQ with the updated DFS results. 3978 std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 3979 } 3980 3981 /// Implement MachineSchedStrategy interface. 3982 /// ----------------------------------------- 3983 3984 /// Callback to select the highest priority node from the ready Q. 
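  /// A minimal sketch of the pop discipline (purely illustrative): ReadyQ is
  /// kept as a heap ordered by ILPOrder, so the best node is extracted in
  /// O(log N) instead of scanning the queue, e.g.
  ///
  ///   std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); // best moves to back
  ///   SUnit *Best = ReadyQ.back();
  ///   ReadyQ.pop_back();
  ///
  /// With MaximizeILP set this yields the node with the largest ILP value.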
3985 SUnit *pickNode(bool &IsTopNode) override { 3986 if (ReadyQ.empty()) return nullptr; 3987 std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 3988 SUnit *SU = ReadyQ.back(); 3989 ReadyQ.pop_back(); 3990 IsTopNode = false; 3991 LLVM_DEBUG(dbgs() << "Pick node " 3992 << "SU(" << SU->NodeNum << ") " 3993 << " ILP: " << DAG->getDFSResult()->getILP(SU) 3994 << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) 3995 << " @" 3996 << DAG->getDFSResult()->getSubtreeLevel( 3997 DAG->getDFSResult()->getSubtreeID(SU)) 3998 << '\n' 3999 << "Scheduling " << *SU->getInstr()); 4000 return SU; 4001 } 4002 4003 /// Scheduler callback to notify that a new subtree is scheduled. 4004 void scheduleTree(unsigned SubtreeID) override { 4005 std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 4006 } 4007 4008 /// Callback after a node is scheduled. Mark a newly scheduled tree, notify 4009 /// DFSResults, and resort the priority Q. 4010 void schedNode(SUnit *SU, bool IsTopNode) override { 4011 assert(!IsTopNode && "SchedDFSResult needs bottom-up"); 4012 } 4013 4014 void releaseTopNode(SUnit *) override { /*only called for top roots*/ } 4015 4016 void releaseBottomNode(SUnit *SU) override { 4017 ReadyQ.push_back(SU); 4018 std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 4019 } 4020 }; 4021 4022 } // end anonymous namespace 4023 4024 static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) { 4025 return new ScheduleDAGMILive(C, std::make_unique<ILPScheduler>(true)); 4026 } 4027 static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) { 4028 return new ScheduleDAGMILive(C, std::make_unique<ILPScheduler>(false)); 4029 } 4030 4031 static MachineSchedRegistry ILPMaxRegistry( 4032 "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler); 4033 static MachineSchedRegistry ILPMinRegistry( 4034 "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler); 4035 4036 //===----------------------------------------------------------------------===// 4037 // Machine Instruction Shuffler for Correctness Testing 4038 //===----------------------------------------------------------------------===// 4039 4040 #ifndef NDEBUG 4041 namespace { 4042 4043 /// Apply a less-than relation on the node order, which corresponds to the 4044 /// instruction order prior to scheduling. IsReverse implements greater-than. 4045 template<bool IsReverse> 4046 struct SUnitOrder { 4047 bool operator()(SUnit *A, SUnit *B) const { 4048 if (IsReverse) 4049 return A->NodeNum > B->NodeNum; 4050 else 4051 return A->NodeNum < B->NodeNum; 4052 } 4053 }; 4054 4055 /// Reorder instructions as much as possible. 4056 class InstructionShuffler : public MachineSchedStrategy { 4057 bool IsAlternating; 4058 bool IsTopDown; 4059 4060 // Using a less-than relation (SUnitOrder<false>) for the TopQ priority 4061 // gives nodes with a higher number higher priority causing the latest 4062 // instructions to be scheduled first. 4063 PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false>> 4064 TopQ; 4065 4066 // When scheduling bottom-up, use greater-than as the queue priority. 4067 PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true>> 4068 BottomQ; 4069 4070 public: 4071 InstructionShuffler(bool alternate, bool topdown) 4072 : IsAlternating(alternate), IsTopDown(topdown) {} 4073 4074 void initialize(ScheduleDAGMI*) override { 4075 TopQ.clear(); 4076 BottomQ.clear(); 4077 } 4078 4079 /// Implement MachineSchedStrategy interface. 
4080 /// ----------------------------------------- 4081 4082 SUnit *pickNode(bool &IsTopNode) override { 4083 SUnit *SU; 4084 if (IsTopDown) { 4085 do { 4086 if (TopQ.empty()) return nullptr; 4087 SU = TopQ.top(); 4088 TopQ.pop(); 4089 } while (SU->isScheduled); 4090 IsTopNode = true; 4091 } else { 4092 do { 4093 if (BottomQ.empty()) return nullptr; 4094 SU = BottomQ.top(); 4095 BottomQ.pop(); 4096 } while (SU->isScheduled); 4097 IsTopNode = false; 4098 } 4099 if (IsAlternating) 4100 IsTopDown = !IsTopDown; 4101 return SU; 4102 } 4103 4104 void schedNode(SUnit *SU, bool IsTopNode) override {} 4105 4106 void releaseTopNode(SUnit *SU) override { 4107 TopQ.push(SU); 4108 } 4109 void releaseBottomNode(SUnit *SU) override { 4110 BottomQ.push(SU); 4111 } 4112 }; 4113 4114 } // end anonymous namespace 4115 4116 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) { 4117 bool Alternate = !ForceTopDown && !ForceBottomUp; 4118 bool TopDown = !ForceBottomUp; 4119 assert((TopDown || !ForceTopDown) && 4120 "-misched-topdown incompatible with -misched-bottomup"); 4121 return new ScheduleDAGMILive( 4122 C, std::make_unique<InstructionShuffler>(Alternate, TopDown)); 4123 } 4124 4125 static MachineSchedRegistry ShufflerRegistry( 4126 "shuffle", "Shuffle machine instructions alternating directions", 4127 createInstructionShuffler); 4128 #endif // !NDEBUG 4129 4130 //===----------------------------------------------------------------------===// 4131 // GraphWriter support for ScheduleDAGMILive. 4132 //===----------------------------------------------------------------------===// 4133 4134 #ifndef NDEBUG 4135 namespace llvm { 4136 4137 template<> struct GraphTraits< 4138 ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {}; 4139 4140 template<> 4141 struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits { 4142 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {} 4143 4144 static std::string getGraphName(const ScheduleDAG *G) { 4145 return std::string(G->MF.getName()); 4146 } 4147 4148 static bool renderGraphFromBottomUp() { 4149 return true; 4150 } 4151 4152 static bool isNodeHidden(const SUnit *Node, const ScheduleDAG *G) { 4153 if (ViewMISchedCutoff == 0) 4154 return false; 4155 return (Node->Preds.size() > ViewMISchedCutoff 4156 || Node->Succs.size() > ViewMISchedCutoff); 4157 } 4158 4159 /// If you want to override the dot attributes printed for a particular 4160 /// edge, override this method. 4161 static std::string getEdgeAttributes(const SUnit *Node, 4162 SUnitIterator EI, 4163 const ScheduleDAG *Graph) { 4164 if (EI.isArtificialDep()) 4165 return "color=cyan,style=dashed"; 4166 if (EI.isCtrlDep()) 4167 return "color=blue,style=dashed"; 4168 return ""; 4169 } 4170 4171 static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) { 4172 std::string Str; 4173 raw_string_ostream SS(Str); 4174 const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G); 4175 const SchedDFSResult *DFS = DAG->hasVRegLiveness() ? 
4176 static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr; 4177 SS << "SU:" << SU->NodeNum; 4178 if (DFS) 4179 SS << " I:" << DFS->getNumInstrs(SU); 4180 return SS.str(); 4181 } 4182 4183 static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) { 4184 return G->getGraphNodeLabel(SU); 4185 } 4186 4187 static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) { 4188 std::string Str("shape=Mrecord"); 4189 const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G); 4190 const SchedDFSResult *DFS = DAG->hasVRegLiveness() ? 4191 static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr; 4192 if (DFS) { 4193 Str += ",style=filled,fillcolor=\"#"; 4194 Str += DOT::getColorString(DFS->getSubtreeID(N)); 4195 Str += '"'; 4196 } 4197 return Str; 4198 } 4199 }; 4200 4201 } // end namespace llvm 4202 #endif // NDEBUG 4203 4204 /// viewGraph - Pop up a ghostview window with the reachable parts of the DAG 4205 /// rendered using 'dot'. 4206 void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) { 4207 #ifndef NDEBUG 4208 ViewGraph(this, Name, false, Title); 4209 #else 4210 errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on " 4211 << "systems with Graphviz or gv!\n"; 4212 #endif // NDEBUG 4213 } 4214 4215 /// Out-of-line implementation with no arguments is handy for gdb. 4216 void ScheduleDAGMI::viewGraph() { 4217 viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName()); 4218 } 4219 4220 /// Sort predicate for the intervals stored in an instance of 4221 /// ResourceSegments. Intervals are always disjoint (no intersection 4222 /// for any pairs of intervals), therefore we can sort the totality of 4223 /// the intervals by looking only at the left boundary. 4224 static bool sortIntervals(const ResourceSegments::IntervalTy &A, 4225 const ResourceSegments::IntervalTy &B) { 4226 return A.first < B.first; 4227 } 4228 4229 unsigned ResourceSegments::getFirstAvailableAt( 4230 unsigned CurrCycle, unsigned StartAtCycle, unsigned Cycle, 4231 std::function<ResourceSegments::IntervalTy(unsigned, unsigned, unsigned)> 4232 IntervalBuilder) const { 4233 assert(std::is_sorted(std::begin(_Intervals), std::end(_Intervals), 4234 sortIntervals) && 4235 "Cannot execute on an un-sorted set of intervals."); 4236 unsigned RetCycle = CurrCycle; 4237 ResourceSegments::IntervalTy NewInterval = 4238 IntervalBuilder(RetCycle, StartAtCycle, Cycle); 4239 for (auto &Interval : _Intervals) { 4240 if (!intersects(NewInterval, Interval)) 4241 continue; 4242 4243 // Move the interval right next to the top of the one it 4244 // intersects. 4245 assert(Interval.second > NewInterval.first && 4246 "Invalid intervals configuration."); 4247 RetCycle += (unsigned)Interval.second - (unsigned)NewInterval.first; 4248 NewInterval = IntervalBuilder(RetCycle, StartAtCycle, Cycle); 4249 } 4250 return RetCycle; 4251 } 4252 4253 void ResourceSegments::add(ResourceSegments::IntervalTy A, 4254 const unsigned CutOff) { 4255 assert(A.first < A.second && "Cannot add empty resource usage"); 4256 assert(CutOff > 0 && "0-size interval history has no use."); 4257 assert(all_of(_Intervals, 4258 [&A](const ResourceSegments::IntervalTy &Interval) -> bool { 4259 return !intersects(A, Interval); 4260 }) && 4261 "A resource is being overwritten"); 4262 _Intervals.push_back(A); 4263 4264 sortAndMerge(); 4265 4266 // Do not keep the full history of the intervals, just the 4267 // latest #CutOff. 
4268   while (_Intervals.size() > CutOff)
4269     _Intervals.pop_front();
4270 }
4271
4272 bool ResourceSegments::intersects(ResourceSegments::IntervalTy A,
4273                                   ResourceSegments::IntervalTy B) {
4274   assert(A.first <= A.second && "Invalid interval");
4275   assert(B.first <= B.second && "Invalid interval");
4276
4277   // Share one boundary.
4278   if ((A.first == B.first) || (A.second == B.second))
4279     return true;
4280
4281   // full intersect:    [    ***     )  B
4282   //                        [***)        A
4283   if ((A.first > B.first) && (A.second < B.second))
4284     return true;
4285
4286   // right intersect:   [    ***)        B
4287   //                        [***      )  A
4288   if ((A.first > B.first) && (A.first < B.second) && (A.second > B.second))
4289     return true;
4290
4291   // left intersect:        [***      )  B
4292   //                    [    ***)         A
4293   if ((A.first < B.first) && (B.first < A.second) && (B.second > B.first))
4294     return true;
4295
4296   return false;
4297 }
4298
4299 void ResourceSegments::sortAndMerge() {
4300   if (_Intervals.size() <= 1)
4301     return;
4302
4303   // First sort the collection.
4304   _Intervals.sort(sortIntervals);
4305
4306   // We can use next because the list has at least 2 elements at this point.
4307   auto next = std::next(std::begin(_Intervals));
4308   auto E = std::end(_Intervals);
4309   for (; next != E; ++next) {
4310     if (std::prev(next)->second >= next->first) {
4311       next->first = std::prev(next)->first;
4312       _Intervals.erase(std::prev(next));
4313       continue;
4314     }
4315   }
4316 }
4317
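// A small illustration of the interval bookkeeping above (the cycle numbers
// and usage are hypothetical, assuming IntervalTy brace-initialization):
//
//   ResourceSegments RS;
//   RS.add({2, 5}, /*CutOff=*/10);
//   RS.add({7, 9}, /*CutOff=*/10);
//   RS.add({5, 7}, /*CutOff=*/10); // abuts both existing segments
//
// After the last add(), sortAndMerge() coalesces the three abutting segments
// into the single interval [2, 9), because neighbouring segments are merged
// whenever the previous right bound reaches the next left bound; CutOff only
// bounds how many (merged) segments of history are retained.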