//===- MachineScheduler.cpp - Machine Instruction Scheduler --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include <queue>

using namespace llvm;

#define DEBUG_TYPE "misched"

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
                       cl::desc("Print critical path length to stdout"));
}

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

/// In some situations a few uninteresting nodes depend on nearly all other
/// nodes in the graph; provide a cutoff to hide them.
static cl::opt<unsigned> ViewMISchedCutoff("view-misched-cutoff", cl::Hidden,
  cl::desc("Hide nodes with more predecessors/successors than the cutoff"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));

static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
  cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
  cl::desc("Only schedule this MBB#"));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
  cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(true));

static cl::opt<bool> EnableMemOpCluster("misched-cluster", cl::Hidden,
                                        cl::desc("Enable memop clustering."),
                                        cl::init(true));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));
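// These are ordinary cl::opt flags, so they can be passed to any tool that
// runs this pass. A hypothetical debugging invocation, for illustration only
// (the view/cutoff options above are compiled in only when NDEBUG is not
// defined):
//
//   llc -misched-topdown -misched-cutoff=50 -view-misched-dags foo.ll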
// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}
void ScheduleDAGMutation::anchor() {}

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
    MF(nullptr), MLI(nullptr), MDT(nullptr), PassConfig(nullptr), AA(nullptr),
    LIS(nullptr) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// Base class for a machine scheduler class that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}

  void print(raw_ostream &O, const Module* = nullptr) const override;

protected:
  void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags);
};

/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createMachineScheduler();
};

/// PostMachineScheduler runs shortly before code emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
  PostMachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createPostMachineScheduler();
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "machine-scheduler",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "machine-scheduler",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineSchedulerBase(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

char PostMachineScheduler::ID = 0;

char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;

INITIALIZE_PASS(PostMachineScheduler, "postmisched",
                "PostRA Machine Instruction Scheduler", false, false)

PostMachineScheduler::PostMachineScheduler()
: MachineSchedulerBase(ID) {
  initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}
void
PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return nullptr;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);

static cl::opt<bool> EnableMachineSched(
    "enable-misched",
    cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePostRAMachineSched(
    "enable-post-misched",
    cl::desc("Enable the post-ra machine instruction scheduling pass."),
    cl::init(true), cl::Hidden);

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C);
static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C);

/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return const_cast<MachineInstr*>(
    &*priorNonDebug(MachineBasicBlock::const_iterator(I), Beg));
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for(; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  // Cast the return value to nonconst MachineInstr, then cast to an
  // instr_iterator, which does not check for null, finally return a
  // bundle_iterator.
  return MachineBasicBlock::instr_iterator(
    const_cast<MachineInstr*>(
      &*nextIfDebug(MachineBasicBlock::const_iterator(I), End)));
}
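// A target or plugin can make its own scheduler selectable via -misched=<name>
// by registering a factory with MachineSchedRegistry, mirroring
// DefaultSchedRegistry above. A minimal sketch, with hypothetical names:
//
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, make_unique<MySchedStrategy>(C));
//   }
//   static MachineSchedRegistry
//   MySchedRegistry("my-sched", "Run MySchedStrategy.", createMySched);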
/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor != useDefaultMachineSched)
    return Ctor(this);

  // Get the default scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedLive(this);
}

/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipOptnoneFunction(*mf.getFunction()))
    return false;

  if (EnableMachineSched.getNumOccurrences()) {
    if (!EnableMachineSched)
      return false;
  } else if (!mf.getSubtarget().enableMachineScheduler())
    return false;

  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler, false);

  DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipOptnoneFunction(*mf.getFunction()))
    return false;

  if (EnablePostRAMachineSched.getNumOccurrences()) {
    if (!EnablePostRAMachineSched)
      return false;
  } else if (!mf.getSubtarget().enablePostRAScheduler()) {
    DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
    return false;
  }
  DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  PassConfig = &getAnalysis<TargetPassConfig>();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  scheduleRegions(*Scheduler, true);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}

/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need isCall to enforce
/// the boundary, but there would be no benefit to postRA scheduling across
/// calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
                            MachineBasicBlock *MBB,
                            MachineFunction *MF,
                            const TargetInstrInfo *TII) {
  return MI->isCall() || TII->isSchedulingBoundary(MI, MBB, *MF);
}

/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler,
                                           bool FixKillFlags) {
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(&*MBB);

#ifndef NDEBUG
    if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
      continue;
    if (SchedOnlyBlock.getNumOccurrences()
        && (int)SchedOnlyBlock != MBB->getNumber())
      continue;
#endif

    // Break the block into scheduling regions [I, RegionEnd), and schedule
    // each region as soon as it is discovered. RegionEnd points to the
    // scheduling boundary at the bottom of the region. The DAG does not
    // include RegionEnd, but the region does (i.e. the next RegionEnd is above
    // the previous RegionBegin). If the current block has no terminator then
    // RegionEnd == MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions.
    // So the local iterators 'I' and 'RegionEnd' are invalid across these
    // calls.
    //
    // MBB::size() uses instr_iterator to count. Here we need a bundle to count
    // as a single instruction.
    unsigned RemainingInstrs = std::distance(MBB->begin(), MBB->end());
    for(MachineBasicBlock::iterator RegionEnd = MBB->end();
        RegionEnd != MBB->begin(); RegionEnd = Scheduler.begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end() ||
          isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII)) {
        --RegionEnd;
        // Count the boundary instruction.
        --RemainingInstrs;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      unsigned NumRegionInstrs = 0;
      MachineBasicBlock::iterator I = RegionEnd;
      for(;I != MBB->begin(); --I, --RemainingInstrs) {
        if (isSchedBoundary(&*std::prev(I), &*MBB, MF, TII))
          break;
        if (!I->isDebugValue())
          ++NumRegionInstrs;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == std::prev(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n From: " << *I << " To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " RegionInstrs: " << NumRegionInstrs
            << " Remaining: " << RemainingInstrs << "\n");
      if (DumpCriticalPathLength) {
        errs() << MF->getName();
        errs() << ":BB# " << MBB->getNumber();
        errs() << " " << MBB->getName() << " \n";
      }

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler.begin();
    }
    assert(RemainingInstrs == 0 && "Instruction count mismatch!");
    Scheduler.finishBlock();
    // FIXME: Ideally, no further passes should rely on kill flags. However,
    // thumb2 size reduction is currently an exception, so the PostMIScheduler
    // needs to do this.
    if (FixKillFlags)
      Scheduler.fixupKills(&*MBB);
  }
  Scheduler.finalizeSchedule();
}
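// As an illustration of the region loop above, consider a hypothetical block:
//
//     %a = ...
//     %b = ...
//     CALL @f        ; isSchedBoundary() is true here
//     %c = ...
//
// Two regions are visited bottom-up: first the region below the call
// containing %c, then the region [%a, CALL). The boundary instruction itself
// is counted but never becomes part of a scheduling DAG.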
void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

LLVM_DUMP_METHOD
void ReadyQueue::dump() {
  dbgs() << "Queue " << Name << ": ";
  for (unsigned i = 0, e = Queue.size(); i < e; ++i)
    dbgs() << Queue[i]->NodeNum << " ";
  dbgs() << "\n";
}

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping
// for virtual registers.
//===----------------------------------------------------------------------===//

// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() {
}

bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
    SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();

  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
    PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();

  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}
/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  SchedImpl->initPolicy(begin, end, regioninstrs);
}

/// This is normally called from the main scheduler loop but may also be
/// invoked by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
  MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals.
  if (LIS)
    LIS->handleMove(*MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}
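// The driver below is written entirely against the MachineSchedStrategy
// callbacks used in this file (initialize, pickNode, schedNode,
// releaseTopNode/releaseBottomNode, registerRoots). A skeletal strategy, with
// hypothetical names and elided bodies, would look like:
//
//   struct MyStrategy : public MachineSchedStrategy {
//     void initialize(ScheduleDAGMI *DAG) override { /* set up queues */ }
//     SUnit *pickNode(bool &IsTopNode) override { /* choose the next SU */ }
//     void schedNode(SUnit *SU, bool IsTopNode) override { /* update state */ }
//     void releaseTopNode(SUnit *SU) override { /* SU is top-ready */ }
//     void releaseBottomNode(SUnit *SU) override { /* SU is bottom-ready */ }
//   };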
/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
  DEBUG(SchedImpl->dumpPolicy());

  // Build the DAG.
  buildSchedGraph(AA);

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    }
    else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
    }
    // Notify the scheduling strategy before updating the DAG.
    // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
    // runs, it can then use the accurate ReadyCycle time to determine whether
    // newly released nodes can move to the readyQ.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
    Mutations[i]->apply(this);
  }
}

void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                      SmallVectorImpl<SUnit*> &BotRoots) {
  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    SUnit *SU = &(*I);
    assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU->biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!I->NumPredsLeft)
      TopRoots.push_back(SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!I->NumSuccsLeft)
      BotRoots.push_back(SU);
  }
  ExitSU.biasCriticalPath();
}
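// Note: postprocessDAG() above runs the ScheduleDAGMutations that were
// attached before scheduling; the three mutations defined later in this file
// (BaseMemOpClusterMutation, MacroFusion, CopyConstrain) are all applied
// through that hook.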
/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = nullptr;
  NextClusterPred = nullptr;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
    SchedImpl->releaseTopNode(*I);
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == std::prev(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);

  SUPressureDiffs.clear();

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
  ShouldTrackLaneMasks = SchedImpl->shouldTrackLaneMasks();

  if (ShouldTrackLaneMasks) {
    if (!ShouldTrackPressure)
      report_fatal_error("ShouldTrackLaneMasks requires ShouldTrackPressure");
    // Dead subregister defs have no users and therefore no dependencies;
    // moving them around may cause live intervals to degrade into multiple
    // components. Change independent components to have their own vreg to
    // avoid this.
    if (!DisconnectedComponentsRenamed)
      LIS->renameDisconnectedComponents();
  }
}

// Set up the register pressure trackers for the top and bottom of the
// scheduled region.
void ScheduleDAGMILive::initRegPressure() {
  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin,
                    ShouldTrackLaneMasks, false);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                    ShouldTrackLaneMasks, false);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    DEBUG(dbgs() << "Live Thru: ";
          dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // For each live out vreg reduce the pressure change associated with other
  // uses of the same vreg below the live-out reaching def.
  updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd) {
    SmallVector<RegisterMaskPair, 8> LiveUses;
    BotRPTracker.recede(&LiveUses);
    updatePressureDiffs(LiveUses);
  }

  DEBUG(
    dbgs() << "Top Pressure:\n";
    dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
    dbgs() << "Bottom Pressure:\n";
    dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
  );

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");
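  // Illustration (numbers hypothetical): if a pressure set has a limit of 32
  // registers and the tracker reports a max pressure of 40 for this region,
  // that set is recorded below as a "critical" pressure set so the strategy
  // can prioritize reducing it.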
  // Cache the list of excess pressure sets in this region. This will also
  // track the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      DEBUG(dbgs() << TRI->getRegPressureSetName(i)
            << " Limit " << Limit
            << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureChange(i));
    }
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].getPSet()) << " ";
        dbgs() << "\n");
}

void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
                        const std::vector<unsigned> &NewMaxPressure) {
  const PressureDiff &PDiff = getPressureDiff(SU);
  unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
  for (PressureDiff::const_iterator I = PDiff.begin(), E = PDiff.end();
       I != E; ++I) {
    if (!I->isValid())
      break;
    unsigned ID = I->getPSet();
    while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
      ++CritIdx;
    if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
      if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
          && NewMaxPressure[ID] <= INT16_MAX)
        RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
    }
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
    if (NewMaxPressure[ID] >= Limit - 2) {
      DEBUG(dbgs() << " " << TRI->getRegPressureSetName(ID) << ": "
            << NewMaxPressure[ID]
            << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ") << Limit
            << "(+ " << BotRPTracker.getLiveThru()[ID] << " livethru)\n");
    }
  }
}

/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(
    ArrayRef<RegisterMaskPair> LiveUses) {
  for (const RegisterMaskPair &P : LiveUses) {
    unsigned Reg = P.RegUnit;
    /// FIXME: Currently assuming single-use physregs.
    if (!TRI->isVirtualRegister(Reg))
      continue;

    if (ShouldTrackLaneMasks) {
      // If the register has just become live then other uses won't change
      // this fact anymore => decrement pressure.
      // If the register has just become dead then other uses make it come
      // back to life => increment pressure.
      bool Decrement = P.LaneMask != 0;

      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit &SU = *V2SU.SU;
        if (SU.isScheduled || &SU == &ExitSU)
          continue;

        PressureDiff &PDiff = getPressureDiff(&SU);
        PDiff.addPressureChange(Reg, Decrement, &MRI);
        DEBUG(
          dbgs() << " UpdateRegP: SU(" << SU.NodeNum << ") "
                 << PrintReg(Reg, TRI) << ':' << PrintLaneMask(P.LaneMask)
                 << ' ' << *SU.getInstr();
          dbgs() << " to ";
          PDiff.dump(*TRI);
        );
      }
    } else {
      assert(P.LaneMask != 0);
      DEBUG(dbgs() << " LiveReg: " << PrintVRegOrUnit(Reg, TRI) << "\n");
      // This may be called before CurrentBottom has been initialized. However,
      // BotRPTracker must have a valid position. We want the value live into
      // the instruction or live out of the block, so ask for the previous
      // instruction's live-out.
      const LiveInterval &LI = LIS->getInterval(Reg);
      VNInfo *VNI;
      MachineBasicBlock::const_iterator I =
        nextIfDebug(BotRPTracker.getPos(), BB->end());
      if (I == BB->end())
        VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
      else {
        LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*I));
        VNI = LRQ.valueIn();
      }
      // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
      assert(VNI && "No live value at use.");
      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit *SU = V2SU.SU;
        // If this use comes before the reaching def, it cannot be a last use,
        // so decrease its pressure change.
        if (!SU->isScheduled && SU != &ExitSU) {
          LiveQueryResult LRQ =
            LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
          if (LRQ.valueIn() == VNI) {
            PressureDiff &PDiff = getPressureDiff(SU);
            PDiff.addPressureChange(Reg, true, &MRI);
            DEBUG(
              dbgs() << " UpdateRegP: SU(" << SU->NodeNum << ") "
                     << *SU->getInstr();
              dbgs() << " to ";
              PDiff.dump(*TRI);
            );
          }
        }
      }
    }
  }
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMILive then it will want to override this virtual method in
/// order to update any specialized state.
void ScheduleDAGMILive::schedule() {
  DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
  DEBUG(SchedImpl->dumpPolicy());
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(
    for (const SUnit &SU : SUnits) {
      SU.dumpAll(this);
      if (ShouldTrackPressure) {
        dbgs() << "  Pressure Diff : ";
        getPressureDiff(&SU).dump(*TRI);
      }
      dbgs() << '\n';
    }
  );
  if (ViewMISchedDAGs) viewGraph();
  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  if (ShouldTrackPressure) {
    assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
    TopRPTracker.setPos(CurrentTop);
  }

  bool IsTopNode = false;
  while (true) {
    DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    if (DFSResult) {
      unsigned SubtreeID = DFSResult->getSubtreeID(SU);
      if (!ScheduledTrees.test(SubtreeID)) {
        ScheduledTrees.set(SubtreeID);
        DFSResult->scheduleTree(SubtreeID);
        SchedImpl->scheduleTree(SubtreeID);
      }
    }

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Build the DAG and setup three register pressure trackers.
void ScheduleDAGMILive::buildDAGWithRegPressure() {
  if (!ShouldTrackPressure) {
    RPTracker.reset();
    RegionCriticalPSets.clear();
    buildSchedGraph(AA);
    return;
  }

  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 ShouldTrackLaneMasks, /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker, &SUPressureDiffs, LIS, ShouldTrackLaneMasks);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

void ScheduleDAGMILive::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single block loops. To handle loops
/// that span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes.
/// For example, consider the following instruction sequence, where each
/// instruction has unit latency and defines an eponymous virtual register:
///
///   a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
///
/// This could be relevant to PostRA scheduling, but is currently implemented
/// assuming LiveIntervals.
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
  // This only applies to single-block loops.
  if (!BB->isSuccessor(BB))
    return 0;

  unsigned MaxCyclicLatency = 0;
  // Visit each live out vreg def to find def/use pairs that cross iterations.
  for (const RegisterMaskPair &P : RPTracker.getPressure().LiveOutRegs) {
    unsigned Reg = P.RegUnit;
    if (!TRI->isVirtualRegister(Reg))
      continue;
    const LiveInterval &LI = LIS->getInterval(Reg);
    const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    if (!DefVNI)
      continue;

    MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
    const SUnit *DefSU = getSUnit(DefMI);
    if (!DefSU)
      continue;

    unsigned LiveOutHeight = DefSU->getHeight();
    unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
    // Visit all local users of the vreg def.
    for (const VReg2SUnit &V2SU
         : make_range(VRegUses.find(Reg), VRegUses.end())) {
      SUnit *SU = V2SU.SU;
      if (SU == &ExitSU)
        continue;

      // Only consider uses of the phi.
      LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
      if (!LRQ.valueIn()->isPHIDef())
        continue;

      // Assume that a path spanning two iterations is a cycle, which could
      // overestimate in strange cases. This allows cyclic latency to be
      // estimated as the minimum slack of the vreg's depth or height.
      unsigned CyclicLatency = 0;
      if (LiveOutDepth > SU->getDepth())
        CyclicLatency = LiveOutDepth - SU->getDepth();

      unsigned LiveInHeight = SU->getHeight() + DefSU->Latency;
      if (LiveInHeight > LiveOutHeight) {
        if (LiveInHeight - LiveOutHeight < CyclicLatency)
          CyclicLatency = LiveInHeight - LiveOutHeight;
      }
      else
        CyclicLatency = 0;

      DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
            << SU->NodeNum << ") = " << CyclicLatency << "c\n");
      if (CyclicLatency > MaxCyclicLatency)
        MaxCyclicLatency = CyclicLatency;
    }
  }
  DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
  return MaxCyclicLatency;
}
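// Note: the cyclic critical path computed above is consumed by the scheduling
// strategy (computation of it is guarded by the EnableCyclicPath flag defined
// near the top of this file) to decide how aggressively latency should be
// hidden across loop iterations.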
/// Move an instruction and update register pressure.
void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    if (ShouldTrackPressure) {
      // Update top scheduled pressure.
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      TopRPTracker.advance(RegOpers);
      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
      DEBUG(
        dbgs() << "Top Pressure:\n";
        dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
      );

      updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
    }
  }
  else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
    }
    if (ShouldTrackPressure) {
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      BotRPTracker.recedeSkipDebugValues();
      SmallVector<RegisterMaskPair, 8> LiveUses;
      BotRPTracker.recede(RegOpers, &LiveUses);
      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
      DEBUG(
        dbgs() << "Bottom Pressure:\n";
        dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
      );

      updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
      updatePressureDiffs(LiveUses);
    }
  }
}

//===----------------------------------------------------------------------===//
// BaseMemOpClusterMutation - DAG post-processing to cluster loads or stores.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between neighboring
/// loads or between neighboring stores.
class BaseMemOpClusterMutation : public ScheduleDAGMutation {
  struct MemOpInfo {
    SUnit *SU;
    unsigned BaseReg;
    int64_t Offset;
    MemOpInfo(SUnit *su, unsigned reg, int64_t ofs)
      : SU(su), BaseReg(reg), Offset(ofs) {}

    bool operator<(const MemOpInfo &RHS) const {
      return std::tie(BaseReg, Offset) < std::tie(RHS.BaseReg, RHS.Offset);
    }
  };

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  bool IsLoad;

public:
  BaseMemOpClusterMutation(const TargetInstrInfo *tii,
                           const TargetRegisterInfo *tri, bool IsLoad)
      : TII(tii), TRI(tri), IsLoad(IsLoad) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;

protected:
  void clusterNeighboringMemOps(ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG);
};

class StoreClusterMutation : public BaseMemOpClusterMutation {
public:
  StoreClusterMutation(const TargetInstrInfo *tii,
                       const TargetRegisterInfo *tri)
      : BaseMemOpClusterMutation(tii, tri, false) {}
};

class LoadClusterMutation : public BaseMemOpClusterMutation {
public:
  LoadClusterMutation(const TargetInstrInfo *tii, const TargetRegisterInfo *tri)
      : BaseMemOpClusterMutation(tii, tri, true) {}
};
} // anonymous

void BaseMemOpClusterMutation::clusterNeighboringMemOps(
    ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG) {
  SmallVector<MemOpInfo, 32> MemOpRecords;
  for (unsigned Idx = 0, End = MemOps.size(); Idx != End; ++Idx) {
    SUnit *SU = MemOps[Idx];
    unsigned BaseReg;
    int64_t Offset;
    if (TII->getMemOpBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
      MemOpRecords.push_back(MemOpInfo(SU, BaseReg, Offset));
  }
  if (MemOpRecords.size() < 2)
    return;

  std::sort(MemOpRecords.begin(), MemOpRecords.end());
  unsigned ClusterLength = 1;
  for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
    if (MemOpRecords[Idx].BaseReg != MemOpRecords[Idx+1].BaseReg) {
      ClusterLength = 1;
      continue;
    }

    SUnit *SUa = MemOpRecords[Idx].SU;
    SUnit *SUb = MemOpRecords[Idx+1].SU;
    if (TII->shouldClusterMemOps(SUa->getInstr(), SUb->getInstr(),
                                 ClusterLength)
        && DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
      DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
            << SUb->NodeNum << ")\n");
      // Copy successor edges from SUa to SUb. Interleaving computation
      // dependent on SUa can prevent load combining due to register reuse.
      // Predecessor edges do not need to be copied from SUb to SUa since
      // nearby loads should have effectively the same inputs.
      for (SUnit::const_succ_iterator
             SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
        if (SI->getSUnit() == SUb)
          continue;
        DEBUG(dbgs() << " Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
        DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
      }
      ++ClusterLength;
    }
    else
      ClusterLength = 1;
  }
}
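// As an illustration (AArch64-flavored pseudo assembly, hypothetical): two
// loads such as
//
//   ldr x1, [x10]      ; base x10, offset 0
//   ldr x2, [x10, #8]  ; base x10, offset 8
//
// share a base register, so after the sort above they become adjacent records
// and a weak Cluster edge SUa->SUb is added, nudging the scheduler to emit
// them back to back where the target may pair or combine them.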
/// \brief Callback from DAG postProcessing to create cluster edges for loads.
void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAGInstrs) {
  ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);

  // Map DAG NodeNum to store chain ID.
  DenseMap<unsigned, unsigned> StoreChainIDs;
  // Map each store chain to a set of dependent MemOps.
  SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if ((IsLoad && !SU->getInstr()->mayLoad()) ||
        (!IsLoad && !SU->getInstr()->mayStore()))
      continue;

    unsigned ChainPredID = DAG->SUnits.size();
    for (SUnit::const_pred_iterator
           PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
      if (PI->isCtrl()) {
        ChainPredID = PI->getSUnit()->NodeNum;
        break;
      }
    }
    // Check if this chain-like pred has been seen
    // before. ChainPredID==MaxNodeID at the top of the schedule.
    unsigned NumChains = StoreChainDependents.size();
    std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
      StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
    if (Result.second)
      StoreChainDependents.resize(NumChains + 1);
    StoreChainDependents[Result.first->second].push_back(SU);
  }

  // Iterate over the store chains.
  for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
    clusterNeighboringMemOps(StoreChainDependents[Idx], DAG);
}

//===----------------------------------------------------------------------===//
// MacroFusion - DAG post-processing to encourage fusion of macro ops.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between instructions
/// that may be fused by the processor into a single operation.
class MacroFusion : public ScheduleDAGMutation {
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;
public:
  MacroFusion(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI)
    : TII(TII), TRI(TRI) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;
};
} // anonymous

/// Returns true if \p MI reads a register written by \p Other.
static bool HasDataDep(const TargetRegisterInfo &TRI, const MachineInstr &MI,
                       const MachineInstr &Other) {
  for (const MachineOperand &MO : MI.uses()) {
    if (!MO.isReg() || !MO.readsReg())
      continue;

    unsigned Reg = MO.getReg();
    if (Other.modifiesRegister(Reg, &TRI))
      return true;
  }
  return false;
}
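// The canonical example of macro fusion is an x86 CMP or TEST followed
// immediately by a conditional branch, which many cores decode as a single
// micro-op. The mutation below models this by gluing a fusible compare to the
// ExitSU (the branch) with a weak Cluster edge.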
    // To defer top-down scheduling of SU, we could create an artificial edge
    // from the deepest root, but it hasn't been needed yet.
    bool Success = DAG->addEdge(&ExitSU, SDep(&SU, SDep::Cluster));
    (void)Success;
    assert(Success && "No DAG nodes should be reachable from ExitSU");

    DEBUG(dbgs() << "Macro Fuse SU(" << SU.NodeNum << ")\n");
    break;
  }
}

//===----------------------------------------------------------------------===//
// CopyConstrain - DAG post-processing to encourage copy elimination.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create weak edges from all uses of a copy to
/// the one use that defines the copy's source vreg, most likely an induction
/// variable increment.
class CopyConstrain : public ScheduleDAGMutation {
  // Transient state.
  SlotIndex RegionBeginIdx;
  // RegionEndIdx is the slot index of the last non-debug instruction in the
  // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
  SlotIndex RegionEndIdx;
public:
  CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;

protected:
  void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
};
} // anonymous

/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
///    I0:     = dst
///    I1: src = ...
///    I2:     = dst
///    I3: dst = src (copy)
///    (create pred->succ edges I0->I1, I2->I1)
///
/// 2) Local copy:
///    I0: dst = src (copy)
///    I1:     = dst
///    I2: src = ...
///    I3:     = dst
///    (create pred->succ edges I1->I2, I3->I2)
///
/// Although the MachineScheduler is currently constrained to single blocks,
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
  LiveIntervals *LIS = DAG->getLIS();
  MachineInstr *Copy = CopySU->getInstr();

  // Check for pure vreg copies.
  const MachineOperand &SrcOp = Copy->getOperand(1);
  unsigned SrcReg = SrcOp.getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg) || !SrcOp.readsReg())
    return;

  const MachineOperand &DstOp = Copy->getOperand(0);
  unsigned DstReg = DstOp.getReg();
  if (!TargetRegisterInfo::isVirtualRegister(DstReg) || DstOp.isDead())
    return;

  // Check if either the dest or source is local. If it's live across a back
  // edge, it's not local. Note that if both vregs are live across the back
  // edge, we cannot successfully constrain the copy without cyclic scheduling.
  // If both the copy's source and dest are local live intervals, then we
  // should treat the dest as the global for the purpose of adding
  // constraints. This adds edges from source's other uses to the copy.
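  // For example (illustrative vreg names only): given %dst = COPY %src where
  // %src's entire live range lies inside [RegionBeginIdx, RegionEndIdx] but
  // %dst is live out of the region, %src is picked as the "local" vreg and
  // %dst as the "global" one; the weak edges added below then pull %src's
  // other uses next to the copy so the coalescer can remove it.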
  unsigned LocalReg = SrcReg;
  unsigned GlobalReg = DstReg;
  LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
  if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
    LocalReg = DstReg;
    GlobalReg = SrcReg;
    LocalLI = &LIS->getInterval(LocalReg);
    if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
      return;
  }
  LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);

  // Find the global segment after the start of the local LI.
  LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
  // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
  // local live range. We could create edges from other global uses to the
  // local start, but the coalescer should have already eliminated these
  // cases, so don't bother dealing with it.
  if (GlobalSegment == GlobalLI->end())
    return;

  // If GlobalSegment is killed at the LocalLI->start, the call to find()
  // returned the next global segment. But if GlobalSegment overlaps with
  // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
  // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
  if (GlobalSegment->contains(LocalLI->beginIndex()))
    ++GlobalSegment;

  if (GlobalSegment == GlobalLI->end())
    return;

  // Check if GlobalLI contains a hole in the vicinity of LocalLI.
  if (GlobalSegment != GlobalLI->begin()) {
    // Two-address defs have no hole.
    if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
                               GlobalSegment->start)) {
      return;
    }
    // If the prior global segment may be defined by the same two-address
    // instruction that also defines LocalLI, then we can't make a hole here.
    if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
                               LocalLI->beginIndex())) {
      return;
    }
    // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
    // it would be a disconnected component in the live range.
    assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
           "Disconnected LRG within the scheduling region.");
  }
  MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
  if (!GlobalDef)
    return;

  SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
  if (!GlobalSU)
    return;

  // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
  // constraining the uses of the last local def to precede GlobalDef.
  SmallVector<SUnit*,8> LocalUses;
  const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
  MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
  SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
  for (SUnit::const_succ_iterator
         I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
       I != E; ++I) {
    if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
      continue;
    if (I->getSUnit() == GlobalSU)
      continue;
    if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
      return;
    LocalUses.push_back(I->getSUnit());
  }
  // Open the top of the GlobalLI hole by constraining any earlier global uses
  // to precede the start of LocalLI.
  SmallVector<SUnit*,8> GlobalUses;
  MachineInstr *FirstLocalDef =
    LIS->getInstructionFromIndex(LocalLI->beginIndex());
  SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
  for (SUnit::const_pred_iterator
         I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
    if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
      continue;
    if (I->getSUnit() == FirstLocalSU)
      continue;
    if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
      return;
    GlobalUses.push_back(I->getSUnit());
  }
  DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
  // Add the weak edges.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Local use SU(" << (*I)->NodeNum << ") -> SU("
          << GlobalSU->NodeNum << ")\n");
    DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
  }
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Global use SU(" << (*I)->NodeNum << ") -> SU("
          << FirstLocalSU->NodeNum << ")\n");
    DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
  }
}

/// \brief Callback from DAG postProcessing to create weak edges to encourage
/// copy elimination.
void CopyConstrain::apply(ScheduleDAGInstrs *DAGInstrs) {
  ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
  assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");

  MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
  if (FirstPos == DAG->end())
    return;
  RegionBeginIdx = DAG->getLIS()->getInstructionIndex(*FirstPos);
  RegionEndIdx = DAG->getLIS()->getInstructionIndex(
      *priorNonDebug(DAG->end(), DAG->begin()));

  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->isCopy())
      continue;

    constrainLocalCopy(SU, static_cast<ScheduleDAGMILive*>(DAG));
  }
}

//===----------------------------------------------------------------------===//
// MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
// and possibly other custom schedulers.
//===----------------------------------------------------------------------===//

static const unsigned InvalidCycle = ~0U;

SchedBoundary::~SchedBoundary() { delete HazardRec; }

void SchedBoundary::reset() {
  // A new HazardRec is created for each DAG and owned by SchedBoundary.
  // Destroying and reconstructing it is very expensive though, so keep
  // invalid, placeholder HazardRecs.
  if (HazardRec && HazardRec->isEnabled()) {
    delete HazardRec;
    HazardRec = nullptr;
  }
  Available.clear();
  Pending.clear();
  CheckPending = false;
  NextSUs.clear();
  CurrCycle = 0;
  CurrMOps = 0;
  MinReadyCycle = UINT_MAX;
  ExpectedLatency = 0;
  DependentLatency = 0;
  RetiredMOps = 0;
  MaxExecutedResCount = 0;
  ZoneCritResIdx = 0;
  IsResourceLimited = false;
  ReservedCycles.clear();
#ifndef NDEBUG
  // Track the maximum number of stall cycles that could arise either from the
  // latency of a DAG edge or the number of cycles that a processor resource
  // is reserved (SchedBoundary::ReservedCycles).
  MaxObservedStall = 0;
#endif
  // Reserve a zero-count for invalid CritResIdx.
  ExecutedResCounts.resize(1);
  assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
}

void SchedRemainder::
init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
  reset();
  if (!SchedModel->hasInstrSchedModel())
    return;
  RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
  for (std::vector<SUnit>::iterator
         I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
    RemIssueCount += SchedModel->getNumMicroOps(I->getInstr(), SC)
      * SchedModel->getMicroOpFactor();
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned PIdx = PI->ProcResourceIdx;
      unsigned Factor = SchedModel->getResourceFactor(PIdx);
      RemainingCounts[PIdx] += (Factor * PI->Cycles);
    }
  }
}

void SchedBoundary::
init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
  reset();
  DAG = dag;
  SchedModel = smodel;
  Rem = rem;
  if (SchedModel->hasInstrSchedModel()) {
    ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds());
    ReservedCycles.resize(SchedModel->getNumProcResourceKinds(), InvalidCycle);
  }
}

/// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
/// these "soft stalls" differently than the hard stall cycles based on CPU
/// resources and computed by checkHazard(). A fully in-order model
/// (MicroOpBufferSize==0) will not make use of this since instructions are not
/// available for scheduling until they are ready. However, a weaker in-order
/// model may use this for heuristics. For example, if a processor has in-order
/// behavior when reading certain resources, this may come into play.
unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
  if (!SU->isUnbuffered)
    return 0;

  unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
  if (ReadyCycle > CurrCycle)
    return ReadyCycle - CurrCycle;
  return 0;
}

/// Compute the next cycle at which the given processor resource can be
/// scheduled.
unsigned SchedBoundary::
getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
  unsigned NextUnreserved = ReservedCycles[PIdx];
  // If this resource has never been used, always return cycle zero.
  if (NextUnreserved == InvalidCycle)
    return 0;
  // For bottom-up scheduling add the cycles needed for the current operation.
  if (!isTop())
    NextUnreserved += Cycles;
  return NextUnreserved;
}

/// Does this SU have a hazard within the current instruction group?
///
/// The scheduler supports two modes of hazard recognition. The first is the
/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
/// supports highly complicated in-order reservation tables
/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
///
/// The second is a streamlined mechanism that checks for hazards based on
/// simple counters that the scheduler itself maintains. It explicitly checks
/// for instruction dispatch limitations, including the number of micro-ops
/// that can dispatch per cycle.
///
/// TODO: Also check whether the SU must start a new group.
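///
/// A hedged example of the second mechanism (numbers are illustrative, not
/// taken from any real subtarget model): on a 4-wide machine with
/// CurrMOps == 3, a candidate SUnit that expands to 2 micro-ops is reported
/// as a hazard because 3 + 2 > IssueWidth, so it must wait for the next
/// cycle even though no pipeline resource is reserved.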
bool SchedBoundary::checkHazard(SUnit *SU) {
  if (HazardRec->isEnabled()
      && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
    return true;
  }
  unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
  if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
    DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
          << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
    return true;
  }
  if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned NRCycle = getNextResourceCycle(PI->ProcResourceIdx, PI->Cycles);
      if (NRCycle > CurrCycle) {
#ifndef NDEBUG
        MaxObservedStall = std::max(PI->Cycles, MaxObservedStall);
#endif
        DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") "
              << SchedModel->getResourceName(PI->ProcResourceIdx)
              << "=" << NRCycle << "c\n");
        return true;
      }
    }
  }
  return false;
}

// Find the unscheduled node in ReadySUs with the highest latency.
unsigned SchedBoundary::
findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
  SUnit *LateSU = nullptr;
  unsigned RemLatency = 0;
  for (ArrayRef<SUnit*>::iterator I = ReadySUs.begin(), E = ReadySUs.end();
       I != E; ++I) {
    unsigned L = getUnscheduledLatency(*I);
    if (L > RemLatency) {
      RemLatency = L;
      LateSU = *I;
    }
  }
  if (LateSU) {
    DEBUG(dbgs() << Available.getName() << " RemLatency SU("
          << LateSU->NodeNum << ") " << RemLatency << "c\n");
  }
  return RemLatency;
}

// Count resources in this zone and the remaining unscheduled
// instructions. Return the max count, scaled. Set OtherCritIdx to the
// critical resource index, or zero if the zone is issue limited.
unsigned SchedBoundary::
getOtherResourceCount(unsigned &OtherCritIdx) {
  OtherCritIdx = 0;
  if (!SchedModel->hasInstrSchedModel())
    return 0;

  unsigned OtherCritCount = Rem->RemIssueCount
    + (RetiredMOps * SchedModel->getMicroOpFactor());
  DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
        << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
  for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
       PIdx != PEnd; ++PIdx) {
    unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
    if (OtherCount > OtherCritCount) {
      OtherCritCount = OtherCount;
      OtherCritIdx = PIdx;
    }
  }
  if (OtherCritIdx) {
    DEBUG(dbgs() << "  " << Available.getName() << " + Remain CritRes: "
          << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
          << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
  }
  return OtherCritCount;
}

void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) {
  assert(SU->getInstr() && "Scheduled SUnit must have instr");

#ifndef NDEBUG
  // ReadyCycle had been bumped up to the CurrCycle when this node was
  // scheduled, but CurrCycle may have been eagerly advanced immediately after
  // scheduling, so may now be greater than ReadyCycle.
  if (ReadyCycle > CurrCycle)
    MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
#endif

  if (ReadyCycle < MinReadyCycle)
    MinReadyCycle = ReadyCycle;

  // Check for interlocks first. For the purpose of other heuristics, an
  // instruction that cannot issue appears as if it's not in the ReadyQueue.
  bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
  if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU))
    Pending.push(SU);
  else
    Available.push(SU);

  // Record this node as an immediate dependent of the scheduled node.
  NextSUs.insert(SU);
}

void SchedBoundary::releaseTopNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  releaseNode(SU, SU->TopReadyCycle);
}

void SchedBoundary::releaseBottomNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  releaseNode(SU, SU->BotReadyCycle);
}

/// Move the boundary of scheduled code by one cycle.
void SchedBoundary::bumpCycle(unsigned NextCycle) {
  if (SchedModel->getMicroOpBufferSize() == 0) {
    assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
    if (MinReadyCycle > NextCycle)
      NextCycle = MinReadyCycle;
  }
  // Update the current micro-ops, which will issue in the next cycle.
  unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
  CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;

  // Decrement DependentLatency based on the next cycle.
  if ((NextCycle - CurrCycle) > DependentLatency)
    DependentLatency = 0;
  else
    DependentLatency -= (NextCycle - CurrCycle);

  if (!HazardRec->isEnabled()) {
    // Bypass HazardRec virtual calls.
    CurrCycle = NextCycle;
  } else {
    // Bypass getHazardType calls in case of long latency.
    for (; CurrCycle != NextCycle; ++CurrCycle) {
      if (isTop())
        HazardRec->AdvanceCycle();
      else
        HazardRec->RecedeCycle();
    }
  }
  CheckPending = true;
  unsigned LFactor = SchedModel->getLatencyFactor();
  IsResourceLimited =
    (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
    > (int)LFactor;

  DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n');
}

void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
  ExecutedResCounts[PIdx] += Count;
  if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
    MaxExecutedResCount = ExecutedResCounts[PIdx];
}

/// Add the given processor resource to this scheduled zone.
///
/// \param Cycles indicates the number of consecutive (non-pipelined) cycles
/// during which this resource is consumed.
///
/// \return the next cycle at which the instruction may execute without
/// oversubscribing resources.
unsigned SchedBoundary::
countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
  unsigned Factor = SchedModel->getResourceFactor(PIdx);
  unsigned Count = Factor * Cycles;
  DEBUG(dbgs() << "  " << SchedModel->getResourceName(PIdx)
        << " +" << Cycles << "x" << Factor << "u\n");

  // Update Executed resources counts.
  incExecutedResources(PIdx, Count);
  assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
  Rem->RemainingCounts[PIdx] -= Count;

  // Check if this resource exceeds the current critical resource.
  // If so, it becomes the critical resource.
  if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
    ZoneCritResIdx = PIdx;
    DEBUG(dbgs() << "  *** Critical resource "
          << SchedModel->getResourceName(PIdx) << ": "
          << getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
  }
  // For reserved resources, record the highest cycle using the resource.
  unsigned NextAvailable = getNextResourceCycle(PIdx, Cycles);
  if (NextAvailable > CurrCycle) {
    DEBUG(dbgs() << "  Resource conflict: "
          << SchedModel->getProcResource(PIdx)->Name << " reserved until @"
          << NextAvailable << "\n");
  }
  return NextAvailable;
}

/// Move the boundary of scheduled code by one SUnit.
void SchedBoundary::bumpNode(SUnit *SU) {
  // Update the reservation table.
  if (HazardRec->isEnabled()) {
    if (!isTop() && SU->isCall) {
      // Calls are scheduled with their preceding instructions. For bottom-up
      // scheduling, clear the pipeline state before emitting.
      HazardRec->Reset();
    }
    HazardRec->EmitInstruction(SU);
  }
  // checkHazard should prevent scheduling multiple instructions per cycle
  // that exceed the issue width.
  const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
  unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
  assert(
      (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
      "Cannot schedule this instruction's MicroOps in the current cycle.");

  unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
  DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");

  unsigned NextCycle = CurrCycle;
  switch (SchedModel->getMicroOpBufferSize()) {
  case 0:
    assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
    break;
  case 1:
    if (ReadyCycle > NextCycle) {
      NextCycle = ReadyCycle;
      DEBUG(dbgs() << "  *** Stall until: " << ReadyCycle << "\n");
    }
    break;
  default:
    // We don't currently model the OOO reorder buffer, so consider all
    // scheduled MOps to be "retired". We do loosely model in-order resource
    // latency. If this instruction uses an in-order resource, account for any
    // likely stall cycles.
    if (SU->isUnbuffered && ReadyCycle > NextCycle)
      NextCycle = ReadyCycle;
    break;
  }
  RetiredMOps += IncMOps;

  // Update resource counts and critical resource.
  if (SchedModel->hasInstrSchedModel()) {
    unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
    assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
    Rem->RemIssueCount -= DecRemIssue;
    if (ZoneCritResIdx) {
      // Scale scheduled micro-ops for comparing with the critical resource.
      unsigned ScaledMOps =
        RetiredMOps * SchedModel->getMicroOpFactor();

      // If scaled micro-ops are now more than the previous critical resource
      // by a full cycle, then micro-ops issue becomes critical.
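      // Worked example with illustrative numbers only: if LatencyFactor == 2,
      // ScaledMOps == 12, and the critical resource count is 10, then the
      // difference (2) equals a full cycle's worth of issue, so NumMicroOps
      // (index 0) takes over as the zone's critical resource below.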
      if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
          >= (int)SchedModel->getLatencyFactor()) {
        ZoneCritResIdx = 0;
        DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
              << ScaledMOps / SchedModel->getLatencyFactor() << "c\n");
      }
    }
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned RCycle =
        countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
      if (RCycle > NextCycle)
        NextCycle = RCycle;
    }
    if (SU->hasReservedResource) {
      // For reserved resources, record the highest cycle using the resource.
      // For top-down scheduling, this is the cycle in which we schedule this
      // instruction plus the number of cycles the operation reserves the
      // resource. For bottom-up scheduling, it is simply the instruction's
      // cycle.
      for (TargetSchedModel::ProcResIter
             PI = SchedModel->getWriteProcResBegin(SC),
             PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
        unsigned PIdx = PI->ProcResourceIdx;
        if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
          if (isTop()) {
            ReservedCycles[PIdx] =
              std::max(getNextResourceCycle(PIdx, 0), NextCycle + PI->Cycles);
          } else
            ReservedCycles[PIdx] = NextCycle;
        }
      }
    }
  }
  // Update ExpectedLatency and DependentLatency.
  unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
  unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
  if (SU->getDepth() > TopLatency) {
    TopLatency = SU->getDepth();
    DEBUG(dbgs() << "  " << Available.getName()
          << " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n");
  }
  if (SU->getHeight() > BotLatency) {
    BotLatency = SU->getHeight();
    DEBUG(dbgs() << "  " << Available.getName()
          << " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n");
  }
  // If we stall for any reason, bump the cycle.
  if (NextCycle > CurrCycle) {
    bumpCycle(NextCycle);
  } else {
    // After updating ZoneCritResIdx and ExpectedLatency, check if we're
    // resource limited. If a stall occurred, bumpCycle does this.
    unsigned LFactor = SchedModel->getLatencyFactor();
    IsResourceLimited =
      (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
      > (int)LFactor;
  }
  // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
  // resets CurrMOps. Loop to handle instructions with more MOps than issue in
  // one cycle. Since we commonly reach the max MOps here, opportunistically
  // bump the cycle to avoid uselessly checking everything in the readyQ.
  CurrMOps += IncMOps;
  while (CurrMOps >= SchedModel->getIssueWidth()) {
    DEBUG(dbgs() << "  *** Max MOps " << CurrMOps
          << " at cycle " << CurrCycle << '\n');
    bumpCycle(++NextCycle);
  }
  DEBUG(dumpScheduledState());
}

/// Release pending ready nodes into the available queue. This makes them
/// visible to heuristics.
void SchedBoundary::releasePending() {
  // If the available queue is empty, it is safe to reset MinReadyCycle.
  if (Available.empty())
    MinReadyCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
  for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
    SUnit *SU = *(Pending.begin()+i);
    unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;

    if (ReadyCycle < MinReadyCycle)
      MinReadyCycle = ReadyCycle;

    if (!IsBuffered && ReadyCycle > CurrCycle)
      continue;

    if (checkHazard(SU))
      continue;

    Available.push(SU);
    Pending.remove(Pending.begin()+i);
    --i; --e;
  }
  DEBUG(if (!Pending.empty()) Pending.dump());
  CheckPending = false;
}

/// Remove SU from the ready set for this boundary.
void SchedBoundary::removeReady(SUnit *SU) {
  if (Available.isInQueue(SU))
    Available.remove(Available.find(SU));
  else {
    assert(Pending.isInQueue(SU) && "bad ready count");
    Pending.remove(Pending.find(SU));
  }
}

/// If this queue only has one ready candidate, return it. As a side effect,
/// defer any nodes that now hit a hazard, and advance the cycle until at least
/// one node is ready. If multiple instructions are ready, return NULL.
SUnit *SchedBoundary::pickOnlyChoice() {
  if (CheckPending)
    releasePending();

  if (CurrMOps > 0) {
    // Defer any ready instrs that now have a hazard.
    for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
      if (checkHazard(*I)) {
        Pending.push(*I);
        I = Available.remove(I);
        continue;
      }
      ++I;
    }
  }
  for (unsigned i = 0; Available.empty(); ++i) {
    // FIXME: Re-enable assert once PR20057 is resolved.
    // assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
    //        "permanent hazard");
    (void)i;
    bumpCycle(CurrCycle + 1);
    releasePending();
  }
  if (Available.size() == 1)
    return *Available.begin();
  return nullptr;
}

#ifndef NDEBUG
// This is useful information to dump after bumpNode.
// Note that the Queue contents are more useful before pickNodeFromQueue.
void SchedBoundary::dumpScheduledState() {
  unsigned ResFactor;
  unsigned ResCount;
  if (ZoneCritResIdx) {
    ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
    ResCount = getResourceCount(ZoneCritResIdx);
  } else {
    ResFactor = SchedModel->getMicroOpFactor();
    ResCount = RetiredMOps * SchedModel->getMicroOpFactor();
  }
  unsigned LFactor = SchedModel->getLatencyFactor();
  dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
         << "  Retired: " << RetiredMOps;
  dbgs() << "\n  Executed: " << getExecutedCount() / LFactor << "c";
  dbgs() << "\n  Critical: " << ResCount / LFactor << "c, "
         << ResCount / ResFactor << " "
         << SchedModel->getResourceName(ZoneCritResIdx)
         << "\n  ExpectedLatency: " << ExpectedLatency << "c\n"
         << (IsResourceLimited ? "  - Resource" : "  - Latency")
         << " limited.\n";
}
#endif

//===----------------------------------------------------------------------===//
// GenericScheduler - Generic implementation of MachineSchedStrategy.
//===----------------------------------------------------------------------===//

void GenericSchedulerBase::SchedCandidate::
initResourceDelta(const ScheduleDAGMI *DAG,
                  const TargetSchedModel *SchedModel) {
  if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
    return;

  const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
  for (TargetSchedModel::ProcResIter
         PI = SchedModel->getWriteProcResBegin(SC),
         PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
    if (PI->ProcResourceIdx == Policy.ReduceResIdx)
      ResDelta.CritResources += PI->Cycles;
    if (PI->ProcResourceIdx == Policy.DemandResIdx)
      ResDelta.DemandedResources += PI->Cycles;
  }
}

/// Set the CandPolicy for a scheduling zone given the current resources and
/// latencies inside and outside the zone.
void GenericSchedulerBase::setPolicy(CandPolicy &Policy,
                                     bool IsPostRA,
                                     SchedBoundary &CurrZone,
                                     SchedBoundary *OtherZone) {
  // Apply preemptive heuristics based on the total latency and resources
  // inside and outside this zone. Potential stalls should be considered
  // before following this policy.

  // Compute remaining latency. We need this both to determine whether the
  // overall schedule has become latency-limited and whether the instructions
  // outside this zone are resource or latency limited.
  //
  // The "dependent" latency is updated incrementally during scheduling as the
  // max height/depth of scheduled nodes minus the cycles since it was
  // scheduled:
  //   DLat = max(N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
  //
  // The "independent" latency is the max ready queue depth:
  //   ILat = max(N.depth) for N in Available|Pending
  //
  // RemainingLatency is the greater of independent and dependent latency.
  unsigned RemLatency = CurrZone.getDependentLatency();
  RemLatency = std::max(RemLatency,
                        CurrZone.findMaxLatency(CurrZone.Available.elements()));
  RemLatency = std::max(RemLatency,
                        CurrZone.findMaxLatency(CurrZone.Pending.elements()));

  // Compute the critical resource outside the zone.
  unsigned OtherCritIdx = 0;
  unsigned OtherCount =
    OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;

  bool OtherResLimited = false;
  if (SchedModel->hasInstrSchedModel()) {
    unsigned LFactor = SchedModel->getLatencyFactor();
    OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
  }
  // Schedule aggressively for latency in PostRA mode. We don't check for
  // acyclic latency during PostRA, and highly out-of-order processors will
  // skip PostRA scheduling.
  if (!OtherResLimited) {
    if (IsPostRA || (RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath)) {
      Policy.ReduceLatency |= true;
      DEBUG(dbgs() << "  " << CurrZone.Available.getName()
            << " RemainingLatency " << RemLatency << " + "
            << CurrZone.getCurrCycle() << "c > CritPath "
            << Rem.CriticalPath << "\n");
    }
  }
  // If the same resource is limiting inside and outside the zone, do nothing.
  if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
    return;

  DEBUG(
    if (CurrZone.isResourceLimited()) {
      dbgs() << "  " << CurrZone.Available.getName() << " ResourceLimited: "
             << SchedModel->getResourceName(CurrZone.getZoneCritResIdx())
             << "\n";
    }
    if (OtherResLimited)
      dbgs() << "  RemainingLimit: "
             << SchedModel->getResourceName(OtherCritIdx) << "\n";
    if (!CurrZone.isResourceLimited() && !OtherResLimited)
      dbgs() << "  Latency limited both directions.\n");

  if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
    Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();

  if (OtherResLimited)
    Policy.DemandResIdx = OtherCritIdx;
}

#ifndef NDEBUG
const char *GenericSchedulerBase::getReasonStr(
    GenericSchedulerBase::CandReason Reason) {
  switch (Reason) {
  case NoCand:         return "NOCAND    ";
  case PhysRegCopy:    return "PREG-COPY ";
  case RegExcess:      return "REG-EXCESS";
  case RegCritical:    return "REG-CRIT  ";
  case Stall:          return "STALL     ";
  case Cluster:        return "CLUSTER   ";
  case Weak:           return "WEAK      ";
  case RegMax:         return "REG-MAX   ";
  case ResourceReduce: return "RES-REDUCE";
  case ResourceDemand: return "RES-DEMAND";
  case TopDepthReduce: return "TOP-DEPTH ";
  case TopPathReduce:  return "TOP-PATH  ";
  case BotHeightReduce:return "BOT-HEIGHT";
  case BotPathReduce:  return "BOT-PATH  ";
  case NextDefUse:     return "DEF-USE   ";
  case NodeOrder:      return "ORDER     ";
  }
  llvm_unreachable("Unknown reason!");
}

void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
  PressureChange P;
  unsigned ResIdx = 0;
  unsigned Latency = 0;
  switch (Cand.Reason) {
  default:
    break;
  case RegExcess:
    P = Cand.RPDelta.Excess;
    break;
  case RegCritical:
    P = Cand.RPDelta.CriticalMax;
    break;
  case RegMax:
    P = Cand.RPDelta.CurrentMax;
    break;
  case ResourceReduce:
    ResIdx = Cand.Policy.ReduceResIdx;
    break;
  case ResourceDemand:
    ResIdx = Cand.Policy.DemandResIdx;
    break;
  case TopDepthReduce:
    Latency = Cand.SU->getDepth();
    break;
  case TopPathReduce:
    Latency = Cand.SU->getHeight();
    break;
  case BotHeightReduce:
    Latency = Cand.SU->getHeight();
    break;
  case BotPathReduce:
    Latency = Cand.SU->getDepth();
    break;
  }
  dbgs() << "  Cand SU(" << Cand.SU->NodeNum << ") "
         << getReasonStr(Cand.Reason);
  if (P.isValid())
    dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
           << ":" << P.getUnitInc() << " ";
  else
    dbgs() << "      ";
  if (ResIdx)
    dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
  else
    dbgs() << "         ";
  if (Latency)
    dbgs() << " " << Latency << " cycles ";
  else
    dbgs() << "          ";
  dbgs() << '\n';
}
#endif

/// Return true if this heuristic determines order.
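///
/// A sketch of the three-way protocol shared by tryLess/tryGreater: a win
/// for TryVal tags TryCand with Reason and returns true; a win for the
/// incumbent also returns true (keeping Cand, possibly improving its
/// Reason); a tie returns false so the next heuristic can break it. A
/// typical call site, mirroring tryCandidate below:
///
///   if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()),
///               getWeakLeft(Cand.SU, Zone.isTop()),
///               TryCand, Cand, GenericSchedulerBase::Weak))
///     return; // decided by weak-edge count; otherwise fall through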
static bool tryLess(int TryVal, int CandVal,
                    GenericSchedulerBase::SchedCandidate &TryCand,
                    GenericSchedulerBase::SchedCandidate &Cand,
                    GenericSchedulerBase::CandReason Reason) {
  if (TryVal < CandVal) {
    TryCand.Reason = Reason;
    return true;
  }
  if (TryVal > CandVal) {
    if (Cand.Reason > Reason)
      Cand.Reason = Reason;
    return true;
  }
  Cand.setRepeat(Reason);
  return false;
}

static bool tryGreater(int TryVal, int CandVal,
                       GenericSchedulerBase::SchedCandidate &TryCand,
                       GenericSchedulerBase::SchedCandidate &Cand,
                       GenericSchedulerBase::CandReason Reason) {
  if (TryVal > CandVal) {
    TryCand.Reason = Reason;
    return true;
  }
  if (TryVal < CandVal) {
    if (Cand.Reason > Reason)
      Cand.Reason = Reason;
    return true;
  }
  Cand.setRepeat(Reason);
  return false;
}

static bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
                       GenericSchedulerBase::SchedCandidate &Cand,
                       SchedBoundary &Zone) {
  if (Zone.isTop()) {
    if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
      if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
                  TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
        return true;
    }
    if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
                   TryCand, Cand, GenericSchedulerBase::TopPathReduce))
      return true;
  } else {
    if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
      if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
                  TryCand, Cand, GenericSchedulerBase::BotHeightReduce))
        return true;
    }
    if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
                   TryCand, Cand, GenericSchedulerBase::BotPathReduce))
      return true;
  }
  return false;
}

static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand,
                      bool IsTop) {
  DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
        << GenericSchedulerBase::getReasonStr(Cand.Reason) << '\n');
}

void GenericScheduler::initialize(ScheduleDAGMI *dag) {
  assert(dag->hasVRegLiveness() &&
         "(PreRA)GenericScheduler needs vreg liveness");
  DAG = static_cast<ScheduleDAGMILive*>(dag);
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  Bot.init(DAG, SchedModel, &Rem);

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
  // or are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  if (!Top.HazardRec) {
    Top.HazardRec =
      DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
          Itin, DAG);
  }
  if (!Bot.HazardRec) {
    Bot.HazardRec =
      DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
          Itin, DAG);
  }
}

/// Initialize the per-region scheduling policy.
void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
                                  MachineBasicBlock::iterator End,
                                  unsigned NumRegionInstrs) {
  const MachineFunction &MF = *Begin->getParent()->getParent();
  const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();

  // Avoid setting up the register pressure tracker for small regions to save
  // compile time.
  // As a rough heuristic, only track pressure when the number of schedulable
  // instructions exceeds half the integer register file.
  RegionPolicy.ShouldTrackPressure = true;
  for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
    MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
    if (TLI->isTypeLegal(LegalIntVT)) {
      unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
          TLI->getRegClassFor(LegalIntVT));
      RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
    }
  }

  // For generic targets, we default to bottom-up, because it's simpler and
  // more compile-time optimizations have been implemented in that direction.
  RegionPolicy.OnlyBottomUp = true;

  // Allow the subtarget to override default policy.
  MF.getSubtarget().overrideSchedPolicy(RegionPolicy, Begin, End,
                                        NumRegionInstrs);

  // After subtarget overrides, apply command line options.
  if (!EnableRegPressure)
    RegionPolicy.ShouldTrackPressure = false;

  // Check whether -misched-topdown/bottomup can force or unforce the
  // scheduling direction. e.g. -misched-bottomup=false allows scheduling in
  // both directions.
  assert((!ForceTopDown || !ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
  if (ForceBottomUp.getNumOccurrences() > 0) {
    RegionPolicy.OnlyBottomUp = ForceBottomUp;
    if (RegionPolicy.OnlyBottomUp)
      RegionPolicy.OnlyTopDown = false;
  }
  if (ForceTopDown.getNumOccurrences() > 0) {
    RegionPolicy.OnlyTopDown = ForceTopDown;
    if (RegionPolicy.OnlyTopDown)
      RegionPolicy.OnlyBottomUp = false;
  }
}

void GenericScheduler::dumpPolicy() {
  dbgs() << "GenericScheduler RegionPolicy: "
         << " ShouldTrackPressure=" << RegionPolicy.ShouldTrackPressure
         << " OnlyTopDown=" << RegionPolicy.OnlyTopDown
         << " OnlyBottomUp=" << RegionPolicy.OnlyBottomUp
         << "\n";
}

/// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
/// critical path by more cycles than it takes to drain the instruction buffer.
/// We estimate an upper bound on in-flight instructions as:
///
///   CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
///   InFlightIterations = AcyclicPath / CyclesPerIteration
///   InFlightResources  = InFlightIterations * LoopResources
///
/// TODO: Check execution resources in addition to IssueCount.
void GenericScheduler::checkAcyclicLatency() {
  if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
    return;

  // Scaled number of cycles per loop iteration.
  unsigned IterCount =
    std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
             Rem.RemIssueCount);
  // Scaled acyclic critical path.
  unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
  // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
  unsigned InFlightCount =
    (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
  unsigned BufferLimit =
    SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();

  Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;

  DEBUG(dbgs() << "IssueCycles="
        << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
        << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
        << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
        << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
        << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
        if (Rem.IsAcyclicLatencyLimited)
          dbgs() << "  ACYCLIC LATENCY LIMIT\n");
}

void GenericScheduler::registerRoots() {
  Rem.CriticalPath = DAG->ExitSU.getDepth();

  // Some roots may not feed into ExitSU. Check all of them just in case.
  for (std::vector<SUnit*>::const_iterator
         I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
    if ((*I)->getDepth() > Rem.CriticalPath)
      Rem.CriticalPath = (*I)->getDepth();
  }
  DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
  if (DumpCriticalPathLength) {
    errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n";
  }

  if (EnableCyclicPath) {
    Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
    checkAcyclicLatency();
  }
}

static bool tryPressure(const PressureChange &TryP,
                        const PressureChange &CandP,
                        GenericSchedulerBase::SchedCandidate &TryCand,
                        GenericSchedulerBase::SchedCandidate &Cand,
                        GenericSchedulerBase::CandReason Reason,
                        const TargetRegisterInfo *TRI,
                        const MachineFunction &MF) {
  unsigned TryPSet = TryP.getPSetOrMax();
  unsigned CandPSet = CandP.getPSetOrMax();
  // If both candidates affect the same set, go with the smallest increase.
  if (TryPSet == CandPSet) {
    return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
                   Reason);
  }
  // If one candidate decreases and the other increases, go with it.
  // Invalid candidates have UnitInc==0.
  if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
                 Reason)) {
    return true;
  }

  int TryRank = TryP.isValid() ? TRI->getRegPressureSetScore(MF, TryPSet) :
                std::numeric_limits<int>::max();

  int CandRank = CandP.isValid() ? TRI->getRegPressureSetScore(MF, CandPSet) :
                 std::numeric_limits<int>::max();

  // If the candidates are decreasing pressure, reverse priority.
  if (TryP.getUnitInc() < 0)
    std::swap(TryRank, CandRank);
  return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
}

static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
  return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
}

/// Minimize physical register live ranges. Regalloc wants them adjacent to
/// their physreg def/use.
///
/// FIXME: This is an unnecessary check on the critical path. Most are
/// root/leaf copies which can be prescheduled. The rest (e.g. x86 MUL) could
/// be bundled with the operation that produces or consumes the physreg. We'll
/// do this when regalloc has support for parallel copies.
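///
/// For example, when scheduling top-down, a COPY that reads a physical
/// register (operand 1) gets a positive bias: its physreg producer has
/// already been placed above, so scheduling the copy now keeps the physreg
/// live range short. A COPY that defines a physreg on the unscheduled side
/// is deferred (negative bias) only when it sits at the region boundary,
/// i.e. has no unscheduled successors.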
static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
  const MachineInstr *MI = SU->getInstr();
  if (!MI->isCopy())
    return 0;

  unsigned ScheduledOper = isTop ? 1 : 0;
  unsigned UnscheduledOper = isTop ? 0 : 1;
  // If we have already scheduled the physreg producer/consumer, immediately
  // schedule the copy.
  if (TargetRegisterInfo::isPhysicalRegister(
          MI->getOperand(ScheduledOper).getReg()))
    return 1;
  // If the physreg is at the boundary, defer it. Otherwise schedule it
  // immediately to free the dependent. We can hoist the copy later.
  bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
  if (TargetRegisterInfo::isPhysicalRegister(
          MI->getOperand(UnscheduledOper).getReg()))
    return AtBoundary ? -1 : 1;
  return 0;
}

/// Apply a set of heuristics to a new candidate. Heuristics are currently
/// hierarchical. This may be more efficient than a graduated cost model
/// because we don't need to evaluate all aspects of the model for each node
/// in the queue. But it's really done to make the heuristics easier to debug
/// and statistically analyze.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
/// \param Zone describes the scheduled zone that we are extending.
/// \param RPTracker describes reg pressure within the scheduled zone.
/// \param TempTracker is a scratch pressure tracker to reuse in queries.
void GenericScheduler::tryCandidate(SchedCandidate &Cand,
                                    SchedCandidate &TryCand,
                                    SchedBoundary &Zone,
                                    const RegPressureTracker &RPTracker,
                                    RegPressureTracker &TempTracker) {
  if (DAG->isTrackingPressure()) {
    // Always initialize TryCand's RPDelta.
    if (Zone.isTop()) {
      TempTracker.getMaxDownwardPressureDelta(
          TryCand.SU->getInstr(),
          TryCand.RPDelta,
          DAG->getRegionCriticalPSets(),
          DAG->getRegPressure().MaxSetPressure);
    } else {
      if (VerifyScheduling) {
        TempTracker.getMaxUpwardPressureDelta(
            TryCand.SU->getInstr(),
            &DAG->getPressureDiff(TryCand.SU),
            TryCand.RPDelta,
            DAG->getRegionCriticalPSets(),
            DAG->getRegPressure().MaxSetPressure);
      } else {
        RPTracker.getUpwardPressureDelta(
            TryCand.SU->getInstr(),
            DAG->getPressureDiff(TryCand.SU),
            TryCand.RPDelta,
            DAG->getRegionCriticalPSets(),
            DAG->getRegPressure().MaxSetPressure);
      }
    }
  }
  DEBUG(if (TryCand.RPDelta.Excess.isValid())
          dbgs() << "  Try  SU(" << TryCand.SU->NodeNum << ") "
                 << TRI->getRegPressureSetName(TryCand.RPDelta.Excess.getPSet())
                 << ":" << TryCand.RPDelta.Excess.getUnitInc() << "\n");

  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return;
  }

  if (tryGreater(biasPhysRegCopy(TryCand.SU, Zone.isTop()),
                 biasPhysRegCopy(Cand.SU, Zone.isTop()),
                 TryCand, Cand, PhysRegCopy))
    return;

  // Avoid exceeding the target's limit.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
                                               Cand.RPDelta.Excess,
                                               TryCand, Cand, RegExcess, TRI,
                                               DAG->MF))
    return;

  // Avoid increasing the max critical pressure in the scheduled region.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
                                               Cand.RPDelta.CriticalMax,
                                               TryCand, Cand, RegCritical, TRI,
                                               DAG->MF))
    return;

  // For loops that are acyclic path limited, aggressively schedule for
  // latency. This can result in very long dependence chains scheduled in
  // sequence, so once every cycle (when CurrMOps == 0), switch to normal
  // heuristics.
  if (Rem.IsAcyclicLatencyLimited && !Zone.getCurrMOps()
      && tryLatency(TryCand, Cand, Zone))
    return;

  // Prioritize instructions that read unbuffered resources by stall cycles.
  if (tryLess(Zone.getLatencyStallCycles(TryCand.SU),
              Zone.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
    return;

  // Keep clustered nodes together to encourage downstream peephole
  // optimizations which may reduce resource requirements.
  //
  // This is a best effort to set things up for a post-RA pass. Optimizations
  // like generating loads of multiple registers should ideally be done within
  // the scheduler pass by combining the loads during DAG postprocessing.
  const SUnit *NextClusterSU =
    Zone.isTop() ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
  if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU,
                 TryCand, Cand, Cluster))
    return;

  // Weak edges are for clustering and other constraints.
  if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()),
              getWeakLeft(Cand.SU, Zone.isTop()),
              TryCand, Cand, Weak)) {
    return;
  }
  // Avoid increasing the max pressure of the entire region.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
                                               Cand.RPDelta.CurrentMax,
                                               TryCand, Cand, RegMax, TRI,
                                               DAG->MF))
    return;

  // Avoid critical resource consumption and balance the schedule.
  TryCand.initResourceDelta(DAG, SchedModel);
  if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
              TryCand, Cand, ResourceReduce))
    return;
  if (tryGreater(TryCand.ResDelta.DemandedResources,
                 Cand.ResDelta.DemandedResources,
                 TryCand, Cand, ResourceDemand))
    return;

  // Avoid serializing long latency dependence chains.
  // For acyclic path limited loops, latency was already checked above.
  if (!RegionPolicy.DisableLatencyHeuristic && Cand.Policy.ReduceLatency &&
      !Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, Zone)) {
    return;
  }

  // Prefer immediate defs/users of the last scheduled instruction. This is a
  // local pressure avoidance strategy that also makes the machine code
  // readable.
  if (tryGreater(Zone.isNextSU(TryCand.SU), Zone.isNextSU(Cand.SU),
                 TryCand, Cand, NextDefUse))
    return;

  // Fall through to original instruction order.
  if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
      || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
    TryCand.Reason = NodeOrder;
  }
}

/// Pick the best candidate from the queue.
///
/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit
/// during DAG building. To adjust for the current scheduling location we need
/// to maintain the number of vreg uses remaining to be top-scheduled.
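///
/// A minimal caller sketch (mirroring pickNodeBidirectional below): seed a
/// SchedCandidate with the zone's policy, then let the queue scan refine it:
///
///   CandPolicy NoPolicy;
///   SchedCandidate BotCand(NoPolicy);
///   pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
///   // BotCand.SU is now the preferred node; BotCand.Reason records why.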
void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
                                         const RegPressureTracker &RPTracker,
                                         SchedCandidate &Cand) {
  ReadyQueue &Q = Zone.Available;

  DEBUG(Q.dump());

  // getMaxPressureDelta temporarily modifies the tracker.
  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);

  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
    SchedCandidate TryCand(Cand.Policy);
    TryCand.SU = *I;
    tryCandidate(Cand, TryCand, Zone, RPTracker, TempTracker);
    if (TryCand.Reason != NoCand) {
      // Initialize resource delta if needed in case future heuristics query
      // it.
      if (TryCand.ResDelta == SchedResourceDelta())
        TryCand.initResourceDelta(DAG, SchedModel);
      Cand.setBest(TryCand);
      DEBUG(traceCandidate(Cand));
    }
  }
}

/// Pick the best candidate node from either the top or bottom queue.
SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (SUnit *SU = Bot.pickOnlyChoice()) {
    IsTopNode = false;
    DEBUG(dbgs() << "Pick Bot ONLY1\n");
    return SU;
  }
  if (SUnit *SU = Top.pickOnlyChoice()) {
    IsTopNode = true;
    DEBUG(dbgs() << "Pick Top ONLY1\n");
    return SU;
  }
  CandPolicy NoPolicy;
  SchedCandidate BotCand(NoPolicy);
  SchedCandidate TopCand(NoPolicy);
  // Set the bottom-up policy based on the state of the current bottom zone
  // and the instructions outside the zone, including the top zone.
  setPolicy(BotCand.Policy, /*IsPostRA=*/false, Bot, &Top);
  // Set the top-down policy based on the state of the current top zone and
  // the instructions outside the zone, including the bottom zone.
  setPolicy(TopCand.Policy, /*IsPostRA=*/false, Top, &Bot);

  // Prefer bottom scheduling when heuristics are silent.
  pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
  assert(BotCand.Reason != NoCand && "failed to find the first candidate");

  // If either Q has a single candidate that provides the least increase in
  // Excess pressure, we can immediately schedule from that Q.
  //
  // RegionCriticalPSets summarizes the pressure within the scheduled region
  // and affects picking from either Q. If scheduling in one direction must
  // increase pressure for one of the excess PSets, then schedule in that
  // direction first to provide more freedom in the other direction.
  if ((BotCand.Reason == RegExcess && !BotCand.isRepeat(RegExcess))
      || (BotCand.Reason == RegCritical && !BotCand.isRepeat(RegCritical))) {
    IsTopNode = false;
    tracePick(BotCand, IsTopNode);
    return BotCand.SU;
  }
  // Check if the top Q has a better candidate.
  pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
  assert(TopCand.Reason != NoCand && "failed to find the first candidate");

  // Choose the queue with the most important (lowest enum) reason.
  if (TopCand.Reason < BotCand.Reason) {
    IsTopNode = true;
    tracePick(TopCand, IsTopNode);
    return TopCand.SU;
  }
  // Otherwise prefer the bottom candidate, in node order if all else failed.
  IsTopNode = false;
  tracePick(BotCand, IsTopNode);
  return BotCand.SU;
}

/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
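///
/// The do/while loop below exists because bidirectional scheduling can leave
/// stale entries: a node already scheduled from the opposite zone may still
/// sit in this zone's ready queue, so picks are retried until an unscheduled
/// SUnit is found, which is then removed from whichever ready queues still
/// hold it.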
SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    if (RegionPolicy.OnlyTopDown) {
      SU = Top.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        SchedCandidate TopCand(NoPolicy);
        pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
        assert(TopCand.Reason != NoCand && "failed to find a candidate");
        tracePick(TopCand, true);
        SU = TopCand.SU;
      }
      IsTopNode = true;
    } else if (RegionPolicy.OnlyBottomUp) {
      SU = Bot.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        SchedCandidate BotCand(NoPolicy);
        pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
        assert(BotCand.Reason != NoCand && "failed to find a candidate");
        tracePick(BotCand, false);
        SU = BotCand.SU;
      }
      IsTopNode = false;
    } else {
      SU = pickNodeBidirectional(IsTopNode);
    }
  } while (SU->isScheduled);

  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
  return SU;
}

void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
  MachineBasicBlock::iterator InsertPos = SU->getInstr();
  if (!isTop)
    ++InsertPos;
  SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;

  // Find already scheduled copies with a single physreg dependence and move
  // them just above the scheduled instruction.
  for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end();
       I != E; ++I) {
    if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg()))
      continue;
    SUnit *DepSU = I->getSUnit();
    if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
      continue;
    MachineInstr *Copy = DepSU->getInstr();
    if (!Copy->isCopy())
      continue;
    DEBUG(dbgs() << "  Rescheduling physreg copy ";
          I->getSUnit()->dump(DAG));
    DAG->moveInstruction(Copy, InsertPos);
  }
}

/// Update the scheduler's state after scheduling a node. This is the same node
/// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
/// update its state based on the current cycle before MachineSchedStrategy
/// does.
///
/// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
/// them here. See comments in biasPhysRegCopy.
void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  if (IsTopNode) {
    SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
    Top.bumpNode(SU);
    if (SU->hasPhysRegUses)
      reschedulePhysRegCopies(SU, true);
  } else {
    SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
    Bot.bumpNode(SU);
    if (SU->hasPhysRegDefs)
      reschedulePhysRegCopies(SU, false);
  }
}

/// Create the standard converging machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
    new ScheduleDAGMILive(C, make_unique<GenericScheduler>(C));
  // Register DAG post-processors.
/// Create the standard converging machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
      new ScheduleDAGMILive(C, make_unique<GenericScheduler>(C));
  // Register DAG post-processors.
  //
  // FIXME: extend the mutation API to allow earlier mutations to instantiate
  // data and pass it to later mutations. Have a single mutation that gathers
  // the interesting nodes in one pass.
  DAG->addMutation(make_unique<CopyConstrain>(DAG->TII, DAG->TRI));
  if (EnableMemOpCluster) {
    if (DAG->TII->enableClusterLoads())
      DAG->addMutation(make_unique<LoadClusterMutation>(DAG->TII, DAG->TRI));
    if (DAG->TII->enableClusterStores())
      DAG->addMutation(make_unique<StoreClusterMutation>(DAG->TII, DAG->TRI));
  }
  if (EnableMacroFusion)
    DAG->addMutation(make_unique<MacroFusion>(*DAG->TII, *DAG->TRI));
  return DAG;
}

static MachineSchedRegistry
GenericSchedRegistry("converge", "Standard converging scheduler.",
                     createGenericSchedLive);
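// Schedulers registered with MachineSchedRegistry can be selected at run time
// through the "misched" registry option defined earlier in this file, e.g.
// (assuming an asserts-enabled llc):
//
//   llc -enable-misched -misched=converge ...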
//===----------------------------------------------------------------------===//
// PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
//===----------------------------------------------------------------------===//

void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
  DAG = Dag;
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  BotRoots.clear();

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
  // or are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  if (!Top.HazardRec) {
    Top.HazardRec =
        DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
            Itin, DAG);
  }
}

void PostGenericScheduler::registerRoots() {
  Rem.CriticalPath = DAG->ExitSU.getDepth();

  // Some roots may not feed into ExitSU. Check all of them just in case.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = BotRoots.begin(), E = BotRoots.end(); I != E; ++I) {
    if ((*I)->getDepth() > Rem.CriticalPath)
      Rem.CriticalPath = (*I)->getDepth();
  }
  DEBUG(dbgs() << "Critical Path: (PGS-RR) " << Rem.CriticalPath << '\n');
  if (DumpCriticalPathLength) {
    errs() << "Critical Path(PGS-RR): " << Rem.CriticalPath << "\n";
  }
}

/// Apply a set of heuristics to a new candidate for PostRA scheduling.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
void PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
                                        SchedCandidate &TryCand) {
  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return;
  }

  // Prioritize instructions that read unbuffered resources by stall cycles.
  if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
              Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
    return;

  // Avoid critical resource consumption and balance the schedule.
  if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
              TryCand, Cand, ResourceReduce))
    return;
  if (tryGreater(TryCand.ResDelta.DemandedResources,
                 Cand.ResDelta.DemandedResources,
                 TryCand, Cand, ResourceDemand))
    return;

  // Avoid serializing long latency dependence chains.
  if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top))
    return;

  // Fall through to original instruction order.
  if (TryCand.SU->NodeNum < Cand.SU->NodeNum)
    TryCand.Reason = NodeOrder;
}
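// The tryLess/tryGreater helpers used above implement a short-circuiting
// tie-break cascade: each returns true as soon as a comparison is decisive,
// recording the winning reason, so a heuristic only runs when every
// higher-priority heuristic tied. An illustrative sketch of tryLess (the
// actual helper is defined earlier in this file and may differ in detail):
//
//   static bool tryLess(int TryVal, int CandVal,
//                       GenericSchedulerBase::SchedCandidate &TryCand,
//                       GenericSchedulerBase::SchedCandidate &Cand,
//                       GenericSchedulerBase::CandReason Reason) {
//     if (TryVal < CandVal) {
//       TryCand.Reason = Reason;
//       return true;
//     }
//     if (TryVal > CandVal) {
//       if (Cand.Reason > Reason)
//         Cand.Reason = Reason;
//       return true;
//     }
//     Cand.setRepeat(Reason);
//     return false;
//   }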
void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
  ReadyQueue &Q = Top.Available;

  DEBUG(Q.dump());

  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
    SchedCandidate TryCand(Cand.Policy);
    TryCand.SU = *I;
    TryCand.initResourceDelta(DAG, SchedModel);
    tryCandidate(Cand, TryCand);
    if (TryCand.Reason != NoCand) {
      Cand.setBest(TryCand);
      DEBUG(traceCandidate(Cand));
    }
  }
}

/// Pick the next node to schedule.
SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    SU = Top.pickOnlyChoice();
    if (!SU) {
      CandPolicy NoPolicy;
      SchedCandidate TopCand(NoPolicy);
      // Set the top-down policy based on the state of the current top zone
      // and the instructions outside the zone, including the bottom zone.
      setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
      pickNodeFromQueue(TopCand);
      assert(TopCand.Reason != NoCand && "failed to find a candidate");
      tracePick(TopCand, true);
      SU = TopCand.SU;
    }
  } while (SU->isScheduled);

  IsTopNode = true;
  Top.removeReady(SU);

  DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
  return SU;
}

/// Called after ScheduleDAGMI has scheduled an instruction and updated
/// scheduled/remaining flags in the DAG nodes.
void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
  Top.bumpNode(SU);
}

/// Create a generic scheduler with no vreg liveness or DAG mutation passes.
static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C) {
  return new ScheduleDAGMI(C, make_unique<PostGenericScheduler>(C),
                           /*IsPostRA=*/true);
}

//===----------------------------------------------------------------------===//
// ILP Scheduler. Currently for experimental analysis of heuristics.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Order nodes by the ILP metric.
struct ILPOrder {
  const SchedDFSResult *DFSResult;
  const BitVector *ScheduledTrees;
  bool MaximizeILP;

  ILPOrder(bool MaxILP)
      : DFSResult(nullptr), ScheduledTrees(nullptr), MaximizeILP(MaxILP) {}

  /// \brief Apply a less-than relation on node priority.
  ///
  /// (Return true if A comes after B in the Q.)
  bool operator()(const SUnit *A, const SUnit *B) const {
    unsigned SchedTreeA = DFSResult->getSubtreeID(A);
    unsigned SchedTreeB = DFSResult->getSubtreeID(B);
    if (SchedTreeA != SchedTreeB) {
      // Unscheduled trees have lower priority.
      if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
        return ScheduledTrees->test(SchedTreeB);

      // Trees with shallower connections have lower priority.
      if (DFSResult->getSubtreeLevel(SchedTreeA)
          != DFSResult->getSubtreeLevel(SchedTreeB)) {
        return DFSResult->getSubtreeLevel(SchedTreeA)
               < DFSResult->getSubtreeLevel(SchedTreeB);
      }
    }
    if (MaximizeILP)
      return DFSResult->getILP(A) < DFSResult->getILP(B);
    else
      return DFSResult->getILP(A) > DFSResult->getILP(B);
  }
};
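// ILPOrder is the comparator for the std heap algorithms used by ILPScheduler
// below. The std heap functions maintain a max-heap, so operator() must return
// true when A should come after B. A worked example, assuming MaximizeILP and
// two nodes that DFSResult places in the same subtree:
//
//   ILPOrder Cmp(/*MaxILP=*/true);
//   // getILP(A) == 2, getILP(B) == 5:
//   //   Cmp(A, B) == true   // A sorts after B, so B is popped first.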
/// \brief Schedule based on the ILP metric.
class ILPScheduler : public MachineSchedStrategy {
  ScheduleDAGMILive *DAG;
  ILPOrder Cmp;

  std::vector<SUnit*> ReadyQ;

public:
  ILPScheduler(bool MaximizeILP) : DAG(nullptr), Cmp(MaximizeILP) {}

  void initialize(ScheduleDAGMI *dag) override {
    assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
    DAG = static_cast<ScheduleDAGMILive*>(dag);
    DAG->computeDFSResult();
    Cmp.DFSResult = DAG->getDFSResult();
    Cmp.ScheduledTrees = &DAG->getScheduledTrees();
    ReadyQ.clear();
  }

  void registerRoots() override {
    // Restore the heap in ReadyQ with the updated DFS results.
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  /// Callback to select the highest priority node from the ready Q.
  SUnit *pickNode(bool &IsTopNode) override {
    if (ReadyQ.empty()) return nullptr;
    std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
    SUnit *SU = ReadyQ.back();
    ReadyQ.pop_back();
    IsTopNode = false;
    DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") "
          << " ILP: " << DAG->getDFSResult()->getILP(SU)
          << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @"
          << DAG->getDFSResult()->getSubtreeLevel(
              DAG->getDFSResult()->getSubtreeID(SU)) << '\n'
          << "Scheduling " << *SU->getInstr());
    return SU;
  }

  /// \brief Scheduler callback to notify that a new subtree is scheduled.
  void scheduleTree(unsigned SubtreeID) override {
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Callback after a node is scheduled. Mark a newly scheduled tree, notify
  /// DFSResults, and resort the priority Q.
  void schedNode(SUnit *SU, bool IsTopNode) override {
    assert(!IsTopNode && "SchedDFSResult needs bottom-up");
  }

  void releaseTopNode(SUnit *) override { /*only called for top roots*/ }

  void releaseBottomNode(SUnit *SU) override {
    ReadyQ.push_back(SU);
    std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }
};
} // namespace

static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(true));
}
static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(false));
}
static MachineSchedRegistry ILPMaxRegistry(
    "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
static MachineSchedRegistry ILPMinRegistry(
    "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);
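// These strategies are analysis aids rather than production schedulers; for
// example, "llc -misched=ilpmax" schedules each region bottom-up, always
// picking the ready node whose subtree exposes the most instruction-level
// parallelism according to the DFS result.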
//===----------------------------------------------------------------------===//
// Machine Instruction Shuffler for Correctness Testing
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace {
/// Apply a less-than relation on the node order, which corresponds to the
/// instruction order prior to scheduling. IsReverse implements greater-than.
template<bool IsReverse>
struct SUnitOrder {
  bool operator()(SUnit *A, SUnit *B) const {
    if (IsReverse)
      return A->NodeNum > B->NodeNum;
    else
      return A->NodeNum < B->NodeNum;
  }
};

/// Reorder instructions as much as possible.
class InstructionShuffler : public MachineSchedStrategy {
  bool IsAlternating;
  bool IsTopDown;

  // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
  // gives nodes with a higher number higher priority, causing the latest
  // instructions to be scheduled first.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
    TopQ;
  // When scheduling bottom-up, use greater-than as the queue priority.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
    BottomQ;

public:
  InstructionShuffler(bool alternate, bool topdown)
      : IsAlternating(alternate), IsTopDown(topdown) {}

  void initialize(ScheduleDAGMI*) override {
    TopQ.clear();
    BottomQ.clear();
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  SUnit *pickNode(bool &IsTopNode) override {
    SUnit *SU;
    if (IsTopDown) {
      do {
        if (TopQ.empty()) return nullptr;
        SU = TopQ.top();
        TopQ.pop();
      } while (SU->isScheduled);
      IsTopNode = true;
    } else {
      do {
        if (BottomQ.empty()) return nullptr;
        SU = BottomQ.top();
        BottomQ.pop();
      } while (SU->isScheduled);
      IsTopNode = false;
    }
    if (IsAlternating)
      IsTopDown = !IsTopDown;
    return SU;
  }

  void schedNode(SUnit *SU, bool IsTopNode) override {}

  void releaseTopNode(SUnit *SU) override {
    TopQ.push(SU);
  }
  void releaseBottomNode(SUnit *SU) override {
    BottomQ.push(SU);
  }
};
} // namespace

static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
  bool Alternate = !ForceTopDown && !ForceBottomUp;
  bool TopDown = !ForceBottomUp;
  assert((TopDown || !ForceTopDown) &&
         "-misched-topdown incompatible with -misched-bottomup");
  return new ScheduleDAGMILive(C,
                               make_unique<InstructionShuffler>(Alternate,
                                                                TopDown));
}
static MachineSchedRegistry ShufflerRegistry(
    "shuffle", "Shuffle machine instructions alternating directions",
    createInstructionShuffler);
#endif // !NDEBUG
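// The shuffler deliberately produces bad but legal schedules: running
// "llc -misched=shuffle" (asserts builds only) stress-tests dependence
// tracking and liveness updates by reordering instructions as aggressively as
// the DAG allows.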
//===----------------------------------------------------------------------===//
// GraphWriter support for ScheduleDAGMILive.
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace llvm {

template<> struct GraphTraits<
  ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};

template<>
struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getGraphName(const ScheduleDAG *G) {
    return G->MF.getName();
  }

  static bool renderGraphFromBottomUp() {
    return true;
  }

  static bool isNodeHidden(const SUnit *Node) {
    if (ViewMISchedCutoff == 0)
      return false;
    return (Node->Preds.size() > ViewMISchedCutoff
            || Node->Succs.size() > ViewMISchedCutoff);
  }

  /// If you want to override the dot attributes printed for a particular
  /// edge, override this method.
  static std::string getEdgeAttributes(const SUnit *Node,
                                       SUnitIterator EI,
                                       const ScheduleDAG *Graph) {
    if (EI.isArtificialDep())
      return "color=cyan,style=dashed";
    if (EI.isCtrlDep())
      return "color=blue,style=dashed";
    return "";
  }

  static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
    std::string Str;
    raw_string_ostream SS(Str);
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    SS << "SU:" << SU->NodeNum;
    if (DFS)
      SS << " I:" << DFS->getNumInstrs(SU);
    return SS.str();
  }

  static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
    return G->getGraphNodeLabel(SU);
  }

  static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
    std::string Str("shape=Mrecord");
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    if (DFS) {
      Str += ",style=filled,fillcolor=\"#";
      Str += DOT::getColorString(DFS->getSubtreeID(N));
      Str += '"';
    }
    return Str;
  }
};
} // namespace llvm
#endif // NDEBUG

/// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
/// rendered using 'dot'.
void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
#ifndef NDEBUG
  ViewGraph(this, Name, false, Title);
#else
  errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}

/// Out-of-line implementation with no arguments is handy for gdb.
void ScheduleDAGMI::viewGraph() {
  viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
}
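// Debugging aid: in a debug build with Graphviz available, the DAG for the
// current scheduling region can be rendered from a debugger, e.g.:
//
//   (gdb) call DAG->viewGraph()
//
// where DAG is a ScheduleDAGMI in scope at a scheduler breakpoint. The
// -view-misched-dags option renders every region automatically.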