//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include <queue>

using namespace llvm;

#define DEBUG_TYPE "misched"

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
                       cl::desc("Print critical path length to stdout"));
}

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

/// In some situations a few uninteresting nodes depend on nearly all other
/// nodes in the graph; provide a cutoff to hide them.
static cl::opt<unsigned> ViewMISchedCutoff("view-misched-cutoff", cl::Hidden,
  cl::desc("Hide nodes with more predecessors/successors than the cutoff"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));

static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
  cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
  cl::desc("Only schedule this MBB#"));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
  cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(true));

static cl::opt<bool> EnableLoadCluster("misched-cluster", cl::Hidden,
  cl::desc("Enable load clustering."), cl::init(true));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));

// DAG subtrees must have at least this many nodes.
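// (Used by computeDFSResult() below when constructing the SchedDFSResult.)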
static const unsigned MinSubtreeSize = 8;

// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}
void ScheduleDAGMutation::anchor() {}

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
    MF(nullptr), MLI(nullptr), MDT(nullptr), PassConfig(nullptr), AA(nullptr),
    LIS(nullptr) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// Base class for a machine scheduler class that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}

  void print(raw_ostream &O, const Module* = nullptr) const override;

protected:
  void scheduleRegions(ScheduleDAGInstrs &Scheduler);
};

/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createMachineScheduler();
};

/// PostMachineScheduler runs shortly before code emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
  PostMachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createPostMachineScheduler();
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "machine-scheduler",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "machine-scheduler",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
    : MachineSchedulerBase(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

char PostMachineScheduler::ID = 0;

char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;

INITIALIZE_PASS(PostMachineScheduler, "postmisched",
                "PostRA Machine Instruction Scheduler", false, false)

PostMachineScheduler::PostMachineScheduler()
    : MachineSchedulerBase(ID) {
  initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void
PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return nullptr;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);

static cl::opt<bool> EnableMachineSched(
    "enable-misched",
    cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
    cl::Hidden);

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C);
static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C);

/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return const_cast<MachineInstr*>(
    &*priorNonDebug(MachineBasicBlock::const_iterator(I), Beg));
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  // Cast the return value to nonconst MachineInstr, then cast to an
  // instr_iterator, which does not check for null, finally return a
  // bundle_iterator.
  return MachineBasicBlock::instr_iterator(
    const_cast<MachineInstr*>(
      &*nextIfDebug(MachineBasicBlock::const_iterator(I), End)));
}

/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor != useDefaultMachineSched)
    return Ctor(this);

  // Get the default scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedLive(this);
}

/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (EnableMachineSched.getNumOccurrences()) {
    if (!EnableMachineSched)
      return false;
  } else if (!mf.getSubtarget().enableMachineScheduler())
    return false;

  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler);

  DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipOptnoneFunction(*mf.getFunction()))
    return false;

  if (!mf.getSubtarget().enablePostRAScheduler()) {
    DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
    return false;
  }
  DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  PassConfig = &getAnalysis<TargetPassConfig>();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  scheduleRegions(*Scheduler);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}

/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need the isCall check to
/// enforce the boundary, but there would be no benefit to postRA scheduling
/// across calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
                            MachineBasicBlock *MBB,
                            MachineFunction *MF,
                            const TargetInstrInfo *TII,
                            bool IsPostRA) {
  return MI->isCall() || TII->isSchedulingBoundary(MI, MBB, *MF);
}

/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler) {
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  bool IsPostRA = Scheduler.isPostRA();

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(&*MBB);

#ifndef NDEBUG
    if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
      continue;
    if (SchedOnlyBlock.getNumOccurrences()
        && (int)SchedOnlyBlock != MBB->getNumber())
      continue;
#endif

    // Break the block into scheduling regions [I, RegionEnd), and schedule
    // each region as soon as it is discovered. RegionEnd points to the
    // scheduling boundary at the bottom of the region. The DAG does not
    // include RegionEnd, but the region does (i.e. the next RegionEnd is above
    // the previous RegionBegin). If the current block has no terminator then
    // RegionEnd == MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    //
    // MBB::size() uses instr_iterator to count. Here we need a bundle to count
    // as a single instruction.
    unsigned RemainingInstrs = std::distance(MBB->begin(), MBB->end());
    for (MachineBasicBlock::iterator RegionEnd = MBB->end();
         RegionEnd != MBB->begin(); RegionEnd = Scheduler.begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end() ||
          isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII, IsPostRA)) {
        --RegionEnd;
        // Count the boundary instruction.
        --RemainingInstrs;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
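      //
      // As an illustration (hypothetical block, not taken from the original
      // comments): given a block "I0; I1; CALL; I2; I3", the call is a
      // scheduling boundary, so this loop first forms and schedules the
      // region [I2, end), then the region [I0, CALL).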
      unsigned NumRegionInstrs = 0;
      MachineBasicBlock::iterator I = RegionEnd;
      for (; I != MBB->begin(); --I, --RemainingInstrs) {
        if (isSchedBoundary(&*std::prev(I), &*MBB, MF, TII, IsPostRA))
          break;
        if (!I->isDebugValue())
          ++NumRegionInstrs;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == std::prev(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** " << ((Scheduler.isPostRA()) ? "PostRA " : "")
            << "MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n  From: " << *I << "    To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " RegionInstrs: " << NumRegionInstrs
            << " Remaining: " << RemainingInstrs << "\n");
      if (DumpCriticalPathLength) {
        errs() << MF->getName();
        errs() << ":BB# " << MBB->getNumber();
        errs() << " " << MBB->getName() << " \n";
      }

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler.begin();
    }
    assert(RemainingInstrs == 0 && "Instruction count mismatch!");
    Scheduler.finishBlock();
    if (Scheduler.isPostRA()) {
      // FIXME: Ideally, no further passes should rely on kill flags. However,
      // thumb2 size reduction is currently an exception.
      Scheduler.fixupKills(&*MBB);
    }
  }
  Scheduler.finalizeSchedule();
}

void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

LLVM_DUMP_METHOD
void ReadyQueue::dump() {
  dbgs() << "Queue " << Name << ": ";
  for (unsigned i = 0, e = Queue.size(); i < e; ++i)
    dbgs() << Queue[i]->NodeNum << " ";
  dbgs() << "\n";
}

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
// virtual registers.
//===----------------------------------------------------------------------===//

// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() {
}

bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor.
/// When NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
    SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();

  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
    PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();

  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  SchedImpl->initPolicy(begin, end, regioninstrs);
}

/// This is normally called from the main scheduler loop but may also be
/// invoked by the scheduling strategy to perform additional code motion.
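/// For example, ScheduleDAGMI::schedule() below calls moveInstruction()
/// whenever the instruction of the node it picked is not already sitting at
/// the appropriate end of the unscheduled zone.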
void ScheduleDAGMI::moveInstruction(
  MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals.
  if (LIS)
    LIS->handleMove(MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}

/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
  DEBUG(SchedImpl->dumpPolicy());

  // Build the DAG.
  buildSchedGraph(AA);

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    }
    else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
    }
    // Notify the scheduling strategy before updating the DAG.
    // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
    // runs, it can then use the accurate ReadyCycle time to determine whether
    // newly released nodes can move to the readyQ.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Apply each ScheduleDAGMutation step in order.
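/// Mutations run in the order they were registered via addMutation(). As an
/// illustrative sketch (targets normally do this in their scheduler
/// factories), a mutation such as LoadClusterMutation below might be
/// registered like so:
///   DAG->addMutation(make_unique<LoadClusterMutation>(TII, TRI));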
void ScheduleDAGMI::postprocessDAG() {
  for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
    Mutations[i]->apply(this);
  }
}

void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                      SmallVectorImpl<SUnit*> &BotRoots) {
  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    SUnit *SU = &(*I);
    assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU->biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!I->NumPredsLeft)
      TopRoots.push_back(SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!I->NumSuccsLeft)
      BotRoots.push_back(SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = nullptr;
  NextClusterPred = nullptr;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
    SchedImpl->releaseTopNode(*I);
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == std::prev(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);

  SUPressureDiffs.clear();

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
}

// Set up the register pressure trackers for the top and bottom scheduled
// regions.
void ScheduleDAGMILive::initRegPressure() {
  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    DEBUG(dbgs() << "Live Thru: ";
          dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // For each live out vreg reduce the pressure change associated with other
  // uses of the same vreg below the live-out reaching def.
  updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);

  // Account for liveness generated by the region boundary.
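  // (LiveRegionEnd != RegionEnd exactly when the region is bounded below by a
  // real boundary instruction rather than by the block end; see enterRegion()
  // above.)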
  if (LiveRegionEnd != RegionEnd) {
    SmallVector<unsigned, 8> LiveUses;
    BotRPTracker.recede(&LiveUses);
    updatePressureDiffs(LiveUses);
  }

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also
  // track the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      DEBUG(dbgs() << TRI->getRegPressureSetName(i)
            << " Limit " << Limit
            << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureChange(i));
    }
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].getPSet()) << " ";
        dbgs() << "\n");
}

void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
                        const std::vector<unsigned> &NewMaxPressure) {
  const PressureDiff &PDiff = getPressureDiff(SU);
  unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
  for (PressureDiff::const_iterator I = PDiff.begin(), E = PDiff.end();
       I != E; ++I) {
    if (!I->isValid())
      break;
    unsigned ID = I->getPSet();
    while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
      ++CritIdx;
    if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
      if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
          && NewMaxPressure[ID] <= INT16_MAX)
        RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
    }
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
    if (NewMaxPressure[ID] >= Limit - 2) {
      DEBUG(dbgs() << "  " << TRI->getRegPressureSetName(ID) << ": "
            << NewMaxPressure[ID]
            << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ") << Limit
            << "(+ " << BotRPTracker.getLiveThru()[ID] << " livethru)\n");
    }
  }
}

/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(ArrayRef<unsigned> LiveUses) {
  for (unsigned LUIdx = 0, LUEnd = LiveUses.size(); LUIdx != LUEnd; ++LUIdx) {
    /// FIXME: Currently assuming single-use physregs.
    unsigned Reg = LiveUses[LUIdx];
    DEBUG(dbgs() << "  LiveReg: " << PrintVRegOrUnit(Reg, TRI) << "\n");
    if (!TRI->isVirtualRegister(Reg))
      continue;

    // This may be called before CurrentBottom has been initialized. However,
    // BotRPTracker must have a valid position. We want the value live into the
    // instruction or live out of the block, so ask for the previous
    // instruction's live-out.
    const LiveInterval &LI = LIS->getInterval(Reg);
    VNInfo *VNI;
    MachineBasicBlock::const_iterator I =
      nextIfDebug(BotRPTracker.getPos(), BB->end());
    if (I == BB->end())
      VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    else {
      LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(I));
      VNI = LRQ.valueIn();
    }
    // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
    assert(VNI && "No live value at use.");
    for (const VReg2SUnit &V2SU
         : make_range(VRegUses.find(Reg), VRegUses.end())) {
      SUnit *SU = V2SU.SU;
      DEBUG(dbgs() << "  UpdateRegP: SU(" << SU->NodeNum << ") "
            << *SU->getInstr());
      // If this use comes before the reaching def, it cannot be a last use,
      // so decrease its pressure change.
      if (!SU->isScheduled && SU != &ExitSU) {
        LiveQueryResult LRQ
          = LI.Query(LIS->getInstructionIndex(SU->getInstr()));
        if (LRQ.valueIn() == VNI)
          getPressureDiff(SU).addPressureChange(Reg, true, &MRI);
      }
    }
  }
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMILive then it will want to override this virtual method in
/// order to update any specialized state.
void ScheduleDAGMILive::schedule() {
  DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
  DEBUG(SchedImpl->dumpPolicy());
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  if (ShouldTrackPressure) {
    assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
    TopRPTracker.setPos(CurrentTop);
  }

  bool IsTopNode = false;
  while (true) {
    DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    if (DFSResult) {
      unsigned SubtreeID = DFSResult->getSubtreeID(SU);
      if (!ScheduledTrees.test(SubtreeID)) {
        ScheduledTrees.set(SubtreeID);
        DFSResult->scheduleTree(SubtreeID);
        SchedImpl->scheduleTree(SubtreeID);
      }
    }

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Build the DAG and setup three register pressure trackers.
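/// The three trackers are RPTracker, which computes pressure over the whole
/// region while the DAG is built, and TopRPTracker/BotRPTracker, which follow
/// the top and bottom of the unscheduled zone during scheduling (see
/// initRegPressure() above).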
void ScheduleDAGMILive::buildDAGWithRegPressure() {
  if (!ShouldTrackPressure) {
    RPTracker.reset();
    RegionCriticalPSets.clear();
    buildSchedGraph(AA);
    return;
  }

  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker, &SUPressureDiffs);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

void ScheduleDAGMILive::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single block loops. To handle loops
/// that span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes. For example, consider
/// the following instruction sequence where each instruction has unit latency
/// and defines an eponymous virtual register:
///
/// a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
///
/// This could be relevant to PostRA scheduling, but is currently implemented
/// assuming LiveIntervals.
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
  // This only applies to single-block loops.
  if (!BB->isSuccessor(BB))
    return 0;

  unsigned MaxCyclicLatency = 0;
  // Visit each live out vreg def to find def/use pairs that cross iterations.
  ArrayRef<unsigned> LiveOuts = RPTracker.getPressure().LiveOutRegs;
  for (ArrayRef<unsigned>::iterator RI = LiveOuts.begin(), RE = LiveOuts.end();
       RI != RE; ++RI) {
    unsigned Reg = *RI;
    if (!TRI->isVirtualRegister(Reg))
      continue;
    const LiveInterval &LI = LIS->getInterval(Reg);
    const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    if (!DefVNI)
      continue;

    MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
    const SUnit *DefSU = getSUnit(DefMI);
    if (!DefSU)
      continue;

    unsigned LiveOutHeight = DefSU->getHeight();
    unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
    // Visit all local users of the vreg def.
    for (const VReg2SUnit &V2SU
         : make_range(VRegUses.find(Reg), VRegUses.end())) {
      SUnit *SU = V2SU.SU;
      if (SU == &ExitSU)
        continue;

      // Only consider uses of the phi.
      LiveQueryResult LRQ =
        LI.Query(LIS->getInstructionIndex(SU->getInstr()));
      if (!LRQ.valueIn()->isPHIDef())
        continue;

      // Assume that a path spanning two iterations is a cycle, which could
      // overestimate in strange cases. This allows cyclic latency to be
      // estimated as the minimum slack of the vreg's depth or height.
      unsigned CyclicLatency = 0;
      if (LiveOutDepth > SU->getDepth())
        CyclicLatency = LiveOutDepth - SU->getDepth();

      unsigned LiveInHeight = SU->getHeight() + DefSU->Latency;
      if (LiveInHeight > LiveOutHeight) {
        if (LiveInHeight - LiveOutHeight < CyclicLatency)
          CyclicLatency = LiveInHeight - LiveOutHeight;
      }
      else
        CyclicLatency = 0;

      DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
            << SU->NodeNum << ") = " << CyclicLatency << "c\n");
      if (CyclicLatency > MaxCyclicLatency)
        MaxCyclicLatency = CyclicLatency;
    }
  }
  DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
  return MaxCyclicLatency;
}

/// Move an instruction and update register pressure.
void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    if (ShouldTrackPressure) {
      // Update top scheduled pressure.
      TopRPTracker.advance();
      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
      updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
    }
  }
  else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
    }
    if (ShouldTrackPressure) {
      // Update bottom scheduled pressure.
      SmallVector<unsigned, 8> LiveUses;
      BotRPTracker.recede(&LiveUses);
      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
      updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
      updatePressureDiffs(LiveUses);
    }
  }
}

//===----------------------------------------------------------------------===//
// LoadClusterMutation - DAG post-processing to cluster loads.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between neighboring
/// loads.
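///
/// For illustration (hypothetical operands, not taken from the code below):
/// two loads from [%base + 8] and [%base + 16] report the same base register
/// via getMemOpBaseRegImmOfs(), so clusterNeighboringLoads() can give them a
/// cluster edge that encourages the scheduler to issue them back to back.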
class LoadClusterMutation : public ScheduleDAGMutation {
  struct LoadInfo {
    SUnit *SU;
    unsigned BaseReg;
    unsigned Offset;
    LoadInfo(SUnit *su, unsigned reg, unsigned ofs)
      : SU(su), BaseReg(reg), Offset(ofs) {}

    bool operator<(const LoadInfo &RHS) const {
      return std::tie(BaseReg, Offset) < std::tie(RHS.BaseReg, RHS.Offset);
    }
  };

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
public:
  LoadClusterMutation(const TargetInstrInfo *tii,
                      const TargetRegisterInfo *tri)
    : TII(tii), TRI(tri) {}

  void apply(ScheduleDAGMI *DAG) override;
protected:
  void clusterNeighboringLoads(ArrayRef<SUnit*> Loads, ScheduleDAGMI *DAG);
};
} // anonymous

void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
                                                  ScheduleDAGMI *DAG) {
  SmallVector<LoadClusterMutation::LoadInfo, 32> LoadRecords;
  for (unsigned Idx = 0, End = Loads.size(); Idx != End; ++Idx) {
    SUnit *SU = Loads[Idx];
    unsigned BaseReg;
    unsigned Offset;
    if (TII->getMemOpBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
      LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset));
  }
  if (LoadRecords.size() < 2)
    return;
  std::sort(LoadRecords.begin(), LoadRecords.end());
  unsigned ClusterLength = 1;
  for (unsigned Idx = 0, End = LoadRecords.size(); Idx < (End - 1); ++Idx) {
    if (LoadRecords[Idx].BaseReg != LoadRecords[Idx+1].BaseReg) {
      ClusterLength = 1;
      continue;
    }

    SUnit *SUa = LoadRecords[Idx].SU;
    SUnit *SUb = LoadRecords[Idx+1].SU;
    if (TII->shouldClusterLoads(SUa->getInstr(), SUb->getInstr(), ClusterLength)
        && DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {

      DEBUG(dbgs() << "Cluster loads SU(" << SUa->NodeNum << ") - SU("
            << SUb->NodeNum << ")\n");
      // Copy successor edges from SUa to SUb. Interleaving computation
      // dependent on SUa can prevent load combining due to register reuse.
      // Predecessor edges do not need to be copied from SUb to SUa since
      // nearby loads should have effectively the same inputs.
      for (SUnit::const_succ_iterator
             SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
        if (SI->getSUnit() == SUb)
          continue;
        DEBUG(dbgs() << "  Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
        DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
      }
      ++ClusterLength;
    }
    else
      ClusterLength = 1;
  }
}

/// \brief Callback from DAG postProcessing to create cluster edges for loads.
void LoadClusterMutation::apply(ScheduleDAGMI *DAG) {
  // Map DAG NodeNum to store chain ID.
  DenseMap<unsigned, unsigned> StoreChainIDs;
  // Map each store chain to a set of dependent loads.
  SmallVector<SmallVector<SUnit*, 4>, 32> StoreChainDependents;
  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->mayLoad())
      continue;
    unsigned ChainPredID = DAG->SUnits.size();
    for (SUnit::const_pred_iterator
           PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
      if (PI->isCtrl()) {
        ChainPredID = PI->getSUnit()->NodeNum;
        break;
      }
    }
    // Check if this chain-like pred has been seen
    // before. ChainPredID==MaxNodeID for loads at the top of the schedule.
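    // Loads hanging off different store chains are never considered for
    // clustering with each other; each group is handed to
    // clusterNeighboringLoads() separately below.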
    unsigned NumChains = StoreChainDependents.size();
    std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
      StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
    if (Result.second)
      StoreChainDependents.resize(NumChains + 1);
    StoreChainDependents[Result.first->second].push_back(SU);
  }
  // Iterate over the store chains.
  for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
    clusterNeighboringLoads(StoreChainDependents[Idx], DAG);
}

//===----------------------------------------------------------------------===//
// MacroFusion - DAG post-processing to encourage fusion of macro ops.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between instructions
/// that may be fused by the processor into a single operation.
class MacroFusion : public ScheduleDAGMutation {
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;
public:
  MacroFusion(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI)
    : TII(TII), TRI(TRI) {}

  void apply(ScheduleDAGMI *DAG) override;
};
} // anonymous

/// Returns true if \p MI reads a register written by \p Other.
static bool HasDataDep(const TargetRegisterInfo &TRI, const MachineInstr &MI,
                       const MachineInstr &Other) {
  for (const MachineOperand &MO : MI.uses()) {
    if (!MO.isReg() || !MO.readsReg())
      continue;

    unsigned Reg = MO.getReg();
    if (Other.modifiesRegister(Reg, &TRI))
      return true;
  }
  return false;
}

/// \brief Callback from DAG postProcessing to create cluster edges to
/// encourage fused operations.
void MacroFusion::apply(ScheduleDAGMI *DAG) {
  // For now, assume targets can only fuse with the branch.
  SUnit &ExitSU = DAG->ExitSU;
  MachineInstr *Branch = ExitSU.getInstr();
  if (!Branch)
    return;

  for (SUnit &SU : DAG->SUnits) {
    // SUnits with successors can't be scheduled in front of the ExitSU.
    if (!SU.Succs.empty())
      continue;
    // We only care if the node writes to a register that the branch reads.
    MachineInstr *Pred = SU.getInstr();
    if (!HasDataDep(TRI, *Branch, *Pred))
      continue;

    if (!TII.shouldScheduleAdjacent(Pred, Branch))
      continue;

    // Create a single weak edge from SU to ExitSU. The only effect is to cause
    // bottom-up scheduling to heavily prioritize the clustered SU. There is no
    // need to copy predecessor edges from ExitSU to SU, since top-down
    // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
    // of SU, we could create an artificial edge from the deepest root, but it
    // hasn't been needed yet.
    bool Success = DAG->addEdge(&ExitSU, SDep(&SU, SDep::Cluster));
    (void)Success;
    assert(Success && "No DAG nodes should be reachable from ExitSU");

    DEBUG(dbgs() << "Macro Fuse SU(" << SU.NodeNum << ")\n");
    break;
  }
}

//===----------------------------------------------------------------------===//
// CopyConstrain - DAG post-processing to encourage copy elimination.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create weak edges from all uses of a copy to
/// the one use that defines the copy's source vreg, most likely an induction
/// variable increment.
class CopyConstrain : public ScheduleDAGMutation {
  // Transient state.
  SlotIndex RegionBeginIdx;
  // RegionEndIdx is the slot index of the last non-debug instruction in the
  // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
  SlotIndex RegionEndIdx;
public:
  CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}

  void apply(ScheduleDAGMI *DAG) override;

protected:
  void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
};
} // anonymous

/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
/// I0:     = dst
/// I1: src = ...
/// I2:     = dst
/// I3: dst = src (copy)
/// (create pred->succ edges I0->I1, I2->I1)
///
/// 2) Local copy:
/// I0: dst = src (copy)
/// I1:     = dst
/// I2: src = ...
/// I3:     = dst
/// (create pred->succ edges I1->I2, I3->I2)
///
/// Although the MachineScheduler is currently constrained to single blocks,
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
  LiveIntervals *LIS = DAG->getLIS();
  MachineInstr *Copy = CopySU->getInstr();

  // Check for pure vreg copies.
  unsigned SrcReg = Copy->getOperand(1).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
    return;

  unsigned DstReg = Copy->getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(DstReg))
    return;

  // Check if either the dest or source is local. If it's live across a back
  // edge, it's not local. Note that if both vregs are live across the back
  // edge, we cannot successfully constrain the copy without cyclic scheduling.
  // If both the copy's source and dest are local live intervals, then we
  // should treat the dest as the global for the purpose of adding
  // constraints. This adds edges from source's other uses to the copy.
  unsigned LocalReg = SrcReg;
  unsigned GlobalReg = DstReg;
  LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
  if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
    LocalReg = DstReg;
    GlobalReg = SrcReg;
    LocalLI = &LIS->getInterval(LocalReg);
    if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
      return;
  }
  LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);

  // Find the global segment after the start of the local LI.
  LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
  // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
  // local live range. We could create edges from other global uses to the
  // local start, but the coalescer should have already eliminated these
  // cases, so don't bother dealing with it.
  if (GlobalSegment == GlobalLI->end())
    return;

  // If GlobalSegment is killed at the LocalLI->start, the call to find()
  // returned the next global segment.
  // But if GlobalSegment overlaps with LocalLI->start, then advance to the
  // next segment. If a hole in GlobalLI exists in LocalLI's vicinity,
  // GlobalSegment will be the end of the hole.
  if (GlobalSegment->contains(LocalLI->beginIndex()))
    ++GlobalSegment;

  if (GlobalSegment == GlobalLI->end())
    return;

  // Check if GlobalLI contains a hole in the vicinity of LocalLI.
  if (GlobalSegment != GlobalLI->begin()) {
    // Two address defs have no hole.
    if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
                               GlobalSegment->start)) {
      return;
    }
    // If the prior global segment may be defined by the same two-address
    // instruction that also defines LocalLI, then we can't make a hole here.
    if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
                               LocalLI->beginIndex())) {
      return;
    }
    // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
    // it would be a disconnected component in the live range.
    assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
           "Disconnected LRG within the scheduling region.");
  }
  MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
  if (!GlobalDef)
    return;

  SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
  if (!GlobalSU)
    return;

  // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
  // constraining the uses of the last local def to precede GlobalDef.
  SmallVector<SUnit*,8> LocalUses;
  const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
  MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
  SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
  for (SUnit::const_succ_iterator
         I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
       I != E; ++I) {
    if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
      continue;
    if (I->getSUnit() == GlobalSU)
      continue;
    if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
      return;
    LocalUses.push_back(I->getSUnit());
  }
  // Open the top of the GlobalLI hole by constraining any earlier global uses
  // to precede the start of LocalLI.
  SmallVector<SUnit*,8> GlobalUses;
  MachineInstr *FirstLocalDef =
    LIS->getInstructionFromIndex(LocalLI->beginIndex());
  SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
  for (SUnit::const_pred_iterator
         I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
    if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
      continue;
    if (I->getSUnit() == FirstLocalSU)
      continue;
    if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
      return;
    GlobalUses.push_back(I->getSUnit());
  }
  DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
  // Add the weak edges.
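  // Illustrative note (added commentary, not in the original source): an
  // SDep::Weak edge is only a scheduling preference, never a correctness
  // constraint. The edges added below bias the GenericScheduler -- see the
  // Weak reason in tryCandidate() and getWeakLeft() -- toward keeping the
  // local interval short, but the region remains schedulable even if the
  // heuristics ignore them, which is why each candidate edge is guarded by
  // canAddEdge() above rather than by a hard dependence check.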
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Local use SU(" << (*I)->NodeNum << ") -> SU("
          << GlobalSU->NodeNum << ")\n");
    DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
  }
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Global use SU(" << (*I)->NodeNum << ") -> SU("
          << FirstLocalSU->NodeNum << ")\n");
    DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
  }
}

/// \brief Callback from DAG postProcessing to create weak edges to encourage
/// copy elimination.
void CopyConstrain::apply(ScheduleDAGMI *DAG) {
  assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");

  MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
  if (FirstPos == DAG->end())
    return;
  RegionBeginIdx = DAG->getLIS()->getInstructionIndex(&*FirstPos);
  RegionEndIdx = DAG->getLIS()->getInstructionIndex(
    &*priorNonDebug(DAG->end(), DAG->begin()));

  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->isCopy())
      continue;

    constrainLocalCopy(SU, static_cast<ScheduleDAGMILive*>(DAG));
  }
}

//===----------------------------------------------------------------------===//
// MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
// and possibly other custom schedulers.
//===----------------------------------------------------------------------===//

static const unsigned InvalidCycle = ~0U;

SchedBoundary::~SchedBoundary() { delete HazardRec; }

void SchedBoundary::reset() {
  // A new HazardRec is created for each DAG and owned by SchedBoundary.
  // Destroying and reconstructing it is very expensive though. So keep
  // invalid, placeholder HazardRecs.
  if (HazardRec && HazardRec->isEnabled()) {
    delete HazardRec;
    HazardRec = nullptr;
  }
  Available.clear();
  Pending.clear();
  CheckPending = false;
  NextSUs.clear();
  CurrCycle = 0;
  CurrMOps = 0;
  MinReadyCycle = UINT_MAX;
  ExpectedLatency = 0;
  DependentLatency = 0;
  RetiredMOps = 0;
  MaxExecutedResCount = 0;
  ZoneCritResIdx = 0;
  IsResourceLimited = false;
  ReservedCycles.clear();
#ifndef NDEBUG
  // Track the maximum number of stall cycles that could arise either from the
  // latency of a DAG edge or the number of cycles that a processor resource is
  // reserved (SchedBoundary::ReservedCycles).
  MaxObservedStall = 0;
#endif
  // Reserve a zero-count for invalid CritResIdx.
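  // (Added note: ZoneCritResIdx == 0 means "issue limited" rather than naming
  // a real processor resource, so slot 0 must always read as zero; the assert
  // below checks exactly that after the resize.)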
  ExecutedResCounts.resize(1);
  assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
}

void SchedRemainder::
init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
  reset();
  if (!SchedModel->hasInstrSchedModel())
    return;
  RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
  for (std::vector<SUnit>::iterator
         I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
    RemIssueCount += SchedModel->getNumMicroOps(I->getInstr(), SC)
      * SchedModel->getMicroOpFactor();
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned PIdx = PI->ProcResourceIdx;
      unsigned Factor = SchedModel->getResourceFactor(PIdx);
      RemainingCounts[PIdx] += (Factor * PI->Cycles);
    }
  }
}

void SchedBoundary::
init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
  reset();
  DAG = dag;
  SchedModel = smodel;
  Rem = rem;
  if (SchedModel->hasInstrSchedModel()) {
    ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds());
    ReservedCycles.resize(SchedModel->getNumProcResourceKinds(), InvalidCycle);
  }
}

/// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
/// these "soft stalls" differently than the hard stall cycles based on CPU
/// resources and computed by checkHazard(). A fully in-order model
/// (MicroOpBufferSize==0) will not make use of this since instructions are not
/// available for scheduling until they are ready. However, a weaker in-order
/// model may use this for heuristics. For example, if a processor has in-order
/// behavior when reading certain resources, this may come into play.
unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
  if (!SU->isUnbuffered)
    return 0;

  unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
  if (ReadyCycle > CurrCycle)
    return ReadyCycle - CurrCycle;
  return 0;
}

/// Compute the next cycle at which the given processor resource can be
/// scheduled.
unsigned SchedBoundary::
getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
  unsigned NextUnreserved = ReservedCycles[PIdx];
  // If this resource has never been used, always return cycle zero.
  if (NextUnreserved == InvalidCycle)
    return 0;
  // For bottom-up scheduling add the cycles needed for the current operation.
  if (!isTop())
    NextUnreserved += Cycles;
  return NextUnreserved;
}

/// Does this SU have a hazard within the current instruction group?
///
/// The scheduler supports two modes of hazard recognition. The first is the
/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
/// supports highly complicated in-order reservation tables
/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
///
/// The second is a streamlined mechanism that checks for hazards based on
/// simple counters that the scheduler itself maintains. It explicitly checks
/// for instruction dispatch limitations, including the number of micro-ops that
/// can dispatch per cycle.
///
/// TODO: Also check whether the SU must start a new group.
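///
/// Illustrative example (assumed numbers, not from a real machine model):
/// with IssueWidth == 4 and CurrMOps == 3, an SU that expands to 2 micro-ops
/// cannot join the current group (3 + 2 > 4), so checkHazard() returns true
/// and the node waits in the Pending queue until the cycle advances.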
bool SchedBoundary::checkHazard(SUnit *SU) {
  if (HazardRec->isEnabled()
      && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
    return true;
  }
  unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
  if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
    DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
          << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
    return true;
  }
  if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned NRCycle = getNextResourceCycle(PI->ProcResourceIdx, PI->Cycles);
      if (NRCycle > CurrCycle) {
#ifndef NDEBUG
        MaxObservedStall = std::max(PI->Cycles, MaxObservedStall);
#endif
        DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") "
              << SchedModel->getResourceName(PI->ProcResourceIdx)
              << "=" << NRCycle << "c\n");
        return true;
      }
    }
  }
  return false;
}

// Find the unscheduled node in ReadySUs with the highest latency.
unsigned SchedBoundary::
findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
  SUnit *LateSU = nullptr;
  unsigned RemLatency = 0;
  for (ArrayRef<SUnit*>::iterator I = ReadySUs.begin(), E = ReadySUs.end();
       I != E; ++I) {
    unsigned L = getUnscheduledLatency(*I);
    if (L > RemLatency) {
      RemLatency = L;
      LateSU = *I;
    }
  }
  if (LateSU) {
    DEBUG(dbgs() << Available.getName() << " RemLatency SU("
          << LateSU->NodeNum << ") " << RemLatency << "c\n");
  }
  return RemLatency;
}

// Count resources in this zone and the remaining unscheduled
// instructions. Return the max count, scaled. Set OtherCritIdx to the critical
// resource index, or zero if the zone is issue limited.
unsigned SchedBoundary::
getOtherResourceCount(unsigned &OtherCritIdx) {
  OtherCritIdx = 0;
  if (!SchedModel->hasInstrSchedModel())
    return 0;

  unsigned OtherCritCount = Rem->RemIssueCount
    + (RetiredMOps * SchedModel->getMicroOpFactor());
  DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
        << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
  for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
       PIdx != PEnd; ++PIdx) {
    unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
    if (OtherCount > OtherCritCount) {
      OtherCritCount = OtherCount;
      OtherCritIdx = PIdx;
    }
  }
  if (OtherCritIdx) {
    DEBUG(dbgs() << "  " << Available.getName() << " + Remain CritRes: "
          << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
          << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
  }
  return OtherCritCount;
}

void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) {
  assert(SU->getInstr() && "Scheduled SUnit must have instr");

#ifndef NDEBUG
  // ReadyCycle has been bumped up to the CurrCycle when this node was
  // scheduled, but CurrCycle may have been eagerly advanced immediately after
  // scheduling, so may now be greater than ReadyCycle.
  if (ReadyCycle > CurrCycle)
    MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
#endif

  if (ReadyCycle < MinReadyCycle)
    MinReadyCycle = ReadyCycle;

  // Check for interlocks first. For the purpose of other heuristics, an
  // instruction that cannot issue appears as if it's not in the ReadyQueue.
  bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
  if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU))
    Pending.push(SU);
  else
    Available.push(SU);

  // Record this node as an immediate dependent of the scheduled node.
  NextSUs.insert(SU);
}

void SchedBoundary::releaseTopNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  releaseNode(SU, SU->TopReadyCycle);
}

void SchedBoundary::releaseBottomNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  releaseNode(SU, SU->BotReadyCycle);
}

/// Move the boundary of scheduled code by one cycle.
void SchedBoundary::bumpCycle(unsigned NextCycle) {
  if (SchedModel->getMicroOpBufferSize() == 0) {
    assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
    if (MinReadyCycle > NextCycle)
      NextCycle = MinReadyCycle;
  }
  // Update the current micro-ops, which will issue in the next cycle.
  unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
  CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;

  // Decrement DependentLatency based on the next cycle.
  if ((NextCycle - CurrCycle) > DependentLatency)
    DependentLatency = 0;
  else
    DependentLatency -= (NextCycle - CurrCycle);

  if (!HazardRec->isEnabled()) {
    // Bypass HazardRec virtual calls.
    CurrCycle = NextCycle;
  }
  else {
    // Bypass getHazardType calls in case of long latency.
    for (; CurrCycle != NextCycle; ++CurrCycle) {
      if (isTop())
        HazardRec->AdvanceCycle();
      else
        HazardRec->RecedeCycle();
    }
  }
  CheckPending = true;
  unsigned LFactor = SchedModel->getLatencyFactor();
  IsResourceLimited =
    (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
    > (int)LFactor;

  DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n');
}

void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
  ExecutedResCounts[PIdx] += Count;
  if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
    MaxExecutedResCount = ExecutedResCounts[PIdx];
}

/// Add the given processor resource to this scheduled zone.
///
/// \param Cycles indicates the number of consecutive (non-pipelined) cycles
/// during which this resource is consumed.
///
/// \return the next cycle at which the instruction may execute without
/// oversubscribing resources.
unsigned SchedBoundary::
countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
  unsigned Factor = SchedModel->getResourceFactor(PIdx);
  unsigned Count = Factor * Cycles;
  DEBUG(dbgs() << "  " << SchedModel->getResourceName(PIdx)
        << " +" << Cycles << "x" << Factor << "u\n");

  // Update executed resource counts.
  incExecutedResources(PIdx, Count);
  assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
  Rem->RemainingCounts[PIdx] -= Count;

  // Check if this resource exceeds the current critical resource.
  // If so, it becomes the critical resource.
  if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
    ZoneCritResIdx = PIdx;
    DEBUG(dbgs() << "  *** Critical resource "
          << SchedModel->getResourceName(PIdx) << ": "
          << getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
  }
  // For reserved resources, record the highest cycle using the resource.
  unsigned NextAvailable = getNextResourceCycle(PIdx, Cycles);
  if (NextAvailable > CurrCycle) {
    DEBUG(dbgs() << "  Resource conflict: "
          << SchedModel->getProcResource(PIdx)->Name << " reserved until @"
          << NextAvailable << "\n");
  }
  return NextAvailable;
}

/// Move the boundary of scheduled code by one SUnit.
void SchedBoundary::bumpNode(SUnit *SU) {
  // Update the reservation table.
  if (HazardRec->isEnabled()) {
    if (!isTop() && SU->isCall) {
      // Calls are scheduled with their preceding instructions. For bottom-up
      // scheduling, clear the pipeline state before emitting.
      HazardRec->Reset();
    }
    HazardRec->EmitInstruction(SU);
  }
  // checkHazard should prevent scheduling multiple instructions per cycle that
  // exceed the issue width.
  const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
  unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
  assert(
      (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
      "Cannot schedule this instruction's MicroOps in the current cycle.");

  unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
  DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");

  unsigned NextCycle = CurrCycle;
  switch (SchedModel->getMicroOpBufferSize()) {
  case 0:
    assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
    break;
  case 1:
    if (ReadyCycle > NextCycle) {
      NextCycle = ReadyCycle;
      DEBUG(dbgs() << "  *** Stall until: " << ReadyCycle << "\n");
    }
    break;
  default:
    // We don't currently model the OOO reorder buffer, so consider all
    // scheduled MOps to be "retired". We do loosely model in-order resource
    // latency. If this instruction uses an in-order resource, account for any
    // likely stall cycles.
    if (SU->isUnbuffered && ReadyCycle > NextCycle)
      NextCycle = ReadyCycle;
    break;
  }
  RetiredMOps += IncMOps;

  // Update resource counts and critical resource.
  if (SchedModel->hasInstrSchedModel()) {
    unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
    assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
    Rem->RemIssueCount -= DecRemIssue;
    if (ZoneCritResIdx) {
      // Scale scheduled micro-ops for comparing with the critical resource.
      unsigned ScaledMOps =
        RetiredMOps * SchedModel->getMicroOpFactor();

      // If scaled micro-ops are now more than the previous critical resource by
      // a full cycle, then micro-ops issue becomes critical.
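      // Illustrative arithmetic (assumed factors, not from a real model):
      // with MicroOpFactor == 1 and LatencyFactor == 2, RetiredMOps == 24 and
      // a critical-resource count of 20 give ScaledMOps - Count == 4 >= 2, so
      // the check below resets ZoneCritResIdx to 0 and micro-op issue is
      // treated as the critical "resource" from here on.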
      if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
          >= (int)SchedModel->getLatencyFactor()) {
        ZoneCritResIdx = 0;
        DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
              << ScaledMOps / SchedModel->getLatencyFactor() << "c\n");
      }
    }
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned RCycle =
        countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
      if (RCycle > NextCycle)
        NextCycle = RCycle;
    }
    if (SU->hasReservedResource) {
      // For reserved resources, record the highest cycle using the resource.
      // For top-down scheduling, this is the cycle in which we schedule this
      // instruction plus the number of cycles the operation reserves the
      // resource. For bottom-up, it is simply the instruction's cycle.
      for (TargetSchedModel::ProcResIter
             PI = SchedModel->getWriteProcResBegin(SC),
             PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
        unsigned PIdx = PI->ProcResourceIdx;
        if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
          if (isTop()) {
            ReservedCycles[PIdx] =
              std::max(getNextResourceCycle(PIdx, 0), NextCycle + PI->Cycles);
          }
          else
            ReservedCycles[PIdx] = NextCycle;
        }
      }
    }
  }
  // Update ExpectedLatency and DependentLatency.
  unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
  unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
  if (SU->getDepth() > TopLatency) {
    TopLatency = SU->getDepth();
    DEBUG(dbgs() << "  " << Available.getName()
          << " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n");
  }
  if (SU->getHeight() > BotLatency) {
    BotLatency = SU->getHeight();
    DEBUG(dbgs() << "  " << Available.getName()
          << " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n");
  }
  // If we stall for any reason, bump the cycle.
  if (NextCycle > CurrCycle) {
    bumpCycle(NextCycle);
  }
  else {
    // After updating ZoneCritResIdx and ExpectedLatency, check if we're
    // resource limited. If a stall occurred, bumpCycle does this.
    unsigned LFactor = SchedModel->getLatencyFactor();
    IsResourceLimited =
      (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
      > (int)LFactor;
  }
  // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
  // resets CurrMOps. Loop to handle instructions with more MOps than issue in
  // one cycle. Since we commonly reach the max MOps here, opportunistically
  // bump the cycle to avoid uselessly checking everything in the readyQ.
  CurrMOps += IncMOps;
  while (CurrMOps >= SchedModel->getIssueWidth()) {
    DEBUG(dbgs() << "  *** Max MOps " << CurrMOps
          << " at cycle " << CurrCycle << '\n');
    bumpCycle(++NextCycle);
  }
  DEBUG(dumpScheduledState());
}

/// Release pending ready nodes into the available queue. This makes them
/// visible to heuristics.
void SchedBoundary::releasePending() {
  // If the available queue is empty, it is safe to reset MinReadyCycle.
  if (Available.empty())
    MinReadyCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
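  // (Added note: IsBuffered below distinguishes buffered models
  // (MicroOpBufferSize != 0), where a not-yet-ready instruction may still be
  // made visible to the heuristics, from strictly in-order models
  // (MicroOpBufferSize == 0), where a node stays in Pending until
  // ReadyCycle <= CurrCycle.)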
  bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
  for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
    SUnit *SU = *(Pending.begin()+i);
    unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;

    if (ReadyCycle < MinReadyCycle)
      MinReadyCycle = ReadyCycle;

    if (!IsBuffered && ReadyCycle > CurrCycle)
      continue;

    if (checkHazard(SU))
      continue;

    Available.push(SU);
    Pending.remove(Pending.begin()+i);
    --i; --e;
  }
  DEBUG(if (!Pending.empty()) Pending.dump());
  CheckPending = false;
}

/// Remove SU from the ready set for this boundary.
void SchedBoundary::removeReady(SUnit *SU) {
  if (Available.isInQueue(SU))
    Available.remove(Available.find(SU));
  else {
    assert(Pending.isInQueue(SU) && "bad ready count");
    Pending.remove(Pending.find(SU));
  }
}

/// If this queue only has one ready candidate, return it. As a side effect,
/// defer any nodes that now hit a hazard, and advance the cycle until at least
/// one node is ready. If multiple instructions are ready, return NULL.
SUnit *SchedBoundary::pickOnlyChoice() {
  if (CheckPending)
    releasePending();

  if (CurrMOps > 0) {
    // Defer any ready instrs that now have a hazard.
    for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
      if (checkHazard(*I)) {
        Pending.push(*I);
        I = Available.remove(I);
        continue;
      }
      ++I;
    }
  }
  for (unsigned i = 0; Available.empty(); ++i) {
    // FIXME: Re-enable assert once PR20057 is resolved.
    // assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
    //        "permanent hazard");
    (void)i;
    bumpCycle(CurrCycle + 1);
    releasePending();
  }
  if (Available.size() == 1)
    return *Available.begin();
  return nullptr;
}

#ifndef NDEBUG
// This is useful information to dump after bumpNode.
// Note that the Queue contents are more useful before pickNodeFromQueue.
void SchedBoundary::dumpScheduledState() {
  unsigned ResFactor;
  unsigned ResCount;
  if (ZoneCritResIdx) {
    ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
    ResCount = getResourceCount(ZoneCritResIdx);
  }
  else {
    ResFactor = SchedModel->getMicroOpFactor();
    ResCount = RetiredMOps * SchedModel->getMicroOpFactor();
  }
  unsigned LFactor = SchedModel->getLatencyFactor();
  dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
         << "  Retired: " << RetiredMOps;
  dbgs() << "\n  Executed: " << getExecutedCount() / LFactor << "c";
  dbgs() << "\n  Critical: " << ResCount / LFactor << "c, "
         << ResCount / ResFactor << " "
         << SchedModel->getResourceName(ZoneCritResIdx)
         << "\n  ExpectedLatency: " << ExpectedLatency << "c\n"
         << (IsResourceLimited ? "  - Resource" : "  - Latency")
         << " limited.\n";
}
#endif

//===----------------------------------------------------------------------===//
// GenericScheduler - Generic implementation of MachineSchedStrategy.
//===----------------------------------------------------------------------===//

void GenericSchedulerBase::SchedCandidate::
initResourceDelta(const ScheduleDAGMI *DAG,
                  const TargetSchedModel *SchedModel) {
  if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
    return;

  const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
  for (TargetSchedModel::ProcResIter
         PI = SchedModel->getWriteProcResBegin(SC),
         PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
    if (PI->ProcResourceIdx == Policy.ReduceResIdx)
      ResDelta.CritResources += PI->Cycles;
    if (PI->ProcResourceIdx == Policy.DemandResIdx)
      ResDelta.DemandedResources += PI->Cycles;
  }
}

/// Set the CandPolicy for a scheduling zone, given the current resources and
/// latencies inside and outside the zone.
void GenericSchedulerBase::setPolicy(CandPolicy &Policy,
                                     bool IsPostRA,
                                     SchedBoundary &CurrZone,
                                     SchedBoundary *OtherZone) {
  // Apply preemptive heuristics based on the total latency and resources
  // inside and outside this zone. Potential stalls should be considered before
  // following this policy.

  // Compute remaining latency. We need this both to determine whether the
  // overall schedule has become latency-limited and whether the instructions
  // outside this zone are resource or latency limited.
  //
  // The "dependent" latency is updated incrementally during scheduling as the
  // max height/depth of scheduled nodes minus the cycles since it was
  // scheduled:
  //   DLat = max (N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
  //
  // The "independent" latency is the max ready queue depth:
  //   ILat = max N.depth for N in Available|Pending
  //
  // RemainingLatency is the greater of independent and dependent latency.
  unsigned RemLatency = CurrZone.getDependentLatency();
  RemLatency = std::max(RemLatency,
                        CurrZone.findMaxLatency(CurrZone.Available.elements()));
  RemLatency = std::max(RemLatency,
                        CurrZone.findMaxLatency(CurrZone.Pending.elements()));

  // Compute the critical resource outside the zone.
  unsigned OtherCritIdx = 0;
  unsigned OtherCount =
    OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;

  bool OtherResLimited = false;
  if (SchedModel->hasInstrSchedModel()) {
    unsigned LFactor = SchedModel->getLatencyFactor();
    OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
  }
  // Schedule aggressively for latency in PostRA mode. We don't check for
  // acyclic latency during PostRA, and highly out-of-order processors will
  // skip PostRA scheduling.
  if (!OtherResLimited) {
    if (IsPostRA || (RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath)) {
      Policy.ReduceLatency |= true;
      DEBUG(dbgs() << "  " << CurrZone.Available.getName()
            << " RemainingLatency " << RemLatency << " + "
            << CurrZone.getCurrCycle() << "c > CritPath "
            << Rem.CriticalPath << "\n");
    }
  }
  // If the same resource is limiting inside and outside the zone, do nothing.
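  // (Added commentary: e.g. if the same ALU pipe is critical both within this
  // zone and in the remainder of the region, reducing it locally while
  // demanding it remotely would cancel out, so neither ReduceResIdx nor
  // DemandResIdx is set in that case.)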
  if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
    return;

  DEBUG(
    if (CurrZone.isResourceLimited()) {
      dbgs() << "  " << CurrZone.Available.getName() << " ResourceLimited: "
             << SchedModel->getResourceName(CurrZone.getZoneCritResIdx())
             << "\n";
    }
    if (OtherResLimited)
      dbgs() << "  RemainingLimit: "
             << SchedModel->getResourceName(OtherCritIdx) << "\n";
    if (!CurrZone.isResourceLimited() && !OtherResLimited)
      dbgs() << "  Latency limited both directions.\n");

  if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
    Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();

  if (OtherResLimited)
    Policy.DemandResIdx = OtherCritIdx;
}

#ifndef NDEBUG
const char *GenericSchedulerBase::getReasonStr(
  GenericSchedulerBase::CandReason Reason) {
  switch (Reason) {
  case NoCand:         return "NOCAND    ";
  case PhysRegCopy:    return "PREG-COPY ";
  case RegExcess:      return "REG-EXCESS";
  case RegCritical:    return "REG-CRIT  ";
  case Stall:          return "STALL     ";
  case Cluster:        return "CLUSTER   ";
  case Weak:           return "WEAK      ";
  case RegMax:         return "REG-MAX   ";
  case ResourceReduce: return "RES-REDUCE";
  case ResourceDemand: return "RES-DEMAND";
  case TopDepthReduce: return "TOP-DEPTH ";
  case TopPathReduce:  return "TOP-PATH  ";
  case BotHeightReduce:return "BOT-HEIGHT";
  case BotPathReduce:  return "BOT-PATH  ";
  case NextDefUse:     return "DEF-USE   ";
  case NodeOrder:      return "ORDER     ";
  };
  llvm_unreachable("Unknown reason!");
}

void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
  PressureChange P;
  unsigned ResIdx = 0;
  unsigned Latency = 0;
  switch (Cand.Reason) {
  default:
    break;
  case RegExcess:
    P = Cand.RPDelta.Excess;
    break;
  case RegCritical:
    P = Cand.RPDelta.CriticalMax;
    break;
  case RegMax:
    P = Cand.RPDelta.CurrentMax;
    break;
  case ResourceReduce:
    ResIdx = Cand.Policy.ReduceResIdx;
    break;
  case ResourceDemand:
    ResIdx = Cand.Policy.DemandResIdx;
    break;
  case TopDepthReduce:
    Latency = Cand.SU->getDepth();
    break;
  case TopPathReduce:
    Latency = Cand.SU->getHeight();
    break;
  case BotHeightReduce:
    Latency = Cand.SU->getHeight();
    break;
  case BotPathReduce:
    Latency = Cand.SU->getDepth();
    break;
  }
  dbgs() << "  Cand SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
  if (P.isValid())
    dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
           << ":" << P.getUnitInc() << " ";
  else
    dbgs() << "      ";
  if (ResIdx)
    dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
  else
    dbgs() << "         ";
  if (Latency)
    dbgs() << " " << Latency << " cycles ";
  else
    dbgs() << "          ";
  dbgs() << '\n';
}
#endif

/// Return true if this heuristic determines order.
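///
/// (Added explanatory note.) tryLess/tryGreater implement a three-way
/// protocol: if TryVal wins, TryCand.Reason is set and true is returned; if
/// CandVal wins, true is returned with Cand confirmed, upgrading Cand.Reason
/// to this Reason when it is more important (numerically lower); on a tie,
/// the reason is recorded via setRepeat() and false is returned so the next
/// heuristic in tryCandidate() can break it.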
static bool tryLess(int TryVal, int CandVal,
                    GenericSchedulerBase::SchedCandidate &TryCand,
                    GenericSchedulerBase::SchedCandidate &Cand,
                    GenericSchedulerBase::CandReason Reason) {
  if (TryVal < CandVal) {
    TryCand.Reason = Reason;
    return true;
  }
  if (TryVal > CandVal) {
    if (Cand.Reason > Reason)
      Cand.Reason = Reason;
    return true;
  }
  Cand.setRepeat(Reason);
  return false;
}

static bool tryGreater(int TryVal, int CandVal,
                       GenericSchedulerBase::SchedCandidate &TryCand,
                       GenericSchedulerBase::SchedCandidate &Cand,
                       GenericSchedulerBase::CandReason Reason) {
  if (TryVal > CandVal) {
    TryCand.Reason = Reason;
    return true;
  }
  if (TryVal < CandVal) {
    if (Cand.Reason > Reason)
      Cand.Reason = Reason;
    return true;
  }
  Cand.setRepeat(Reason);
  return false;
}

static bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
                       GenericSchedulerBase::SchedCandidate &Cand,
                       SchedBoundary &Zone) {
  if (Zone.isTop()) {
    if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
      if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
                  TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
        return true;
    }
    if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
                   TryCand, Cand, GenericSchedulerBase::TopPathReduce))
      return true;
  }
  else {
    if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
      if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
                  TryCand, Cand, GenericSchedulerBase::BotHeightReduce))
        return true;
    }
    if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
                   TryCand, Cand, GenericSchedulerBase::BotPathReduce))
      return true;
  }
  return false;
}

static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand,
                      bool IsTop) {
  DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
        << GenericSchedulerBase::getReasonStr(Cand.Reason) << '\n');
}

void GenericScheduler::initialize(ScheduleDAGMI *dag) {
  assert(dag->hasVRegLiveness() &&
         "(PreRA)GenericScheduler needs vreg liveness");
  DAG = static_cast<ScheduleDAGMILive*>(dag);
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  Bot.init(DAG, SchedModel, &Rem);

  // Initialize resource counts.

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
  // are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  if (!Top.HazardRec) {
    Top.HazardRec =
      DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
        Itin, DAG);
  }
  if (!Bot.HazardRec) {
    Bot.HazardRec =
      DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
        Itin, DAG);
  }
}

/// Initialize the per-region scheduling policy.
void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
                                  MachineBasicBlock::iterator End,
                                  unsigned NumRegionInstrs) {
  const MachineFunction &MF = *Begin->getParent()->getParent();
  const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();

  // Avoid setting up the register pressure tracker for small regions to save
  // compile time.
  // As a rough heuristic, only track pressure when the number of
  // schedulable instructions exceeds half the integer register file.
  RegionPolicy.ShouldTrackPressure = true;
  for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
    MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
    if (TLI->isTypeLegal(LegalIntVT)) {
      unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
        TLI->getRegClassFor(LegalIntVT));
      RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
    }
  }

  // For generic targets, we default to bottom-up, because it's simpler and
  // more compile-time optimization has been implemented in that direction.
  RegionPolicy.OnlyBottomUp = true;

  // Allow the subtarget to override default policy.
  MF.getSubtarget().overrideSchedPolicy(RegionPolicy, Begin, End,
                                        NumRegionInstrs);

  // After subtarget overrides, apply command line options.
  if (!EnableRegPressure)
    RegionPolicy.ShouldTrackPressure = false;

  // Check whether -misched-topdown/bottomup can force or unforce the
  // scheduling direction. e.g. -misched-bottomup=false allows scheduling in
  // both directions.
  assert((!ForceTopDown || !ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
  if (ForceBottomUp.getNumOccurrences() > 0) {
    RegionPolicy.OnlyBottomUp = ForceBottomUp;
    if (RegionPolicy.OnlyBottomUp)
      RegionPolicy.OnlyTopDown = false;
  }
  if (ForceTopDown.getNumOccurrences() > 0) {
    RegionPolicy.OnlyTopDown = ForceTopDown;
    if (RegionPolicy.OnlyTopDown)
      RegionPolicy.OnlyBottomUp = false;
  }
}

void GenericScheduler::dumpPolicy() {
  dbgs() << "GenericScheduler RegionPolicy: "
         << " ShouldTrackPressure=" << RegionPolicy.ShouldTrackPressure
         << " OnlyTopDown=" << RegionPolicy.OnlyTopDown
         << " OnlyBottomUp=" << RegionPolicy.OnlyBottomUp
         << "\n";
}

/// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
/// critical path by more cycles than it takes to drain the instruction buffer.
/// We estimate an upper bound on in-flight instructions as:
///
///   CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
///   InFlightIterations = AcyclicPath / CyclesPerIteration
///   InFlightResources = InFlightIterations * LoopResources
///
/// TODO: Check execution resources in addition to IssueCount.
void GenericScheduler::checkAcyclicLatency() {
  if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
    return;

  // Scaled number of cycles per loop iteration.
  unsigned IterCount =
    std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
             Rem.RemIssueCount);
  // Scaled acyclic critical path.
  unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
  // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
  unsigned InFlightCount =
    (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
  unsigned BufferLimit =
    SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();

  Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;

  DEBUG(dbgs() << "IssueCycles="
        << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
        << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
        << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
        << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
        << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
        if (Rem.IsAcyclicLatencyLimited)
          dbgs() << "  ACYCLIC LATENCY LIMIT\n");
}

void GenericScheduler::registerRoots() {
  Rem.CriticalPath = DAG->ExitSU.getDepth();

  // Some roots may not feed into ExitSU. Check all of them in case.
  for (std::vector<SUnit*>::const_iterator
         I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
    if ((*I)->getDepth() > Rem.CriticalPath)
      Rem.CriticalPath = (*I)->getDepth();
  }
  DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
  if (DumpCriticalPathLength) {
    errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n";
  }

  if (EnableCyclicPath) {
    Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
    checkAcyclicLatency();
  }
}

static bool tryPressure(const PressureChange &TryP,
                        const PressureChange &CandP,
                        GenericSchedulerBase::SchedCandidate &TryCand,
                        GenericSchedulerBase::SchedCandidate &Cand,
                        GenericSchedulerBase::CandReason Reason) {
  int TryRank = TryP.getPSetOrMax();
  int CandRank = CandP.getPSetOrMax();
  // If both candidates affect the same set, go with the smallest increase.
  if (TryRank == CandRank) {
    return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
                   Reason);
  }
  // If one candidate decreases and the other increases, go with it.
  // Invalid candidates have UnitInc==0.
  if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
                 Reason)) {
    return true;
  }
  // If the candidates are decreasing pressure, reverse priority.
  if (TryP.getUnitInc() < 0)
    std::swap(TryRank, CandRank);
  return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
}

static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
  return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
}

/// Minimize physical register live ranges. Regalloc wants them adjacent to
/// their physreg def/use.
///
/// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
/// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
/// with the operation that produces or consumes the physreg. We'll do this when
/// regalloc has support for parallel copies.
static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
  const MachineInstr *MI = SU->getInstr();
  if (!MI->isCopy())
    return 0;

  unsigned ScheduledOper = isTop ? 1 : 0;
  unsigned UnscheduledOper = isTop ? 0 : 1;
  // If we have already scheduled the physreg producer/consumer, immediately
  // schedule the copy.
  if (TargetRegisterInfo::isPhysicalRegister(
        MI->getOperand(ScheduledOper).getReg()))
    return 1;
  // If the physreg is at the boundary, defer it. Otherwise schedule it
  // immediately to free the dependent. We can hoist the copy later.
  bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
  if (TargetRegisterInfo::isPhysicalRegister(
        MI->getOperand(UnscheduledOper).getReg()))
    return AtBoundary ? -1 : 1;
  return 0;
}

/// Apply a set of heuristics to a new candidate. Heuristics are currently
/// hierarchical. This may be more efficient than a graduated cost model because
/// we don't need to evaluate all aspects of the model for each node in the
/// queue. But it's really done to make the heuristics easier to debug and
/// statistically analyze.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
/// \param Zone describes the scheduled zone that we are extending.
/// \param RPTracker describes reg pressure within the scheduled zone.
/// \param TempTracker is a scratch pressure tracker to reuse in queries.
void GenericScheduler::tryCandidate(SchedCandidate &Cand,
                                    SchedCandidate &TryCand,
                                    SchedBoundary &Zone,
                                    const RegPressureTracker &RPTracker,
                                    RegPressureTracker &TempTracker) {

  if (DAG->isTrackingPressure()) {
    // Always initialize TryCand's RPDelta.
    if (Zone.isTop()) {
      TempTracker.getMaxDownwardPressureDelta(
        TryCand.SU->getInstr(),
        TryCand.RPDelta,
        DAG->getRegionCriticalPSets(),
        DAG->getRegPressure().MaxSetPressure);
    }
    else {
      if (VerifyScheduling) {
        TempTracker.getMaxUpwardPressureDelta(
          TryCand.SU->getInstr(),
          &DAG->getPressureDiff(TryCand.SU),
          TryCand.RPDelta,
          DAG->getRegionCriticalPSets(),
          DAG->getRegPressure().MaxSetPressure);
      }
      else {
        RPTracker.getUpwardPressureDelta(
          TryCand.SU->getInstr(),
          DAG->getPressureDiff(TryCand.SU),
          TryCand.RPDelta,
          DAG->getRegionCriticalPSets(),
          DAG->getRegPressure().MaxSetPressure);
      }
    }
  }
  DEBUG(if (TryCand.RPDelta.Excess.isValid())
          dbgs() << "  Try  SU(" << TryCand.SU->NodeNum << ") "
                 << TRI->getRegPressureSetName(TryCand.RPDelta.Excess.getPSet())
                 << ":" << TryCand.RPDelta.Excess.getUnitInc() << "\n");

  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return;
  }

  if (tryGreater(biasPhysRegCopy(TryCand.SU, Zone.isTop()),
                 biasPhysRegCopy(Cand.SU, Zone.isTop()),
                 TryCand, Cand, PhysRegCopy))
    return;

  // Avoid exceeding the target's limit.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
                                               Cand.RPDelta.Excess,
                                               TryCand, Cand, RegExcess))
    return;

  // Avoid increasing the max critical pressure in the scheduled region.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
                                               Cand.RPDelta.CriticalMax,
                                               TryCand, Cand, RegCritical))
    return;

  // For loops that are acyclic path limited, aggressively schedule for latency.
  // This can result in very long dependence chains scheduled in sequence, so
  // once every cycle (when CurrMOps == 0), switch to normal heuristics.
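  // (Added summary, mirroring the CandReason enum order.) The hierarchy tried
  // above and below is: PhysRegCopy, RegExcess, RegCritical, acyclic-latency,
  // Stall, Cluster, Weak, RegMax, ResourceReduce, ResourceDemand, latency,
  // NextDefUse, and finally NodeOrder as the tie-breaker of last resort.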
  if (Rem.IsAcyclicLatencyLimited && !Zone.getCurrMOps()
      && tryLatency(TryCand, Cand, Zone))
    return;

  // Prioritize instructions that read unbuffered resources by stall cycles.
  if (tryLess(Zone.getLatencyStallCycles(TryCand.SU),
              Zone.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
    return;

  // Keep clustered nodes together to encourage downstream peephole
  // optimizations which may reduce resource requirements.
  //
  // This is a best effort to set things up for a post-RA pass. Optimizations
  // like generating loads of multiple registers should ideally be done within
  // the scheduler pass by combining the loads during DAG postprocessing.
  const SUnit *NextClusterSU =
    Zone.isTop() ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
  if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU,
                 TryCand, Cand, Cluster))
    return;

  // Weak edges are for clustering and other constraints.
  if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()),
              getWeakLeft(Cand.SU, Zone.isTop()),
              TryCand, Cand, Weak)) {
    return;
  }
  // Avoid increasing the max pressure of the entire region.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
                                               Cand.RPDelta.CurrentMax,
                                               TryCand, Cand, RegMax))
    return;

  // Avoid critical resource consumption and balance the schedule.
  TryCand.initResourceDelta(DAG, SchedModel);
  if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
              TryCand, Cand, ResourceReduce))
    return;
  if (tryGreater(TryCand.ResDelta.DemandedResources,
                 Cand.ResDelta.DemandedResources,
                 TryCand, Cand, ResourceDemand))
    return;

  // Avoid serializing long latency dependence chains.
  // For acyclic path limited loops, latency was already checked above.
  if (!RegionPolicy.DisableLatencyHeuristic && Cand.Policy.ReduceLatency &&
      !Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, Zone)) {
    return;
  }

  // Prefer immediate defs/users of the last scheduled instruction. This is a
  // local pressure avoidance strategy that also makes the machine code
  // readable.
  if (tryGreater(Zone.isNextSU(TryCand.SU), Zone.isNextSU(Cand.SU),
                 TryCand, Cand, NextDefUse))
    return;

  // Fall through to original instruction order.
  if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
      || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
    TryCand.Reason = NodeOrder;
  }
}

/// Pick the best candidate from the queue.
///
/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
/// DAG building. To adjust for the current scheduling location we need to
/// maintain the number of vreg uses remaining to be top-scheduled.
void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
                                         const RegPressureTracker &RPTracker,
                                         SchedCandidate &Cand) {
  ReadyQueue &Q = Zone.Available;

  DEBUG(Q.dump());

  // getMaxPressureDelta temporarily modifies the tracker.
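  // (Added note: hence the const_cast below; the pressure query restores the
  // tracker's state, so the const reference remains logically const.)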
  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);

  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {

    SchedCandidate TryCand(Cand.Policy);
    TryCand.SU = *I;
    tryCandidate(Cand, TryCand, Zone, RPTracker, TempTracker);
    if (TryCand.Reason != NoCand) {
      // Initialize resource delta if needed in case future heuristics query it.
      if (TryCand.ResDelta == SchedResourceDelta())
        TryCand.initResourceDelta(DAG, SchedModel);
      Cand.setBest(TryCand);
      DEBUG(traceCandidate(Cand));
    }
  }
}

/// Pick the best candidate node from either the top or bottom queue.
SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (SUnit *SU = Bot.pickOnlyChoice()) {
    IsTopNode = false;
    DEBUG(dbgs() << "Pick Bot NOCAND\n");
    return SU;
  }
  if (SUnit *SU = Top.pickOnlyChoice()) {
    IsTopNode = true;
    DEBUG(dbgs() << "Pick Top NOCAND\n");
    return SU;
  }
  CandPolicy NoPolicy;
  SchedCandidate BotCand(NoPolicy);
  SchedCandidate TopCand(NoPolicy);
  // Set the bottom-up policy based on the state of the current bottom zone and
  // the instructions outside the zone, including the top zone.
  setPolicy(BotCand.Policy, /*IsPostRA=*/false, Bot, &Top);
  // Set the top-down policy based on the state of the current top zone and
  // the instructions outside the zone, including the bottom zone.
  setPolicy(TopCand.Policy, /*IsPostRA=*/false, Top, &Bot);

  // Prefer bottom scheduling when heuristics are silent.
  pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
  assert(BotCand.Reason != NoCand && "failed to find the first candidate");

  // If either Q has a single candidate that provides the least increase in
  // Excess pressure, we can immediately schedule from that Q.
  //
  // RegionCriticalPSets summarizes the pressure within the scheduled region and
  // affects picking from either Q. If scheduling in one direction must
  // increase pressure for one of the excess PSets, then schedule in that
  // direction first to provide more freedom in the other direction.
  if ((BotCand.Reason == RegExcess && !BotCand.isRepeat(RegExcess))
      || (BotCand.Reason == RegCritical
          && !BotCand.isRepeat(RegCritical)))
  {
    IsTopNode = false;
    tracePick(BotCand, IsTopNode);
    return BotCand.SU;
  }
  // Check if the top Q has a better candidate.
  pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
  assert(TopCand.Reason != NoCand && "failed to find the first candidate");

  // Choose the queue with the most important (lowest enum) reason.
  if (TopCand.Reason < BotCand.Reason) {
    IsTopNode = true;
    tracePick(TopCand, IsTopNode);
    return TopCand.SU;
  }
  // Otherwise prefer the bottom candidate, in node order if all else failed.
  IsTopNode = false;
  tracePick(BotCand, IsTopNode);
  return BotCand.SU;
}

/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
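///
/// (Added clarification.) Returns nullptr once the region is exhausted; the
/// do/while loop below skips any node that is already marked scheduled but
/// still sits in a ready queue, which can happen when scheduling runs
/// bidirectionally.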
SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    if (RegionPolicy.OnlyTopDown) {
      SU = Top.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        SchedCandidate TopCand(NoPolicy);
        pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
        assert(TopCand.Reason != NoCand && "failed to find a candidate");
        tracePick(TopCand, true);
        SU = TopCand.SU;
      }
      IsTopNode = true;
    }
    else if (RegionPolicy.OnlyBottomUp) {
      SU = Bot.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        SchedCandidate BotCand(NoPolicy);
        pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
        assert(BotCand.Reason != NoCand && "failed to find a candidate");
        tracePick(BotCand, false);
        SU = BotCand.SU;
      }
      IsTopNode = false;
    }
    else {
      SU = pickNodeBidirectional(IsTopNode);
    }
  } while (SU->isScheduled);

  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
  return SU;
}

void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {

  MachineBasicBlock::iterator InsertPos = SU->getInstr();
  if (!isTop)
    ++InsertPos;
  SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;

  // Find already scheduled copies with a single physreg dependence and move
  // them just above the scheduled instruction.
  for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end();
       I != E; ++I) {
    if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg()))
      continue;
    SUnit *DepSU = I->getSUnit();
    if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
      continue;
    MachineInstr *Copy = DepSU->getInstr();
    if (!Copy->isCopy())
      continue;
    DEBUG(dbgs() << "  Rescheduling physreg copy ";
          I->getSUnit()->dump(DAG));
    DAG->moveInstruction(Copy, InsertPos);
  }
}

/// Update the scheduler's state after scheduling a node. This is the same node
/// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
/// update its state based on the current cycle before MachineSchedStrategy
/// does.
///
/// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
/// them here. See comments in biasPhysRegCopy.
void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  if (IsTopNode) {
    SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
    Top.bumpNode(SU);
    if (SU->hasPhysRegUses)
      reschedulePhysRegCopies(SU, true);
  }
  else {
    SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
    Bot.bumpNode(SU);
    if (SU->hasPhysRegDefs)
      reschedulePhysRegCopies(SU, false);
  }
}

/// Create the standard converging machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
    new ScheduleDAGMILive(C, make_unique<GenericScheduler>(C));
  // Register DAG post-processors.
  //
  // FIXME: extend the mutation API to allow earlier mutations to instantiate
  // data and pass it to later mutations. Have a single mutation that gathers
  // the interesting nodes in one pass.
  DAG->addMutation(make_unique<CopyConstrain>(DAG->TII, DAG->TRI));
  if (EnableLoadCluster && DAG->TII->enableClusterLoads())
    DAG->addMutation(make_unique<LoadClusterMutation>(DAG->TII, DAG->TRI));
  if (EnableMacroFusion)
    DAG->addMutation(make_unique<MacroFusion>(*DAG->TII, *DAG->TRI));
  return DAG;
}

static MachineSchedRegistry
GenericSchedRegistry("converge", "Standard converging scheduler.",
                     createGenericSchedLive);

//===----------------------------------------------------------------------===//
// PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
//===----------------------------------------------------------------------===//

void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
  DAG = Dag;
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  BotRoots.clear();

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
  // or are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  if (!Top.HazardRec) {
    Top.HazardRec =
      DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
        Itin, DAG);
  }
}

void PostGenericScheduler::registerRoots() {
  Rem.CriticalPath = DAG->ExitSU.getDepth();

  // Some roots may not feed into ExitSU. Check all of them in case.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = BotRoots.begin(), E = BotRoots.end(); I != E; ++I) {
    if ((*I)->getDepth() > Rem.CriticalPath)
      Rem.CriticalPath = (*I)->getDepth();
  }
  DEBUG(dbgs() << "Critical Path: (PGS-RR) " << Rem.CriticalPath << '\n');
  if (DumpCriticalPathLength) {
    errs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << " \n";
  }
}

/// Apply a set of heuristics to a new candidate for PostRA scheduling.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
void PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
                                        SchedCandidate &TryCand) {

  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return;
  }

  // Prioritize instructions that read unbuffered resources by stall cycles.
  if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
              Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
    return;

  // Avoid critical resource consumption and balance the schedule.
  if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
              TryCand, Cand, ResourceReduce))
    return;
  if (tryGreater(TryCand.ResDelta.DemandedResources,
                 Cand.ResDelta.DemandedResources,
                 TryCand, Cand, ResourceDemand))
    return;

  // Avoid serializing long latency dependence chains.
  if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
    return;
  }

  // Fall through to original instruction order.
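  // (Added note: the PostRA scheduler is top-down only, and a lower NodeNum
  // means the instruction came earlier in the original block, so this final
  // tie-breaker preserves source order.)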
  if (TryCand.SU->NodeNum < Cand.SU->NodeNum)
    TryCand.Reason = NodeOrder;
}

void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
  ReadyQueue &Q = Top.Available;

  DEBUG(Q.dump());

  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
    SchedCandidate TryCand(Cand.Policy);
    TryCand.SU = *I;
    TryCand.initResourceDelta(DAG, SchedModel);
    tryCandidate(Cand, TryCand);
    if (TryCand.Reason != NoCand) {
      Cand.setBest(TryCand);
      DEBUG(traceCandidate(Cand));
    }
  }
}

/// Pick the next node to schedule.
SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    SU = Top.pickOnlyChoice();
    if (!SU) {
      CandPolicy NoPolicy;
      SchedCandidate TopCand(NoPolicy);
      // Set the top-down policy based on the state of the current top zone and
      // the instructions outside the zone, including the bottom zone.
      setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
      pickNodeFromQueue(TopCand);
      assert(TopCand.Reason != NoCand && "failed to find a candidate");
      tracePick(TopCand, true);
      SU = TopCand.SU;
    }
  } while (SU->isScheduled);

  IsTopNode = true;
  Top.removeReady(SU);

  DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
  return SU;
}

/// Called after ScheduleDAGMI has scheduled an instruction and updated
/// scheduled/remaining flags in the DAG nodes.
void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
  Top.bumpNode(SU);
}

/// Create a generic scheduler with no vreg liveness or DAG mutation passes.
static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C) {
  return new ScheduleDAGMI(C, make_unique<PostGenericScheduler>(C),
                           /*IsPostRA=*/true);
}

//===----------------------------------------------------------------------===//
// ILP Scheduler. Currently for experimental analysis of heuristics.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Order nodes by the ILP metric.
struct ILPOrder {
  const SchedDFSResult *DFSResult;
  const BitVector *ScheduledTrees;
  bool MaximizeILP;

  ILPOrder(bool MaxILP)
    : DFSResult(nullptr), ScheduledTrees(nullptr), MaximizeILP(MaxILP) {}

  /// \brief Apply a less-than relation on node priority.
  ///
  /// (Return true if A comes after B in the Q.)
  bool operator()(const SUnit *A, const SUnit *B) const {
    unsigned SchedTreeA = DFSResult->getSubtreeID(A);
    unsigned SchedTreeB = DFSResult->getSubtreeID(B);
    if (SchedTreeA != SchedTreeB) {
      // Unscheduled trees have lower priority.
      if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
        return ScheduledTrees->test(SchedTreeB);

      // Trees with shallower connections have lower priority.
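      // (Presumably, a more deeply connected subtree overlaps more with the
      // rest of the graph, which is why it gets the higher priority below.)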
      if (DFSResult->getSubtreeLevel(SchedTreeA)
          != DFSResult->getSubtreeLevel(SchedTreeB)) {
        return DFSResult->getSubtreeLevel(SchedTreeA)
          < DFSResult->getSubtreeLevel(SchedTreeB);
      }
    }
    if (MaximizeILP)
      return DFSResult->getILP(A) < DFSResult->getILP(B);
    else
      return DFSResult->getILP(A) > DFSResult->getILP(B);
  }
};

/// \brief Schedule based on the ILP metric.
class ILPScheduler : public MachineSchedStrategy {
  ScheduleDAGMILive *DAG;
  ILPOrder Cmp;

  std::vector<SUnit*> ReadyQ;
public:
  ILPScheduler(bool MaximizeILP): DAG(nullptr), Cmp(MaximizeILP) {}

  void initialize(ScheduleDAGMI *dag) override {
    assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
    DAG = static_cast<ScheduleDAGMILive*>(dag);
    DAG->computeDFSResult();
    Cmp.DFSResult = DAG->getDFSResult();
    Cmp.ScheduledTrees = &DAG->getScheduledTrees();
    ReadyQ.clear();
  }

  void registerRoots() override {
    // Restore the heap in ReadyQ with the updated DFS results.
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  /// Callback to select the highest priority node from the ready Q.
  SUnit *pickNode(bool &IsTopNode) override {
    if (ReadyQ.empty()) return nullptr;
    std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
    SUnit *SU = ReadyQ.back();
    ReadyQ.pop_back();
    IsTopNode = false;
    DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") "
          << " ILP: " << DAG->getDFSResult()->getILP(SU)
          << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @"
          << DAG->getDFSResult()->getSubtreeLevel(
              DAG->getDFSResult()->getSubtreeID(SU)) << '\n'
          << "Scheduling " << *SU->getInstr());
    return SU;
  }

  /// \brief Scheduler callback to notify that a new subtree is scheduled.
  void scheduleTree(unsigned SubtreeID) override {
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Callback after a node is scheduled. The DAG marks newly scheduled
  /// subtrees and notifies this strategy via scheduleTree() above, which
  /// resorts the priority Q; here we only check that scheduling proceeds
  /// bottom-up, as SchedDFSResult requires.
  void schedNode(SUnit *SU, bool IsTopNode) override {
    assert(!IsTopNode && "SchedDFSResult needs bottom-up");
  }

  void releaseTopNode(SUnit *) override { /*only called for top roots*/ }

  void releaseBottomNode(SUnit *SU) override {
    ReadyQ.push_back(SU);
    std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }
};
} // namespace

static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(true));
}
static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(false));
}
static MachineSchedRegistry ILPMaxRegistry(
  "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
static MachineSchedRegistry ILPMinRegistry(
  "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);

//===----------------------------------------------------------------------===//
// Machine Instruction Shuffler for Correctness Testing
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace {
/// Apply a less-than relation on the node order, which corresponds to the
/// instruction order prior to scheduling. IsReverse implements greater-than.
template<bool IsReverse>
struct SUnitOrder {
  bool operator()(SUnit *A, SUnit *B) const {
    if (IsReverse)
      return A->NodeNum > B->NodeNum;
    else
      return A->NodeNum < B->NodeNum;
  }
};

/// Reorder instructions as much as possible.
class InstructionShuffler : public MachineSchedStrategy {
  bool IsAlternating;
  bool IsTopDown;

  // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
  // gives nodes with a higher number higher priority, causing the latest
  // instructions to be scheduled first.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
    TopQ;
  // When scheduling bottom-up, use greater-than as the queue priority.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
    BottomQ;
public:
  InstructionShuffler(bool alternate, bool topdown)
    : IsAlternating(alternate), IsTopDown(topdown) {}

  void initialize(ScheduleDAGMI*) override {
    TopQ.clear();
    BottomQ.clear();
  }
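  // Note: initialize() runs once per scheduling region, so clearing both
  // queues above drops any nodes left over from the previous region.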

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  SUnit *pickNode(bool &IsTopNode) override {
    SUnit *SU;
    if (IsTopDown) {
      do {
        if (TopQ.empty()) return nullptr;
        SU = TopQ.top();
        TopQ.pop();
      } while (SU->isScheduled);
      IsTopNode = true;
    }
    else {
      do {
        if (BottomQ.empty()) return nullptr;
        SU = BottomQ.top();
        BottomQ.pop();
      } while (SU->isScheduled);
      IsTopNode = false;
    }
    if (IsAlternating)
      IsTopDown = !IsTopDown;
    return SU;
  }

  void schedNode(SUnit *SU, bool IsTopNode) override {}

  void releaseTopNode(SUnit *SU) override {
    TopQ.push(SU);
  }
  void releaseBottomNode(SUnit *SU) override {
    BottomQ.push(SU);
  }
};
} // namespace

static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
  bool Alternate = !ForceTopDown && !ForceBottomUp;
  bool TopDown = !ForceBottomUp;
  assert((TopDown || !ForceTopDown) &&
         "-misched-topdown incompatible with -misched-bottomup");
  return new ScheduleDAGMILive(C,
                               make_unique<InstructionShuffler>(Alternate,
                                                                TopDown));
}
static MachineSchedRegistry ShufflerRegistry(
  "shuffle", "Shuffle machine instructions alternating directions",
  createInstructionShuffler);
#endif // !NDEBUG

//===----------------------------------------------------------------------===//
// GraphWriter support for ScheduleDAGMILive.
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace llvm {

template<> struct GraphTraits<
  ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};

template<>
struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {

  DOTGraphTraits(bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getGraphName(const ScheduleDAG *G) {
    return G->MF.getName();
  }

  static bool renderGraphFromBottomUp() {
    return true;
  }

  static bool isNodeHidden(const SUnit *Node) {
    if (ViewMISchedCutoff == 0)
      return false;
    return (Node->Preds.size() > ViewMISchedCutoff
         || Node->Succs.size() > ViewMISchedCutoff);
  }

  /// If you want to override the dot attributes printed for a particular
  /// edge, override this method.
  static std::string getEdgeAttributes(const SUnit *Node,
                                       SUnitIterator EI,
                                       const ScheduleDAG *Graph) {
    if (EI.isArtificialDep())
      return "color=cyan,style=dashed";
    if (EI.isCtrlDep())
      return "color=blue,style=dashed";
    return "";
  }

  static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
    std::string Str;
    raw_string_ostream SS(Str);
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    SS << "SU:" << SU->NodeNum;
    if (DFS)
      SS << " I:" << DFS->getNumInstrs(SU);
    return SS.str();
  }

  static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
    return G->getGraphNodeLabel(SU);
  }

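  // Color each node by its DFS subtree so subtrees stand out in the rendered
  // graph. DFS data is only available when the DAG tracks vreg liveness.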
  static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
    std::string Str("shape=Mrecord");
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    if (DFS) {
      Str += ",style=filled,fillcolor=\"#";
      Str += DOT::getColorString(DFS->getSubtreeID(N));
      Str += '"';
    }
    return Str;
  }
};
} // namespace llvm
#endif // NDEBUG

/// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
/// rendered using 'dot'.
///
void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
#ifndef NDEBUG
  ViewGraph(this, Name, false, Title);
#else
  errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}

/// Out-of-line implementation with no arguments is handy for gdb.
void ScheduleDAGMI::viewGraph() {
  viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
}
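// A quick sketch of invoking the hook above from a debugger ("DAG" stands in
// for whatever ScheduleDAGMI pointer happens to be in scope; a debug build
// with Graphviz installed is assumed):
//   (gdb) call DAG->viewGraph()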