//===- MachineScheduler.cpp - Machine Instruction Scheduler --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include <queue>

using namespace llvm;

#define DEBUG_TYPE "misched"

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
                       cl::desc("Print critical path length to stdout"));
}

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));

static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
  cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
  cl::desc("Only schedule this MBB#"));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
  cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(true));

static cl::opt<bool> EnableLoadCluster("misched-cluster", cl::Hidden,
  cl::desc("Enable load clustering."), cl::init(true));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));
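// Note: the options above are ordinary cl::opts, so they can be exercised
// from any cl-driven tool. An illustrative invocation (not taken from this
// file):
//   llc -verify-misched -misched-cutoff=20 foo.ll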
// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}
void ScheduleDAGMutation::anchor() {}

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
    MF(nullptr), MLI(nullptr), MDT(nullptr), PassConfig(nullptr), AA(nullptr),
    LIS(nullptr) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// Base class for a machine scheduler class that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}

  void print(raw_ostream &O, const Module* = nullptr) const override;

protected:
  void scheduleRegions(ScheduleDAGInstrs &Scheduler);
};

/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createMachineScheduler();
};

/// PostMachineScheduler runs shortly before code emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
  PostMachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createPostMachineScheduler();
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "machine-scheduler",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "machine-scheduler",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineSchedulerBase(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

char PostMachineScheduler::ID = 0;

char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;

INITIALIZE_PASS(PostMachineScheduler, "postmisched",
                "PostRA Machine Instruction Scheduler", false, false)

PostMachineScheduler::PostMachineScheduler()
: MachineSchedulerBase(ID) {
  initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return nullptr;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);
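// A registered factory is how alternative schedulers plug in. As a hedged
// sketch (the factory name and registry entry below are hypothetical, not
// part of this file), a target or plugin could expose its own scheduler
// under -misched=<name> like so:
//
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, llvm::make_unique<GenericScheduler>(C));
//   }
//   static MachineSchedRegistry
//   MySchedRegistry("my-sched", "Run my custom scheduler.", createMySched);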
/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C);
static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C);

/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return const_cast<MachineInstr*>(
    &*priorNonDebug(MachineBasicBlock::const_iterator(I), Beg));
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  // Cast the return value to nonconst MachineInstr, then cast to an
  // instr_iterator, which does not check for null, finally return a
  // bundle_iterator.
  return MachineBasicBlock::instr_iterator(
    const_cast<MachineInstr*>(
      &*nextIfDebug(MachineBasicBlock::const_iterator(I), End)));
}

/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor != useDefaultMachineSched)
    return Ctor(this);

  // Get the default scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedLive(this);
}

/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AliasAnalysis>();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler);

  DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipOptnoneFunction(*mf.getFunction()))
    return false;

  const TargetSubtargetInfo &ST =
    mf.getTarget().getSubtarget<TargetSubtargetInfo>();
  if (!ST.enablePostMachineScheduler()) {
    DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
    return false;
  }
  DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  PassConfig = &getAnalysis<TargetPassConfig>();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  scheduleRegions(*Scheduler);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}

/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need the isCall to enforce
/// the boundary, but there would be no benefit to postRA scheduling across
/// calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
                            MachineBasicBlock *MBB,
                            MachineFunction *MF,
                            const TargetInstrInfo *TII,
                            bool IsPostRA) {
  return MI->isCall() || TII->isSchedulingBoundary(MI, MBB, *MF);
}

/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler) {
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  bool IsPostRA = Scheduler.isPostRA();

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(MBB);

#ifndef NDEBUG
    if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
      continue;
    if (SchedOnlyBlock.getNumOccurrences()
        && (int)SchedOnlyBlock != MBB->getNumber())
      continue;
#endif

    // Break the block into scheduling regions [I, RegionEnd), and schedule
    // each region as soon as it is discovered. RegionEnd points to the
    // scheduling boundary at the bottom of the region. The DAG does not
    // include RegionEnd, but the region does (i.e. the next RegionEnd is above
    // the previous RegionBegin). If the current block has no terminator then
    // RegionEnd == MBB->end() for the bottom region.
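    //
    // Illustrative example (hypothetical instructions, not taken from this
    // pass): in a block
    //   %a = ...; %b = ...; CALL @f; %c = ...; JMP
    // the call and the terminator are scheduling boundaries, so the bottom-up
    // walk first visits the region containing %c, then the region containing
    // %a and %b, scheduling each independently.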
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    //
    // MBB::size() uses instr_iterator to count. Here we need a bundle to count
    // as a single instruction.
    unsigned RemainingInstrs = std::distance(MBB->begin(), MBB->end());
    for (MachineBasicBlock::iterator RegionEnd = MBB->end();
         RegionEnd != MBB->begin(); RegionEnd = Scheduler.begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end() ||
          isSchedBoundary(std::prev(RegionEnd), MBB, MF, TII, IsPostRA)) {
        --RegionEnd;
        // Count the boundary instruction.
        --RemainingInstrs;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      unsigned NumRegionInstrs = 0;
      MachineBasicBlock::iterator I = RegionEnd;
      for (; I != MBB->begin(); --I, --RemainingInstrs) {
        if (isSchedBoundary(std::prev(I), MBB, MF, TII, IsPostRA))
          break;
        if (!I->isDebugValue())
          ++NumRegionInstrs;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == std::prev(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** " << ((Scheduler.isPostRA()) ? "PostRA " : "")
            << "MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n  From: " << *I << "    To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " RegionInstrs: " << NumRegionInstrs
            << " Remaining: " << RemainingInstrs << "\n");
      if (DumpCriticalPathLength) {
        errs() << MF->getName();
        errs() << ":BB# " << MBB->getNumber();
        errs() << " " << MBB->getName() << " \n";
      }

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler.begin();
    }
    assert(RemainingInstrs == 0 && "Instruction count mismatch!");
    Scheduler.finishBlock();
    if (Scheduler.isPostRA()) {
      // FIXME: Ideally, no further passes should rely on kill flags. However,
      // thumb2 size reduction is currently an exception.
      Scheduler.fixupKills(MBB);
    }
  }
  Scheduler.finalizeSchedule();
}

void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

LLVM_DUMP_METHOD
void ReadyQueue::dump() {
  dbgs() << Name << ": ";
  for (unsigned i = 0, e = Queue.size(); i < e; ++i)
    dbgs() << Queue[i]->NodeNum << " ";
  dbgs() << "\n";
}

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
// virtual registers.
//===----------------------------------------------------------------------===//

// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() {
}

bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
    SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();

  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
    PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();

  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  SchedImpl->initPolicy(begin, end, regioninstrs);
}

/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
  MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals
  if (LIS)
    LIS->handleMove(MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}

/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  // Build the DAG.
  buildSchedGraph(AA);

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    }
    else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
    }
    // Notify the scheduling strategy before updating the DAG.
    // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
    // runs, it can then use the accurate ReadyCycle time to determine whether
    // newly released nodes can move to the readyQ.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
    Mutations[i]->apply(this);
  }
}

void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                      SmallVectorImpl<SUnit*> &BotRoots) {
  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    SUnit *SU = &(*I);
    assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU->biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!I->NumPredsLeft)
      TopRoots.push_back(SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!I->NumSuccsLeft)
      BotRoots.push_back(SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = nullptr;
  NextClusterPred = nullptr;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
    SchedImpl->releaseTopNode(*I);
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == std::prev(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);

  SUPressureDiffs.clear();

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
}

// Set up the register pressure trackers for the top and bottom scheduled
// regions.
void ScheduleDAGMILive::initRegPressure() {
  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    DEBUG(dbgs() << "Live Thru: ";
          dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // For each live out vreg reduce the pressure change associated with other
  // uses of the same vreg below the live-out reaching def.
  updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd) {
    SmallVector<unsigned, 8> LiveUses;
    BotRPTracker.recede(&LiveUses);
    updatePressureDiffs(LiveUses);
  }

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also
  // track the max pressure in the scheduled code for these sets.
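  //
  // For example (illustrative numbers only, not derived from any particular
  // target): if a pressure set's limit is 16 registers and this region's max
  // pressure for that set is 18, the set is recorded below as critical and
  // its max pressure is tracked while scheduling.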
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      DEBUG(dbgs() << TRI->getRegPressureSetName(i)
            << " Limit " << Limit
            << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureChange(i));
    }
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].getPSet()) << " ";
        dbgs() << "\n");
}

void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
                        const std::vector<unsigned> &NewMaxPressure) {
  const PressureDiff &PDiff = getPressureDiff(SU);
  unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
  for (PressureDiff::const_iterator I = PDiff.begin(), E = PDiff.end();
       I != E; ++I) {
    if (!I->isValid())
      break;
    unsigned ID = I->getPSet();
    while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
      ++CritIdx;
    if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
      if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
          && NewMaxPressure[ID] <= INT16_MAX)
        RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
    }
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
    if (NewMaxPressure[ID] >= Limit - 2) {
      DEBUG(dbgs() << "  " << TRI->getRegPressureSetName(ID) << ": "
            << NewMaxPressure[ID] << " > " << Limit << "(+ "
            << BotRPTracker.getLiveThru()[ID] << " livethru)\n");
    }
  }
}

/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(ArrayRef<unsigned> LiveUses) {
  for (unsigned LUIdx = 0, LUEnd = LiveUses.size(); LUIdx != LUEnd; ++LUIdx) {
    /// FIXME: Currently assuming single-use physregs.
    unsigned Reg = LiveUses[LUIdx];
    DEBUG(dbgs() << "  LiveReg: " << PrintVRegOrUnit(Reg, TRI) << "\n");
    if (!TRI->isVirtualRegister(Reg))
      continue;

    // This may be called before CurrentBottom has been initialized. However,
    // BotRPTracker must have a valid position. We want the value live into the
    // instruction or live out of the block, so ask for the previous
    // instruction's live-out.
    const LiveInterval &LI = LIS->getInterval(Reg);
    VNInfo *VNI;
    MachineBasicBlock::const_iterator I =
      nextIfDebug(BotRPTracker.getPos(), BB->end());
    if (I == BB->end())
      VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    else {
      LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(I));
      VNI = LRQ.valueIn();
    }
    // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
    assert(VNI && "No live value at use.");
    for (VReg2UseMap::iterator
           UI = VRegUses.find(Reg); UI != VRegUses.end(); ++UI) {
      SUnit *SU = UI->SU;
      DEBUG(dbgs() << "  UpdateRegP: SU(" << SU->NodeNum << ") "
            << *SU->getInstr());
      // If this use comes before the reaching def, it cannot be a last use, so
      // decrease its pressure change.
      if (!SU->isScheduled && SU != &ExitSU) {
        LiveQueryResult LRQ
          = LI.Query(LIS->getInstructionIndex(SU->getInstr()));
        if (LRQ.valueIn() == VNI)
          getPressureDiff(SU).addPressureChange(Reg, true, &MRI);
      }
    }
  }
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMILive then it will want to override this virtual method in order
/// to update any specialized state.
void ScheduleDAGMILive::schedule() {
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  if (ShouldTrackPressure) {
    assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
    TopRPTracker.setPos(CurrentTop);
  }

  bool IsTopNode = false;
  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    updateQueues(SU, IsTopNode);

    if (DFSResult) {
      unsigned SubtreeID = DFSResult->getSubtreeID(SU);
      if (!ScheduledTrees.test(SubtreeID)) {
        ScheduledTrees.set(SubtreeID);
        DFSResult->scheduleTree(SubtreeID);
        SchedImpl->scheduleTree(SubtreeID);
      }
    }

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Build the DAG and setup three register pressure trackers.
void ScheduleDAGMILive::buildDAGWithRegPressure() {
  if (!ShouldTrackPressure) {
    RPTracker.reset();
    RegionCriticalPSets.clear();
    buildSchedGraph(AA);
    return;
  }

  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker, &SUPressureDiffs);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

void ScheduleDAGMILive::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single block loops. To handle loops that
/// span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes. For example, consider
/// the following instruction sequence where each instruction has unit latency
/// and defines an eponymous virtual register:
///
/// a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
///
/// This could be relevant to PostRA scheduling, but is currently implemented
/// assuming LiveIntervals.
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
  // This only applies to single-block loops.
  if (!BB->isSuccessor(BB))
    return 0;

  unsigned MaxCyclicLatency = 0;
  // Visit each live out vreg def to find def/use pairs that cross iterations.
  ArrayRef<unsigned> LiveOuts = RPTracker.getPressure().LiveOutRegs;
  for (ArrayRef<unsigned>::iterator RI = LiveOuts.begin(), RE = LiveOuts.end();
       RI != RE; ++RI) {
    unsigned Reg = *RI;
    if (!TRI->isVirtualRegister(Reg))
      continue;
    const LiveInterval &LI = LIS->getInterval(Reg);
    const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    if (!DefVNI)
      continue;

    MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
    const SUnit *DefSU = getSUnit(DefMI);
    if (!DefSU)
      continue;

    unsigned LiveOutHeight = DefSU->getHeight();
    unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
    // Visit all local users of the vreg def.
    for (VReg2UseMap::iterator
           UI = VRegUses.find(Reg); UI != VRegUses.end(); ++UI) {
      if (UI->SU == &ExitSU)
        continue;

      // Only consider uses of the phi.
      LiveQueryResult LRQ =
        LI.Query(LIS->getInstructionIndex(UI->SU->getInstr()));
      if (!LRQ.valueIn()->isPHIDef())
        continue;

      // Assume that a path spanning two iterations is a cycle, which could
      // overestimate in strange cases. This allows cyclic latency to be
      // estimated as the minimum slack of the vreg's depth or height.
      unsigned CyclicLatency = 0;
      if (LiveOutDepth > UI->SU->getDepth())
        CyclicLatency = LiveOutDepth - UI->SU->getDepth();

      unsigned LiveInHeight = UI->SU->getHeight() + DefSU->Latency;
      if (LiveInHeight > LiveOutHeight) {
        if (LiveInHeight - LiveOutHeight < CyclicLatency)
          CyclicLatency = LiveInHeight - LiveOutHeight;
      }
      else
        CyclicLatency = 0;

      DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
            << UI->SU->NodeNum << ") = " << CyclicLatency << "c\n");
      if (CyclicLatency > MaxCyclicLatency)
        MaxCyclicLatency = CyclicLatency;
    }
  }
  DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
  return MaxCyclicLatency;
}

/// Move an instruction and update register pressure.
void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    if (ShouldTrackPressure) {
      // Update top scheduled pressure.
      TopRPTracker.advance();
      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
      updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
    }
  }
  else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
    }
    if (ShouldTrackPressure) {
      // Update bottom scheduled pressure.
      SmallVector<unsigned, 8> LiveUses;
      BotRPTracker.recede(&LiveUses);
      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
      updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
      updatePressureDiffs(LiveUses);
    }
  }
}

//===----------------------------------------------------------------------===//
// LoadClusterMutation - DAG post-processing to cluster loads.
//===----------------------------------------------------------------------===//
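// An illustrative motivation (the instructions below are hypothetical, and
// whether a target actually combines the pair is a target property, not a
// guarantee of this mutation): two loads such as
//   %v0 = LDR %base, 4
//   %v1 = LDR %base, 8
// share a base register with adjacent offsets, so a weak cluster edge keeps
// them together, giving the target a chance to form a paired or combined load.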
namespace {
/// \brief Post-process the DAG to create cluster edges between neighboring
/// loads.
class LoadClusterMutation : public ScheduleDAGMutation {
  struct LoadInfo {
    SUnit *SU;
    unsigned BaseReg;
    unsigned Offset;
    LoadInfo(SUnit *su, unsigned reg, unsigned ofs)
      : SU(su), BaseReg(reg), Offset(ofs) {}

    bool operator<(const LoadInfo &RHS) const {
      return std::tie(BaseReg, Offset) < std::tie(RHS.BaseReg, RHS.Offset);
    }
  };

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
public:
  LoadClusterMutation(const TargetInstrInfo *tii,
                      const TargetRegisterInfo *tri)
    : TII(tii), TRI(tri) {}

  void apply(ScheduleDAGMI *DAG) override;
protected:
  void clusterNeighboringLoads(ArrayRef<SUnit*> Loads, ScheduleDAGMI *DAG);
};
} // anonymous

void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
                                                  ScheduleDAGMI *DAG) {
  SmallVector<LoadClusterMutation::LoadInfo,32> LoadRecords;
  for (unsigned Idx = 0, End = Loads.size(); Idx != End; ++Idx) {
    SUnit *SU = Loads[Idx];
    unsigned BaseReg;
    unsigned Offset;
    if (TII->getLdStBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
      LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset));
  }
  if (LoadRecords.size() < 2)
    return;
  std::sort(LoadRecords.begin(), LoadRecords.end());
  unsigned ClusterLength = 1;
  for (unsigned Idx = 0, End = LoadRecords.size(); Idx < (End - 1); ++Idx) {
    if (LoadRecords[Idx].BaseReg != LoadRecords[Idx+1].BaseReg) {
      ClusterLength = 1;
      continue;
    }

    SUnit *SUa = LoadRecords[Idx].SU;
    SUnit *SUb = LoadRecords[Idx+1].SU;
    if (TII->shouldClusterLoads(SUa->getInstr(), SUb->getInstr(), ClusterLength)
        && DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {

      DEBUG(dbgs() << "Cluster loads SU(" << SUa->NodeNum << ") - SU("
            << SUb->NodeNum << ")\n");
      // Copy successor edges from SUa to SUb. Interleaving computation
      // dependent on SUa can prevent load combining due to register reuse.
      // Predecessor edges do not need to be copied from SUb to SUa since nearby
      // loads should have effectively the same inputs.
      for (SUnit::const_succ_iterator
             SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
        if (SI->getSUnit() == SUb)
          continue;
        DEBUG(dbgs() << "  Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
        DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
      }
      ++ClusterLength;
    }
    else
      ClusterLength = 1;
  }
}

/// \brief Callback from DAG postProcessing to create cluster edges for loads.
void LoadClusterMutation::apply(ScheduleDAGMI *DAG) {
  // Map DAG NodeNum to store chain ID.
  DenseMap<unsigned, unsigned> StoreChainIDs;
  // Map each store chain to a set of dependent loads.
  SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->mayLoad())
      continue;
    unsigned ChainPredID = DAG->SUnits.size();
    for (SUnit::const_pred_iterator
           PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
      if (PI->isCtrl()) {
        ChainPredID = PI->getSUnit()->NodeNum;
        break;
      }
    }
    // Check if this chain-like pred has been seen
    // before. ChainPredID==MaxNodeID for loads at the top of the schedule.
    unsigned NumChains = StoreChainDependents.size();
    std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
      StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
    if (Result.second)
      StoreChainDependents.resize(NumChains + 1);
    StoreChainDependents[Result.first->second].push_back(SU);
  }
  // Iterate over the store chains.
  for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
    clusterNeighboringLoads(StoreChainDependents[Idx], DAG);
}

//===----------------------------------------------------------------------===//
// MacroFusion - DAG post-processing to encourage fusion of macro ops.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between instructions
/// that may be fused by the processor into a single operation.
class MacroFusion : public ScheduleDAGMutation {
  const TargetInstrInfo *TII;
public:
  MacroFusion(const TargetInstrInfo *tii): TII(tii) {}

  void apply(ScheduleDAGMI *DAG) override;
};
} // anonymous

/// \brief Callback from DAG postProcessing to create cluster edges to encourage
/// fused operations.
void MacroFusion::apply(ScheduleDAGMI *DAG) {
  // For now, assume targets can only fuse with the branch.
  MachineInstr *Branch = DAG->ExitSU.getInstr();
  if (!Branch)
    return;

  for (unsigned Idx = DAG->SUnits.size(); Idx > 0;) {
    SUnit *SU = &DAG->SUnits[--Idx];
    if (!TII->shouldScheduleAdjacent(SU->getInstr(), Branch))
      continue;

    // Create a single weak edge from SU to ExitSU. The only effect is to cause
    // bottom-up scheduling to heavily prioritize the clustered SU. There is no
    // need to copy predecessor edges from ExitSU to SU, since top-down
    // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
    // of SU, we could create an artificial edge from the deepest root, but it
    // hasn't been needed yet.
    bool Success = DAG->addEdge(&DAG->ExitSU, SDep(SU, SDep::Cluster));
    (void)Success;
    assert(Success && "No DAG nodes should be reachable from ExitSU");

    DEBUG(dbgs() << "Macro Fuse SU(" << SU->NodeNum << ")\n");
    break;
  }
}
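// For context (a typical target policy, assumed here rather than guaranteed
// by this pass): on x86, shouldScheduleAdjacent commonly returns true for a
// CMP or TEST feeding a conditional branch, so the cluster edge keeps the
// pair adjacent and lets the processor macro-fuse them into one operation.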
//===----------------------------------------------------------------------===//
// CopyConstrain - DAG post-processing to encourage copy elimination.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create weak edges from all uses of a copy to
/// the one use that defines the copy's source vreg, most likely an induction
/// variable increment.
class CopyConstrain : public ScheduleDAGMutation {
  // Transient state.
  SlotIndex RegionBeginIdx;
  // RegionEndIdx is the slot index of the last non-debug instruction in the
  // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
  SlotIndex RegionEndIdx;
public:
  CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}

  void apply(ScheduleDAGMI *DAG) override;

protected:
  void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
};
} // anonymous

/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
/// I0:     = dst
/// I1: src = ...
/// I2:     = dst
/// I3: dst = src (copy)
/// (create pred->succ edges I0->I1, I2->I1)
///
/// 2) Local copy:
/// I0: dst = src (copy)
/// I1:     = dst
/// I2: src = ...
/// I3:     = dst
/// (create pred->succ edges I1->I2, I3->I2)
///
/// Although the MachineScheduler is currently constrained to single blocks,
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
  LiveIntervals *LIS = DAG->getLIS();
  MachineInstr *Copy = CopySU->getInstr();

  // Check for pure vreg copies.
  unsigned SrcReg = Copy->getOperand(1).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
    return;

  unsigned DstReg = Copy->getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(DstReg))
    return;

  // Check if either the dest or source is local. If it's live across a back
  // edge, it's not local. Note that if both vregs are live across the back
  // edge, we cannot successfully constrain the copy without cyclic scheduling.
  unsigned LocalReg = DstReg;
  unsigned GlobalReg = SrcReg;
  LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
  if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
    LocalReg = SrcReg;
    GlobalReg = DstReg;
    LocalLI = &LIS->getInterval(LocalReg);
    if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
      return;
  }
  LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);

  // Find the global segment after the start of the local LI.
  LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
  // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
  // local live range. We could create edges from other global uses to the local
  // start, but the coalescer should have already eliminated these cases, so
  // don't bother dealing with it.
  if (GlobalSegment == GlobalLI->end())
    return;

  // If GlobalSegment is killed at the LocalLI->start, the call to find()
  // returned the next global segment. But if GlobalSegment overlaps with
  // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
  // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
  if (GlobalSegment->contains(LocalLI->beginIndex()))
    ++GlobalSegment;

  if (GlobalSegment == GlobalLI->end())
    return;

  // Check if GlobalLI contains a hole in the vicinity of LocalLI.
  if (GlobalSegment != GlobalLI->begin()) {
    // Two address defs have no hole.
    if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
                               GlobalSegment->start)) {
      return;
    }
    // If the prior global segment may be defined by the same two-address
    // instruction that also defines LocalLI, then we can't make a hole here.
    if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
                               LocalLI->beginIndex())) {
      return;
    }
    // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
    // it would be a disconnected component in the live range.
    assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
           "Disconnected LRG within the scheduling region.");
  }
  MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
  if (!GlobalDef)
    return;

  SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
  if (!GlobalSU)
    return;

  // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
  // constraining the uses of the last local def to precede GlobalDef.
  SmallVector<SUnit*,8> LocalUses;
  const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
  MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
  SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
  for (SUnit::const_succ_iterator
         I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
       I != E; ++I) {
    if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
      continue;
    if (I->getSUnit() == GlobalSU)
      continue;
    if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
      return;
    LocalUses.push_back(I->getSUnit());
  }
  // Open the top of the GlobalLI hole by constraining any earlier global uses
  // to precede the start of LocalLI.
  SmallVector<SUnit*,8> GlobalUses;
  MachineInstr *FirstLocalDef =
    LIS->getInstructionFromIndex(LocalLI->beginIndex());
  SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
  for (SUnit::const_pred_iterator
         I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
    if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
      continue;
    if (I->getSUnit() == FirstLocalSU)
      continue;
    if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
      return;
    GlobalUses.push_back(I->getSUnit());
  }
  DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
  // Add the weak edges.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Local use SU(" << (*I)->NodeNum << ") -> SU("
          << GlobalSU->NodeNum << ")\n");
    DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
  }
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Global use SU(" << (*I)->NodeNum << ") -> SU("
          << FirstLocalSU->NodeNum << ")\n");
    DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
  }
}

/// \brief Callback from DAG postProcessing to create weak edges to encourage
/// copy elimination.
void CopyConstrain::apply(ScheduleDAGMI *DAG) {
  assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");

  MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
  if (FirstPos == DAG->end())
    return;
  RegionBeginIdx = DAG->getLIS()->getInstructionIndex(&*FirstPos);
  RegionEndIdx = DAG->getLIS()->getInstructionIndex(
    &*priorNonDebug(DAG->end(), DAG->begin()));

  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->isCopy())
      continue;

    constrainLocalCopy(SU, static_cast<ScheduleDAGMILive*>(DAG));
  }
}

//===----------------------------------------------------------------------===//
// MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
// and possibly other custom schedulers.
1567 //===----------------------------------------------------------------------===// 1568 1569 static const unsigned InvalidCycle = ~0U; 1570 1571 SchedBoundary::~SchedBoundary() { delete HazardRec; } 1572 1573 void SchedBoundary::reset() { 1574 // A new HazardRec is created for each DAG and owned by SchedBoundary. 1575 // Destroying and reconstructing it is very expensive though. So keep 1576 // invalid, placeholder HazardRecs. 1577 if (HazardRec && HazardRec->isEnabled()) { 1578 delete HazardRec; 1579 HazardRec = nullptr; 1580 } 1581 Available.clear(); 1582 Pending.clear(); 1583 CheckPending = false; 1584 NextSUs.clear(); 1585 CurrCycle = 0; 1586 CurrMOps = 0; 1587 MinReadyCycle = UINT_MAX; 1588 ExpectedLatency = 0; 1589 DependentLatency = 0; 1590 RetiredMOps = 0; 1591 MaxExecutedResCount = 0; 1592 ZoneCritResIdx = 0; 1593 IsResourceLimited = false; 1594 ReservedCycles.clear(); 1595 #ifndef NDEBUG 1596 // Track the maximum number of stall cycles that could arise either from the 1597 // latency of a DAG edge or the number of cycles that a processor resource is 1598 // reserved (SchedBoundary::ReservedCycles). 1599 MaxObservedStall = 0; 1600 #endif 1601 // Reserve a zero-count for invalid CritResIdx. 1602 ExecutedResCounts.resize(1); 1603 assert(!ExecutedResCounts[0] && "nonzero count for bad resource"); 1604 } 1605 1606 void SchedRemainder:: 1607 init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) { 1608 reset(); 1609 if (!SchedModel->hasInstrSchedModel()) 1610 return; 1611 RemainingCounts.resize(SchedModel->getNumProcResourceKinds()); 1612 for (std::vector<SUnit>::iterator 1613 I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) { 1614 const MCSchedClassDesc *SC = DAG->getSchedClass(&*I); 1615 RemIssueCount += SchedModel->getNumMicroOps(I->getInstr(), SC) 1616 * SchedModel->getMicroOpFactor(); 1617 for (TargetSchedModel::ProcResIter 1618 PI = SchedModel->getWriteProcResBegin(SC), 1619 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) { 1620 unsigned PIdx = PI->ProcResourceIdx; 1621 unsigned Factor = SchedModel->getResourceFactor(PIdx); 1622 RemainingCounts[PIdx] += (Factor * PI->Cycles); 1623 } 1624 } 1625 } 1626 1627 void SchedBoundary:: 1628 init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) { 1629 reset(); 1630 DAG = dag; 1631 SchedModel = smodel; 1632 Rem = rem; 1633 if (SchedModel->hasInstrSchedModel()) { 1634 ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds()); 1635 ReservedCycles.resize(SchedModel->getNumProcResourceKinds(), InvalidCycle); 1636 } 1637 } 1638 1639 /// Compute the stall cycles based on this SUnit's ready time. Heuristics treat 1640 /// these "soft stalls" differently than the hard stall cycles based on CPU 1641 /// resources and computed by checkHazard(). A fully in-order model 1642 /// (MicroOpBufferSize==0) will not make use of this since instructions are not 1643 /// available for scheduling until they are ready. However, a weaker in-order 1644 /// model may use this for heuristics. For example, if a processor has in-order 1645 /// behavior when reading certain resources, this may come into play. 1646 unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) { 1647 if (!SU->isUnbuffered) 1648 return 0; 1649 1650 unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle); 1651 if (ReadyCycle > CurrCycle) 1652 return ReadyCycle - CurrCycle; 1653 return 0; 1654 } 1655 1656 /// Compute the next cycle at which the given processor resource can be 1657 /// scheduled. 
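///
/// As a hypothetical worked example (all numbers invented for illustration):
/// if an unpipelined divider was last recorded in ReservedCycles[PIdx] at
/// cycle 10 and the current operation needs it for Cycles=4, a bottom-up zone
/// reports cycle 14 as the next free slot, while a top-down zone reports
/// cycle 10 directly; a resource that has never been used reports cycle 0.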
unsigned SchedBoundary::
getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
  unsigned NextUnreserved = ReservedCycles[PIdx];
  // If this resource has never been used, always return cycle zero.
  if (NextUnreserved == InvalidCycle)
    return 0;
  // For bottom-up scheduling add the cycles needed for the current operation.
  if (!isTop())
    NextUnreserved += Cycles;
  return NextUnreserved;
}

/// Does this SU have a hazard within the current instruction group?
///
/// The scheduler supports two modes of hazard recognition. The first is the
/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
/// supports highly complicated in-order reservation tables
/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
///
/// The second is a streamlined mechanism that checks for hazards based on
/// simple counters that the scheduler itself maintains. It explicitly checks
/// for instruction dispatch limitations, including the number of micro-ops
/// that can dispatch per cycle.
///
/// TODO: Also check whether the SU must start a new group.
bool SchedBoundary::checkHazard(SUnit *SU) {
  if (HazardRec->isEnabled()
      && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
    return true;
  }
  unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
  if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
    DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
          << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
    return true;
  }
  if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned NRCycle = getNextResourceCycle(PI->ProcResourceIdx, PI->Cycles);
      if (NRCycle > CurrCycle) {
#ifndef NDEBUG
        MaxObservedStall = std::max(PI->Cycles, MaxObservedStall);
#endif
        DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") "
              << SchedModel->getResourceName(PI->ProcResourceIdx)
              << "=" << NRCycle << "c\n");
        return true;
      }
    }
  }
  return false;
}

// Find the unscheduled node in ReadySUs with the highest latency.
unsigned SchedBoundary::
findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
  SUnit *LateSU = nullptr;
  unsigned RemLatency = 0;
  for (ArrayRef<SUnit*>::iterator I = ReadySUs.begin(), E = ReadySUs.end();
       I != E; ++I) {
    unsigned L = getUnscheduledLatency(*I);
    if (L > RemLatency) {
      RemLatency = L;
      LateSU = *I;
    }
  }
  if (LateSU) {
    DEBUG(dbgs() << Available.getName() << " RemLatency SU("
          << LateSU->NodeNum << ") " << RemLatency << "c\n");
  }
  return RemLatency;
}

// Count resources in this zone and the remaining unscheduled
// instructions. Return the max count, scaled. Set OtherCritIdx to the
// critical resource index, or zero if the zone is issue limited.
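//
// For example (an illustrative scenario, not taken from a real target): if
// the zone has already executed 12 scaled units of an ALU-like resource and
// 20 scaled units of it remain in the unscheduled instructions, its count is
// 32; if that exceeds the scaled micro-op issue count, OtherCritIdx is set to
// that resource's index and 32 is returned.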
unsigned SchedBoundary::
getOtherResourceCount(unsigned &OtherCritIdx) {
  OtherCritIdx = 0;
  if (!SchedModel->hasInstrSchedModel())
    return 0;

  unsigned OtherCritCount = Rem->RemIssueCount
    + (RetiredMOps * SchedModel->getMicroOpFactor());
  DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
        << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
  for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
       PIdx != PEnd; ++PIdx) {
    unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
    if (OtherCount > OtherCritCount) {
      OtherCritCount = OtherCount;
      OtherCritIdx = PIdx;
    }
  }
  if (OtherCritIdx) {
    DEBUG(dbgs() << "  " << Available.getName() << " + Remain CritRes: "
          << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
          << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
  }
  return OtherCritCount;
}

void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) {
  assert(SU->getInstr() && "Scheduled SUnit must have instr");

#ifndef NDEBUG
  // ReadyCycle was bumped up to CurrCycle when this node was scheduled, but
  // CurrCycle may have been eagerly advanced immediately after scheduling, so
  // it may now be greater than ReadyCycle.
  if (ReadyCycle > CurrCycle)
    MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
#endif

  if (ReadyCycle < MinReadyCycle)
    MinReadyCycle = ReadyCycle;

  // Check for interlocks first. For the purpose of other heuristics, an
  // instruction that cannot issue appears as if it's not in the ReadyQueue.
  bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
  if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU))
    Pending.push(SU);
  else
    Available.push(SU);

  // Record this node as an immediate dependent of the scheduled node.
  NextSUs.insert(SU);
}

void SchedBoundary::releaseTopNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  releaseNode(SU, SU->TopReadyCycle);
}

void SchedBoundary::releaseBottomNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  releaseNode(SU, SU->BotReadyCycle);
}

/// Move the boundary of scheduled code by one cycle.
void SchedBoundary::bumpCycle(unsigned NextCycle) {
  if (SchedModel->getMicroOpBufferSize() == 0) {
    assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
    if (MinReadyCycle > NextCycle)
      NextCycle = MinReadyCycle;
  }
  // Update the current micro-ops, which will issue in the next cycle.
  unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
  CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;

  // Decrement DependentLatency based on the next cycle.
  if ((NextCycle - CurrCycle) > DependentLatency)
    DependentLatency = 0;
  else
    DependentLatency -= (NextCycle - CurrCycle);

  if (!HazardRec->isEnabled()) {
    // Bypass HazardRec virtual calls.
    CurrCycle = NextCycle;
  }
  else {
    // Bypass getHazardType calls in case of long latency.
    for (; CurrCycle != NextCycle; ++CurrCycle) {
      if (isTop())
        HazardRec->AdvanceCycle();
      else
        HazardRec->RecedeCycle();
    }
  }
  CheckPending = true;
  unsigned LFactor = SchedModel->getLatencyFactor();
  IsResourceLimited =
    (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
    > (int)LFactor;

  DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n');
}

void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
  ExecutedResCounts[PIdx] += Count;
  if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
    MaxExecutedResCount = ExecutedResCounts[PIdx];
}

/// Add the given processor resource to this scheduled zone.
///
/// \param Cycles indicates the number of consecutive (non-pipelined) cycles
/// during which this resource is consumed.
///
/// \return the next cycle at which the instruction may execute without
/// oversubscribing resources.
unsigned SchedBoundary::
countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
  unsigned Factor = SchedModel->getResourceFactor(PIdx);
  unsigned Count = Factor * Cycles;
  DEBUG(dbgs() << "  " << SchedModel->getResourceName(PIdx)
        << " +" << Cycles << "x" << Factor << "u\n");

  // Update the executed resource counts.
  incExecutedResources(PIdx, Count);
  assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
  Rem->RemainingCounts[PIdx] -= Count;

  // Check if this resource exceeds the current critical resource. If so, it
  // becomes the critical resource.
  if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
    ZoneCritResIdx = PIdx;
    DEBUG(dbgs() << "  *** Critical resource "
          << SchedModel->getResourceName(PIdx) << ": "
          << getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
  }
  // For reserved resources, record the highest cycle using the resource.
  unsigned NextAvailable = getNextResourceCycle(PIdx, Cycles);
  if (NextAvailable > CurrCycle) {
    DEBUG(dbgs() << "  Resource conflict: "
          << SchedModel->getProcResource(PIdx)->Name << " reserved until @"
          << NextAvailable << "\n");
  }
  return NextAvailable;
}

/// Move the boundary of scheduled code by one SUnit.
void SchedBoundary::bumpNode(SUnit *SU) {
  // Update the reservation table.
  if (HazardRec->isEnabled()) {
    if (!isTop() && SU->isCall) {
      // Calls are scheduled with their preceding instructions. For bottom-up
      // scheduling, clear the pipeline state before emitting.
      HazardRec->Reset();
    }
    HazardRec->EmitInstruction(SU);
  }
  // checkHazard should prevent scheduling multiple instructions per cycle that
  // exceed the issue width.
  const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
  unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
  assert(
      (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
      "Cannot schedule this instruction's MicroOps in the current cycle.");

  unsigned ReadyCycle = (isTop() ?
                         SU->TopReadyCycle : SU->BotReadyCycle);
  DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");

  unsigned NextCycle = CurrCycle;
  switch (SchedModel->getMicroOpBufferSize()) {
  case 0:
    assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
    break;
  case 1:
    if (ReadyCycle > NextCycle) {
      NextCycle = ReadyCycle;
      DEBUG(dbgs() << "  *** Stall until: " << ReadyCycle << "\n");
    }
    break;
  default:
    // We don't currently model the OOO reorder buffer, so consider all
    // scheduled MOps to be "retired". We do loosely model in-order resource
    // latency. If this instruction uses an in-order resource, account for any
    // likely stall cycles.
    if (SU->isUnbuffered && ReadyCycle > NextCycle)
      NextCycle = ReadyCycle;
    break;
  }
  RetiredMOps += IncMOps;

  // Update resource counts and critical resource.
  if (SchedModel->hasInstrSchedModel()) {
    unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
    assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
    Rem->RemIssueCount -= DecRemIssue;
    if (ZoneCritResIdx) {
      // Scale scheduled micro-ops for comparing with the critical resource.
      unsigned ScaledMOps =
        RetiredMOps * SchedModel->getMicroOpFactor();

      // If scaled micro-ops are now more than the previous critical resource
      // by a full cycle, then micro-ops issue becomes critical.
      if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
          >= (int)SchedModel->getLatencyFactor()) {
        ZoneCritResIdx = 0;
        DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
              << ScaledMOps / SchedModel->getLatencyFactor() << "c\n");
      }
    }
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned RCycle =
        countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
      if (RCycle > NextCycle)
        NextCycle = RCycle;
    }
    if (SU->hasReservedResource) {
      // For reserved resources, record the highest cycle using the resource.
      // For top-down scheduling, this is the cycle in which we schedule this
      // instruction plus the number of cycles the operation reserves the
      // resource. For bottom-up, it is simply the instruction's cycle.
      for (TargetSchedModel::ProcResIter
             PI = SchedModel->getWriteProcResBegin(SC),
             PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
        unsigned PIdx = PI->ProcResourceIdx;
        if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
          if (isTop()) {
            ReservedCycles[PIdx] =
              std::max(getNextResourceCycle(PIdx, 0), NextCycle + PI->Cycles);
          }
          else
            ReservedCycles[PIdx] = NextCycle;
        }
      }
    }
  }
  // Update ExpectedLatency and DependentLatency.
  unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
  unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
  if (SU->getDepth() > TopLatency) {
    TopLatency = SU->getDepth();
    DEBUG(dbgs() << "  " << Available.getName()
          << " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n");
  }
  if (SU->getHeight() > BotLatency) {
    BotLatency = SU->getHeight();
    DEBUG(dbgs() << "  " << Available.getName()
          << " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n");
  }
  // If we stall for any reason, bump the cycle.
  if (NextCycle > CurrCycle) {
    bumpCycle(NextCycle);
  }
  else {
    // After updating ZoneCritResIdx and ExpectedLatency, check if we're
    // resource limited. If a stall occurred, bumpCycle does this.
    unsigned LFactor = SchedModel->getLatencyFactor();
    IsResourceLimited =
      (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
      > (int)LFactor;
  }
  // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
  // resets CurrMOps. Loop to handle instructions with more MOps than issue in
  // one cycle. Since we commonly reach the max MOps here, opportunistically
  // bump the cycle to avoid uselessly checking everything in the readyQ.
  CurrMOps += IncMOps;
  while (CurrMOps >= SchedModel->getIssueWidth()) {
    DEBUG(dbgs() << "  *** Max MOps " << CurrMOps
          << " at cycle " << CurrCycle << '\n');
    bumpCycle(++NextCycle);
  }
  DEBUG(dumpScheduledState());
}

/// Release pending ready nodes into the available queue. This makes them
/// visible to heuristics.
void SchedBoundary::releasePending() {
  // If the available queue is empty, it is safe to reset MinReadyCycle.
  if (Available.empty())
    MinReadyCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
  for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
    SUnit *SU = *(Pending.begin()+i);
    unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;

    if (ReadyCycle < MinReadyCycle)
      MinReadyCycle = ReadyCycle;

    if (!IsBuffered && ReadyCycle > CurrCycle)
      continue;

    if (checkHazard(SU))
      continue;

    Available.push(SU);
    Pending.remove(Pending.begin()+i);
    --i; --e;
  }
  DEBUG(if (!Pending.empty()) Pending.dump());
  CheckPending = false;
}

/// Remove SU from the ready set for this boundary.
void SchedBoundary::removeReady(SUnit *SU) {
  if (Available.isInQueue(SU))
    Available.remove(Available.find(SU));
  else {
    assert(Pending.isInQueue(SU) && "bad ready count");
    Pending.remove(Pending.find(SU));
  }
}

/// If this queue only has one ready candidate, return it. As a side effect,
/// defer any nodes that now hit a hazard, and advance the cycle until at least
/// one node is ready. If multiple instructions are ready, return nullptr.
SUnit *SchedBoundary::pickOnlyChoice() {
  if (CheckPending)
    releasePending();

  if (CurrMOps > 0) {
    // Defer any ready instrs that now have a hazard.
    for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
      if (checkHazard(*I)) {
        Pending.push(*I);
        I = Available.remove(I);
        continue;
      }
      ++I;
    }
  }
  for (unsigned i = 0; Available.empty(); ++i) {
    // FIXME: Re-enable assert once PR20057 is resolved.
    // assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
    //        "permanent hazard");
    (void)i;
    bumpCycle(CurrCycle + 1);
    releasePending();
  }
  if (Available.size() == 1)
    return *Available.begin();
  return nullptr;
}

#ifndef NDEBUG
// This is useful information to dump after bumpNode.
// Note that the Queue contents are more useful before pickNodeFromQueue.
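//
// A sample dump, with all values and the resource name invented for
// illustration:
//   BotQ.A @6c
//     Retired: 11
//     Executed: 6c
//     Critical: 5c, 10 IntALU
//     ExpectedLatency: 4c
//     - Resource limited.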
void SchedBoundary::dumpScheduledState() {
  unsigned ResFactor;
  unsigned ResCount;
  if (ZoneCritResIdx) {
    ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
    ResCount = getResourceCount(ZoneCritResIdx);
  }
  else {
    ResFactor = SchedModel->getMicroOpFactor();
    ResCount = RetiredMOps * SchedModel->getMicroOpFactor();
  }
  unsigned LFactor = SchedModel->getLatencyFactor();
  dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
         << "  Retired: " << RetiredMOps;
  dbgs() << "\n  Executed: " << getExecutedCount() / LFactor << "c";
  dbgs() << "\n  Critical: " << ResCount / LFactor << "c, "
         << ResCount / ResFactor << " "
         << SchedModel->getResourceName(ZoneCritResIdx)
         << "\n  ExpectedLatency: " << ExpectedLatency << "c\n"
         << (IsResourceLimited ? "  - Resource" : "  - Latency")
         << " limited.\n";
}
#endif

//===----------------------------------------------------------------------===//
// GenericScheduler - Generic implementation of MachineSchedStrategy.
//===----------------------------------------------------------------------===//

void GenericSchedulerBase::SchedCandidate::
initResourceDelta(const ScheduleDAGMI *DAG,
                  const TargetSchedModel *SchedModel) {
  if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
    return;

  const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
  for (TargetSchedModel::ProcResIter
         PI = SchedModel->getWriteProcResBegin(SC),
         PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
    if (PI->ProcResourceIdx == Policy.ReduceResIdx)
      ResDelta.CritResources += PI->Cycles;
    if (PI->ProcResourceIdx == Policy.DemandResIdx)
      ResDelta.DemandedResources += PI->Cycles;
  }
}

/// Set the CandPolicy for a scheduling zone given the current resources and
/// latencies inside and outside the zone.
void GenericSchedulerBase::setPolicy(CandPolicy &Policy,
                                     bool IsPostRA,
                                     SchedBoundary &CurrZone,
                                     SchedBoundary *OtherZone) {
  // Apply preemptive heuristics based on the total latency and resources
  // inside and outside this zone. Potential stalls should be considered before
  // following this policy.

  // Compute remaining latency. We need this both to determine whether the
  // overall schedule has become latency-limited and whether the instructions
  // outside this zone are resource or latency limited.
  //
  // The "dependent" latency is updated incrementally during scheduling as the
  // max height/depth of scheduled nodes minus the cycles since it was
  // scheduled:
  //   DLat = max(N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
  //
  // The "independent" latency is the max ready queue depth:
  //   ILat = max N.depth for N in Available|Pending
  //
  // RemainingLatency is the greater of independent and dependent latency.
  unsigned RemLatency = CurrZone.getDependentLatency();
  RemLatency = std::max(RemLatency,
                        CurrZone.findMaxLatency(CurrZone.Available.elements()));
  RemLatency = std::max(RemLatency,
                        CurrZone.findMaxLatency(CurrZone.Pending.elements()));

  // Compute the critical resource outside the zone.
  unsigned OtherCritIdx = 0;
  unsigned OtherCount =
    OtherZone ?
OtherZone->getOtherResourceCount(OtherCritIdx) : 0; 2167 2168 bool OtherResLimited = false; 2169 if (SchedModel->hasInstrSchedModel()) { 2170 unsigned LFactor = SchedModel->getLatencyFactor(); 2171 OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor; 2172 } 2173 // Schedule aggressively for latency in PostRA mode. We don't check for 2174 // acyclic latency during PostRA, and highly out-of-order processors will 2175 // skip PostRA scheduling. 2176 if (!OtherResLimited) { 2177 if (IsPostRA || (RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath)) { 2178 Policy.ReduceLatency |= true; 2179 DEBUG(dbgs() << " " << CurrZone.Available.getName() 2180 << " RemainingLatency " << RemLatency << " + " 2181 << CurrZone.getCurrCycle() << "c > CritPath " 2182 << Rem.CriticalPath << "\n"); 2183 } 2184 } 2185 // If the same resource is limiting inside and outside the zone, do nothing. 2186 if (CurrZone.getZoneCritResIdx() == OtherCritIdx) 2187 return; 2188 2189 DEBUG( 2190 if (CurrZone.isResourceLimited()) { 2191 dbgs() << " " << CurrZone.Available.getName() << " ResourceLimited: " 2192 << SchedModel->getResourceName(CurrZone.getZoneCritResIdx()) 2193 << "\n"; 2194 } 2195 if (OtherResLimited) 2196 dbgs() << " RemainingLimit: " 2197 << SchedModel->getResourceName(OtherCritIdx) << "\n"; 2198 if (!CurrZone.isResourceLimited() && !OtherResLimited) 2199 dbgs() << " Latency limited both directions.\n"); 2200 2201 if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx) 2202 Policy.ReduceResIdx = CurrZone.getZoneCritResIdx(); 2203 2204 if (OtherResLimited) 2205 Policy.DemandResIdx = OtherCritIdx; 2206 } 2207 2208 #ifndef NDEBUG 2209 const char *GenericSchedulerBase::getReasonStr( 2210 GenericSchedulerBase::CandReason Reason) { 2211 switch (Reason) { 2212 case NoCand: return "NOCAND "; 2213 case PhysRegCopy: return "PREG-COPY"; 2214 case RegExcess: return "REG-EXCESS"; 2215 case RegCritical: return "REG-CRIT "; 2216 case Stall: return "STALL "; 2217 case Cluster: return "CLUSTER "; 2218 case Weak: return "WEAK "; 2219 case RegMax: return "REG-MAX "; 2220 case ResourceReduce: return "RES-REDUCE"; 2221 case ResourceDemand: return "RES-DEMAND"; 2222 case TopDepthReduce: return "TOP-DEPTH "; 2223 case TopPathReduce: return "TOP-PATH "; 2224 case BotHeightReduce:return "BOT-HEIGHT"; 2225 case BotPathReduce: return "BOT-PATH "; 2226 case NextDefUse: return "DEF-USE "; 2227 case NodeOrder: return "ORDER "; 2228 }; 2229 llvm_unreachable("Unknown reason!"); 2230 } 2231 2232 void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) { 2233 PressureChange P; 2234 unsigned ResIdx = 0; 2235 unsigned Latency = 0; 2236 switch (Cand.Reason) { 2237 default: 2238 break; 2239 case RegExcess: 2240 P = Cand.RPDelta.Excess; 2241 break; 2242 case RegCritical: 2243 P = Cand.RPDelta.CriticalMax; 2244 break; 2245 case RegMax: 2246 P = Cand.RPDelta.CurrentMax; 2247 break; 2248 case ResourceReduce: 2249 ResIdx = Cand.Policy.ReduceResIdx; 2250 break; 2251 case ResourceDemand: 2252 ResIdx = Cand.Policy.DemandResIdx; 2253 break; 2254 case TopDepthReduce: 2255 Latency = Cand.SU->getDepth(); 2256 break; 2257 case TopPathReduce: 2258 Latency = Cand.SU->getHeight(); 2259 break; 2260 case BotHeightReduce: 2261 Latency = Cand.SU->getHeight(); 2262 break; 2263 case BotPathReduce: 2264 Latency = Cand.SU->getDepth(); 2265 break; 2266 } 2267 dbgs() << " SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason); 2268 if (P.isValid()) 2269 dbgs() << " " << TRI->getRegPressureSetName(P.getPSet()) 2270 << ":" << 
P.getUnitInc() << " "; 2271 else 2272 dbgs() << " "; 2273 if (ResIdx) 2274 dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " "; 2275 else 2276 dbgs() << " "; 2277 if (Latency) 2278 dbgs() << " " << Latency << " cycles "; 2279 else 2280 dbgs() << " "; 2281 dbgs() << '\n'; 2282 } 2283 #endif 2284 2285 /// Return true if this heuristic determines order. 2286 static bool tryLess(int TryVal, int CandVal, 2287 GenericSchedulerBase::SchedCandidate &TryCand, 2288 GenericSchedulerBase::SchedCandidate &Cand, 2289 GenericSchedulerBase::CandReason Reason) { 2290 if (TryVal < CandVal) { 2291 TryCand.Reason = Reason; 2292 return true; 2293 } 2294 if (TryVal > CandVal) { 2295 if (Cand.Reason > Reason) 2296 Cand.Reason = Reason; 2297 return true; 2298 } 2299 Cand.setRepeat(Reason); 2300 return false; 2301 } 2302 2303 static bool tryGreater(int TryVal, int CandVal, 2304 GenericSchedulerBase::SchedCandidate &TryCand, 2305 GenericSchedulerBase::SchedCandidate &Cand, 2306 GenericSchedulerBase::CandReason Reason) { 2307 if (TryVal > CandVal) { 2308 TryCand.Reason = Reason; 2309 return true; 2310 } 2311 if (TryVal < CandVal) { 2312 if (Cand.Reason > Reason) 2313 Cand.Reason = Reason; 2314 return true; 2315 } 2316 Cand.setRepeat(Reason); 2317 return false; 2318 } 2319 2320 static bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand, 2321 GenericSchedulerBase::SchedCandidate &Cand, 2322 SchedBoundary &Zone) { 2323 if (Zone.isTop()) { 2324 if (Cand.SU->getDepth() > Zone.getScheduledLatency()) { 2325 if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(), 2326 TryCand, Cand, GenericSchedulerBase::TopDepthReduce)) 2327 return true; 2328 } 2329 if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(), 2330 TryCand, Cand, GenericSchedulerBase::TopPathReduce)) 2331 return true; 2332 } 2333 else { 2334 if (Cand.SU->getHeight() > Zone.getScheduledLatency()) { 2335 if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(), 2336 TryCand, Cand, GenericSchedulerBase::BotHeightReduce)) 2337 return true; 2338 } 2339 if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(), 2340 TryCand, Cand, GenericSchedulerBase::BotPathReduce)) 2341 return true; 2342 } 2343 return false; 2344 } 2345 2346 static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand, 2347 bool IsTop) { 2348 DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ") 2349 << GenericSchedulerBase::getReasonStr(Cand.Reason) << '\n'); 2350 } 2351 2352 void GenericScheduler::initialize(ScheduleDAGMI *dag) { 2353 assert(dag->hasVRegLiveness() && 2354 "(PreRA)GenericScheduler needs vreg liveness"); 2355 DAG = static_cast<ScheduleDAGMILive*>(dag); 2356 SchedModel = DAG->getSchedModel(); 2357 TRI = DAG->TRI; 2358 2359 Rem.init(DAG, SchedModel); 2360 Top.init(DAG, SchedModel, &Rem); 2361 Bot.init(DAG, SchedModel, &Rem); 2362 2363 // Initialize resource counts. 2364 2365 // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or 2366 // are disabled, then these HazardRecs will be disabled. 2367 const InstrItineraryData *Itin = SchedModel->getInstrItineraries(); 2368 if (!Top.HazardRec) { 2369 Top.HazardRec = 2370 DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer( 2371 Itin, DAG); 2372 } 2373 if (!Bot.HazardRec) { 2374 Bot.HazardRec = 2375 DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer( 2376 Itin, DAG); 2377 } 2378 } 2379 2380 /// Initialize the per-region scheduling policy. 
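///
/// A subtarget may adjust the defaults computed here through the
/// overrideSchedPolicy() hook invoked below; a minimal hypothetical override
/// might simply do:
///
///   Policy.OnlyBottomUp = false;        // permit bidirectional scheduling
///   Policy.OnlyTopDown = false;
///   Policy.ShouldTrackPressure = true;  // always track register pressure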
void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
                                  MachineBasicBlock::iterator End,
                                  unsigned NumRegionInstrs) {
  const MachineFunction &MF = *Begin->getParent()->getParent();
  const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();

  // Avoid setting up the register pressure tracker for small regions to save
  // compile time. As a rough heuristic, only track pressure when the number of
  // schedulable instructions exceeds half the integer register file.
  RegionPolicy.ShouldTrackPressure = true;
  for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
    MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
    if (TLI->isTypeLegal(LegalIntVT)) {
      unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
        TLI->getRegClassFor(LegalIntVT));
      RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
    }
  }

  // For generic targets, we default to bottom-up, because it's simpler and
  // more compile-time optimizations have been implemented in that direction.
  RegionPolicy.OnlyBottomUp = true;

  // Allow the subtarget to override the default policy.
  MF.getSubtarget().overrideSchedPolicy(RegionPolicy, Begin, End,
                                        NumRegionInstrs);

  // After subtarget overrides, apply command line options.
  if (!EnableRegPressure)
    RegionPolicy.ShouldTrackPressure = false;

  // Check -misched-topdown/bottomup can force or unforce scheduling direction.
  // e.g. -misched-bottomup=false allows scheduling in both directions.
  assert((!ForceTopDown || !ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
  if (ForceBottomUp.getNumOccurrences() > 0) {
    RegionPolicy.OnlyBottomUp = ForceBottomUp;
    if (RegionPolicy.OnlyBottomUp)
      RegionPolicy.OnlyTopDown = false;
  }
  if (ForceTopDown.getNumOccurrences() > 0) {
    RegionPolicy.OnlyTopDown = ForceTopDown;
    if (RegionPolicy.OnlyTopDown)
      RegionPolicy.OnlyBottomUp = false;
  }
}

/// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
/// critical path by more cycles than it takes to drain the instruction buffer.
/// We estimate an upper bound on in-flight instructions as:
///
///   CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
///   InFlightIterations = AcyclicPath / CyclesPerIteration
///   InFlightResources = InFlightIterations * LoopResources
///
/// TODO: Check execution resources in addition to IssueCount.
void GenericScheduler::checkAcyclicLatency() {
  if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
    return;

  // Scaled number of cycles per loop iteration.
  unsigned IterCount =
    std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
             Rem.RemIssueCount);
  // Scaled acyclic critical path.
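  // (For intuition, a hypothetical example with latency and micro-op factors
  // of 1: CyclicCritPath=10c, CriticalPath=40c, and RemIssueCount=30 give
  // IterCount = max(10, 30) = 30 and InFlightCount = ceil((40*30)/30) = 40,
  // so the loop is flagged acyclic-latency-limited only if the micro-op
  // buffer holds fewer than 40 micro-ops.)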
  unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
  // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
  unsigned InFlightCount =
    (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
  unsigned BufferLimit =
    SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();

  Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;

  DEBUG(dbgs() << "IssueCycles="
        << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
        << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
        << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
        << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
        << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
        if (Rem.IsAcyclicLatencyLimited)
          dbgs() << "  ACYCLIC LATENCY LIMIT\n");
}

void GenericScheduler::registerRoots() {
  Rem.CriticalPath = DAG->ExitSU.getDepth();

  // Some roots may not feed into ExitSU. Check all of them in case.
  for (std::vector<SUnit*>::const_iterator
         I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
    if ((*I)->getDepth() > Rem.CriticalPath)
      Rem.CriticalPath = (*I)->getDepth();
  }
  DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
  if (DumpCriticalPathLength) {
    errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n";
  }

  if (EnableCyclicPath) {
    Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
    checkAcyclicLatency();
  }
}

static bool tryPressure(const PressureChange &TryP,
                        const PressureChange &CandP,
                        GenericSchedulerBase::SchedCandidate &TryCand,
                        GenericSchedulerBase::SchedCandidate &Cand,
                        GenericSchedulerBase::CandReason Reason) {
  int TryRank = TryP.getPSetOrMax();
  int CandRank = CandP.getPSetOrMax();
  // If both candidates affect the same set, go with the smallest increase.
  if (TryRank == CandRank) {
    return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
                   Reason);
  }
  // If one candidate decreases and the other increases, go with it.
  // Invalid candidates have UnitInc==0.
  if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
                 Reason)) {
    return true;
  }
  // If the candidates are decreasing pressure, reverse priority.
  if (TryP.getUnitInc() < 0)
    std::swap(TryRank, CandRank);
  return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
}

static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
  return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
}

/// Minimize physical register live ranges. Regalloc wants them adjacent to
/// their physreg def/use.
///
/// FIXME: This is an unnecessary check on the critical path. Most are
/// root/leaf copies which can be prescheduled. The rest (e.g. x86 MUL) could
/// be bundled with the operation that produces or consumes the physreg. We'll
/// do this when regalloc has support for parallel copies.
static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
  const MachineInstr *MI = SU->getInstr();
  if (!MI->isCopy())
    return 0;

  unsigned ScheduledOper = isTop ? 1 : 0;
  unsigned UnscheduledOper = isTop ? 0 : 1;
  // If we have already scheduled the physreg producer/consumer, immediately
  // schedule the copy.
  if (TargetRegisterInfo::isPhysicalRegister(
        MI->getOperand(ScheduledOper).getReg()))
    return 1;
  // If the physreg is at the boundary, defer it. Otherwise schedule it
  // immediately to free the dependent. We can hoist the copy later.
  bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
  if (TargetRegisterInfo::isPhysicalRegister(
        MI->getOperand(UnscheduledOper).getReg()))
    return AtBoundary ? -1 : 1;
  return 0;
}

/// Apply a set of heuristics to a new candidate. Heuristics are currently
/// hierarchical. This may be more efficient than a graduated cost model
/// because we don't need to evaluate all aspects of the model for each node
/// in the queue. But it's really done to make the heuristics easier to debug
/// and statistically analyze.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
/// \param Zone describes the scheduled zone that we are extending.
/// \param RPTracker describes reg pressure within the scheduled zone.
/// \param TempTracker is a scratch pressure tracker to reuse in queries.
void GenericScheduler::tryCandidate(SchedCandidate &Cand,
                                    SchedCandidate &TryCand,
                                    SchedBoundary &Zone,
                                    const RegPressureTracker &RPTracker,
                                    RegPressureTracker &TempTracker) {

  if (DAG->isTrackingPressure()) {
    // Always initialize TryCand's RPDelta.
    if (Zone.isTop()) {
      TempTracker.getMaxDownwardPressureDelta(
        TryCand.SU->getInstr(),
        TryCand.RPDelta,
        DAG->getRegionCriticalPSets(),
        DAG->getRegPressure().MaxSetPressure);
    }
    else {
      if (VerifyScheduling) {
        TempTracker.getMaxUpwardPressureDelta(
          TryCand.SU->getInstr(),
          &DAG->getPressureDiff(TryCand.SU),
          TryCand.RPDelta,
          DAG->getRegionCriticalPSets(),
          DAG->getRegPressure().MaxSetPressure);
      }
      else {
        RPTracker.getUpwardPressureDelta(
          TryCand.SU->getInstr(),
          DAG->getPressureDiff(TryCand.SU),
          TryCand.RPDelta,
          DAG->getRegionCriticalPSets(),
          DAG->getRegPressure().MaxSetPressure);
      }
    }
  }
  DEBUG(if (TryCand.RPDelta.Excess.isValid())
          dbgs() << "  SU(" << TryCand.SU->NodeNum << ") "
                 << TRI->getRegPressureSetName(TryCand.RPDelta.Excess.getPSet())
                 << ":" << TryCand.RPDelta.Excess.getUnitInc() << "\n");

  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return;
  }

  if (tryGreater(biasPhysRegCopy(TryCand.SU, Zone.isTop()),
                 biasPhysRegCopy(Cand.SU, Zone.isTop()),
                 TryCand, Cand, PhysRegCopy))
    return;

  // Avoid exceeding the target's limit. If signed PSetID is negative, it is
  // invalid; convert it to INT_MAX to give it lowest priority.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
                                               Cand.RPDelta.Excess,
                                               TryCand, Cand, RegExcess))
    return;

  // Avoid increasing the max critical pressure in the scheduled region.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
                                               Cand.RPDelta.CriticalMax,
                                               TryCand, Cand, RegCritical))
    return;

  // For loops that are acyclic path limited, aggressively schedule for
  // latency. This can result in very long dependence chains scheduled in
  // sequence, so once every cycle (when CurrMOps == 0), switch to normal
  // heuristics.
2618 if (Rem.IsAcyclicLatencyLimited && !Zone.getCurrMOps() 2619 && tryLatency(TryCand, Cand, Zone)) 2620 return; 2621 2622 // Prioritize instructions that read unbuffered resources by stall cycles. 2623 if (tryLess(Zone.getLatencyStallCycles(TryCand.SU), 2624 Zone.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall)) 2625 return; 2626 2627 // Keep clustered nodes together to encourage downstream peephole 2628 // optimizations which may reduce resource requirements. 2629 // 2630 // This is a best effort to set things up for a post-RA pass. Optimizations 2631 // like generating loads of multiple registers should ideally be done within 2632 // the scheduler pass by combining the loads during DAG postprocessing. 2633 const SUnit *NextClusterSU = 2634 Zone.isTop() ? DAG->getNextClusterSucc() : DAG->getNextClusterPred(); 2635 if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU, 2636 TryCand, Cand, Cluster)) 2637 return; 2638 2639 // Weak edges are for clustering and other constraints. 2640 if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()), 2641 getWeakLeft(Cand.SU, Zone.isTop()), 2642 TryCand, Cand, Weak)) { 2643 return; 2644 } 2645 // Avoid increasing the max pressure of the entire region. 2646 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax, 2647 Cand.RPDelta.CurrentMax, 2648 TryCand, Cand, RegMax)) 2649 return; 2650 2651 // Avoid critical resource consumption and balance the schedule. 2652 TryCand.initResourceDelta(DAG, SchedModel); 2653 if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources, 2654 TryCand, Cand, ResourceReduce)) 2655 return; 2656 if (tryGreater(TryCand.ResDelta.DemandedResources, 2657 Cand.ResDelta.DemandedResources, 2658 TryCand, Cand, ResourceDemand)) 2659 return; 2660 2661 // Avoid serializing long latency dependence chains. 2662 // For acyclic path limited loops, latency was already checked above. 2663 if (Cand.Policy.ReduceLatency && !Rem.IsAcyclicLatencyLimited 2664 && tryLatency(TryCand, Cand, Zone)) { 2665 return; 2666 } 2667 2668 // Prefer immediate defs/users of the last scheduled instruction. This is a 2669 // local pressure avoidance strategy that also makes the machine code 2670 // readable. 2671 if (tryGreater(Zone.isNextSU(TryCand.SU), Zone.isNextSU(Cand.SU), 2672 TryCand, Cand, NextDefUse)) 2673 return; 2674 2675 // Fall through to original instruction order. 2676 if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum) 2677 || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) { 2678 TryCand.Reason = NodeOrder; 2679 } 2680 } 2681 2682 /// Pick the best candidate from the queue. 2683 /// 2684 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during 2685 /// DAG building. To adjust for the current scheduling location we need to 2686 /// maintain the number of vreg uses remaining to be top-scheduled. 2687 void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone, 2688 const RegPressureTracker &RPTracker, 2689 SchedCandidate &Cand) { 2690 ReadyQueue &Q = Zone.Available; 2691 2692 DEBUG(Q.dump()); 2693 2694 // getMaxPressureDelta temporarily modifies the tracker. 2695 RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker); 2696 2697 for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) { 2698 2699 SchedCandidate TryCand(Cand.Policy); 2700 TryCand.SU = *I; 2701 tryCandidate(Cand, TryCand, Zone, RPTracker, TempTracker); 2702 if (TryCand.Reason != NoCand) { 2703 // Initialize resource delta if needed in case future heuristics query it. 
2704 if (TryCand.ResDelta == SchedResourceDelta()) 2705 TryCand.initResourceDelta(DAG, SchedModel); 2706 Cand.setBest(TryCand); 2707 DEBUG(traceCandidate(Cand)); 2708 } 2709 } 2710 } 2711 2712 /// Pick the best candidate node from either the top or bottom queue. 2713 SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) { 2714 // Schedule as far as possible in the direction of no choice. This is most 2715 // efficient, but also provides the best heuristics for CriticalPSets. 2716 if (SUnit *SU = Bot.pickOnlyChoice()) { 2717 IsTopNode = false; 2718 DEBUG(dbgs() << "Pick Bot NOCAND\n"); 2719 return SU; 2720 } 2721 if (SUnit *SU = Top.pickOnlyChoice()) { 2722 IsTopNode = true; 2723 DEBUG(dbgs() << "Pick Top NOCAND\n"); 2724 return SU; 2725 } 2726 CandPolicy NoPolicy; 2727 SchedCandidate BotCand(NoPolicy); 2728 SchedCandidate TopCand(NoPolicy); 2729 // Set the bottom-up policy based on the state of the current bottom zone and 2730 // the instructions outside the zone, including the top zone. 2731 setPolicy(BotCand.Policy, /*IsPostRA=*/false, Bot, &Top); 2732 // Set the top-down policy based on the state of the current top zone and 2733 // the instructions outside the zone, including the bottom zone. 2734 setPolicy(TopCand.Policy, /*IsPostRA=*/false, Top, &Bot); 2735 2736 // Prefer bottom scheduling when heuristics are silent. 2737 pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand); 2738 assert(BotCand.Reason != NoCand && "failed to find the first candidate"); 2739 2740 // If either Q has a single candidate that provides the least increase in 2741 // Excess pressure, we can immediately schedule from that Q. 2742 // 2743 // RegionCriticalPSets summarizes the pressure within the scheduled region and 2744 // affects picking from either Q. If scheduling in one direction must 2745 // increase pressure for one of the excess PSets, then schedule in that 2746 // direction first to provide more freedom in the other direction. 2747 if ((BotCand.Reason == RegExcess && !BotCand.isRepeat(RegExcess)) 2748 || (BotCand.Reason == RegCritical 2749 && !BotCand.isRepeat(RegCritical))) 2750 { 2751 IsTopNode = false; 2752 tracePick(BotCand, IsTopNode); 2753 return BotCand.SU; 2754 } 2755 // Check if the top Q has a better candidate. 2756 pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand); 2757 assert(TopCand.Reason != NoCand && "failed to find the first candidate"); 2758 2759 // Choose the queue with the most important (lowest enum) reason. 2760 if (TopCand.Reason < BotCand.Reason) { 2761 IsTopNode = true; 2762 tracePick(TopCand, IsTopNode); 2763 return TopCand.SU; 2764 } 2765 // Otherwise prefer the bottom candidate, in node order if all else failed. 2766 IsTopNode = false; 2767 tracePick(BotCand, IsTopNode); 2768 return BotCand.SU; 2769 } 2770 2771 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy. 
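///
/// The flow implemented below: honor a forced direction when the region
/// policy requests one, otherwise pick bidirectionally; repeat until an
/// unscheduled SUnit is found, then remove it from whichever ready sets still
/// contain it.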
SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    if (RegionPolicy.OnlyTopDown) {
      SU = Top.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        SchedCandidate TopCand(NoPolicy);
        pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
        assert(TopCand.Reason != NoCand && "failed to find a candidate");
        tracePick(TopCand, true);
        SU = TopCand.SU;
      }
      IsTopNode = true;
    }
    else if (RegionPolicy.OnlyBottomUp) {
      SU = Bot.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        SchedCandidate BotCand(NoPolicy);
        pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
        assert(BotCand.Reason != NoCand && "failed to find a candidate");
        tracePick(BotCand, false);
        SU = BotCand.SU;
      }
      IsTopNode = false;
    }
    else {
      SU = pickNodeBidirectional(IsTopNode);
    }
  } while (SU->isScheduled);

  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
  return SU;
}

void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {

  MachineBasicBlock::iterator InsertPos = SU->getInstr();
  if (!isTop)
    ++InsertPos;
  SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;

  // Find already scheduled copies with a single physreg dependence and move
  // them just above the scheduled instruction.
  for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end();
       I != E; ++I) {
    if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg()))
      continue;
    SUnit *DepSU = I->getSUnit();
    if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
      continue;
    MachineInstr *Copy = DepSU->getInstr();
    if (!Copy->isCopy())
      continue;
    DEBUG(dbgs() << "  Rescheduling physreg copy ";
          I->getSUnit()->dump(DAG));
    DAG->moveInstruction(Copy, InsertPos);
  }
}

/// Update the scheduler's state after scheduling a node. This is the same node
/// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
/// update its state based on the current cycle before MachineSchedStrategy
/// does.
///
/// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
/// them here. See comments in biasPhysRegCopy.
void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  if (IsTopNode) {
    SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
    Top.bumpNode(SU);
    if (SU->hasPhysRegUses)
      reschedulePhysRegCopies(SU, true);
  }
  else {
    SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
    Bot.bumpNode(SU);
    if (SU->hasPhysRegDefs)
      reschedulePhysRegCopies(SU, false);
  }
}

/// Create the standard converging machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
    new ScheduleDAGMILive(C, make_unique<GenericScheduler>(C));
  // Register DAG post-processors.
  //
  // FIXME: extend the mutation API to allow earlier mutations to instantiate
  // data and pass it to later mutations. Have a single mutation that gathers
  // the interesting nodes in one pass.
  DAG->addMutation(make_unique<CopyConstrain>(DAG->TII, DAG->TRI));
  if (EnableLoadCluster && DAG->TII->enableClusterLoads())
    DAG->addMutation(make_unique<LoadClusterMutation>(DAG->TII, DAG->TRI));
  if (EnableMacroFusion)
    DAG->addMutation(make_unique<MacroFusion>(DAG->TII));
  return DAG;
}

static MachineSchedRegistry
GenericSchedRegistry("converge", "Standard converging scheduler.",
                     createGenericSchedLive);

//===----------------------------------------------------------------------===//
// PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
//===----------------------------------------------------------------------===//

void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
  DAG = Dag;
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  BotRoots.clear();

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
  // or are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  if (!Top.HazardRec) {
    Top.HazardRec =
      DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
        Itin, DAG);
  }
}

void PostGenericScheduler::registerRoots() {
  Rem.CriticalPath = DAG->ExitSU.getDepth();

  // Some roots may not feed into ExitSU. Check all of them in case.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = BotRoots.begin(), E = BotRoots.end(); I != E; ++I) {
    if ((*I)->getDepth() > Rem.CriticalPath)
      Rem.CriticalPath = (*I)->getDepth();
  }
  DEBUG(dbgs() << "Critical Path: (PGS-RR) " << Rem.CriticalPath << '\n');
  if (DumpCriticalPathLength) {
    errs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << " \n";
  }
}

/// Apply a set of heuristics to a new candidate for PostRA scheduling.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
void PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
                                        SchedCandidate &TryCand) {

  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return;
  }

  // Prioritize instructions that read unbuffered resources by stall cycles.
  if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
              Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
    return;

  // Avoid critical resource consumption and balance the schedule.
  if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
              TryCand, Cand, ResourceReduce))
    return;
  if (tryGreater(TryCand.ResDelta.DemandedResources,
                 Cand.ResDelta.DemandedResources,
                 TryCand, Cand, ResourceDemand))
    return;

  // Avoid serializing long latency dependence chains.
  if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
    return;
  }

  // Fall through to original instruction order.
  if (TryCand.SU->NodeNum < Cand.SU->NodeNum)
    TryCand.Reason = NodeOrder;
}

void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
  ReadyQueue &Q = Top.Available;

  DEBUG(Q.dump());

  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
    SchedCandidate TryCand(Cand.Policy);
    TryCand.SU = *I;
    TryCand.initResourceDelta(DAG, SchedModel);
    tryCandidate(Cand, TryCand);
    if (TryCand.Reason != NoCand) {
      Cand.setBest(TryCand);
      DEBUG(traceCandidate(Cand));
    }
  }
}

/// Pick the next node to schedule.
SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    SU = Top.pickOnlyChoice();
    if (!SU) {
      CandPolicy NoPolicy;
      SchedCandidate TopCand(NoPolicy);
      // Set the top-down policy based on the state of the current top zone
      // and the instructions outside the zone, including the bottom zone.
      setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
      pickNodeFromQueue(TopCand);
      assert(TopCand.Reason != NoCand && "failed to find a candidate");
      tracePick(TopCand, true);
      SU = TopCand.SU;
    }
  } while (SU->isScheduled);

  IsTopNode = true;
  Top.removeReady(SU);

  DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
  return SU;
}

/// Called after ScheduleDAGMI has scheduled an instruction and updated
/// scheduled/remaining flags in the DAG nodes.
void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
  Top.bumpNode(SU);
}

/// Create a generic scheduler with no vreg liveness or DAG mutation passes.
static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C) {
  return new ScheduleDAGMI(C, make_unique<PostGenericScheduler>(C),
                           /*IsPostRA=*/true);
}

//===----------------------------------------------------------------------===//
// ILP Scheduler. Currently for experimental analysis of heuristics.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Order nodes by the ILP metric.
struct ILPOrder {
  const SchedDFSResult *DFSResult;
  const BitVector *ScheduledTrees;
  bool MaximizeILP;

  ILPOrder(bool MaxILP)
    : DFSResult(nullptr), ScheduledTrees(nullptr), MaximizeILP(MaxILP) {}

  /// \brief Apply a less-than relation on node priority.
  ///
  /// (Return true if A comes after B in the Q.)
  bool operator()(const SUnit *A, const SUnit *B) const {
    unsigned SchedTreeA = DFSResult->getSubtreeID(A);
    unsigned SchedTreeB = DFSResult->getSubtreeID(B);
    if (SchedTreeA != SchedTreeB) {
      // Unscheduled trees have lower priority.
      if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
        return ScheduledTrees->test(SchedTreeB);

      // Trees with shallower connections have lower priority.
      if (DFSResult->getSubtreeLevel(SchedTreeA)
          != DFSResult->getSubtreeLevel(SchedTreeB)) {
        return DFSResult->getSubtreeLevel(SchedTreeA)
          < DFSResult->getSubtreeLevel(SchedTreeB);
      }
    }
    if (MaximizeILP)
      return DFSResult->getILP(A) < DFSResult->getILP(B);
    else
      return DFSResult->getILP(A) > DFSResult->getILP(B);
  }
};

/// \brief Schedule based on the ILP metric.
class ILPScheduler : public MachineSchedStrategy {
  ScheduleDAGMILive *DAG;
  ILPOrder Cmp;

  std::vector<SUnit*> ReadyQ;
public:
  ILPScheduler(bool MaximizeILP): DAG(nullptr), Cmp(MaximizeILP) {}

  void initialize(ScheduleDAGMI *dag) override {
    assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
    DAG = static_cast<ScheduleDAGMILive*>(dag);
    DAG->computeDFSResult();
    Cmp.DFSResult = DAG->getDFSResult();
    Cmp.ScheduledTrees = &DAG->getScheduledTrees();
    ReadyQ.clear();
  }

  void registerRoots() override {
    // Restore the heap in ReadyQ with the updated DFS results.
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  /// Callback to select the highest priority node from the ready Q.
  SUnit *pickNode(bool &IsTopNode) override {
    if (ReadyQ.empty()) return nullptr;
    std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
    SUnit *SU = ReadyQ.back();
    ReadyQ.pop_back();
    IsTopNode = false;
    DEBUG(dbgs() << "Pick node SU(" << SU->NodeNum << ")"
                 << " ILP: " << DAG->getDFSResult()->getILP(SU)
                 << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @"
                 << DAG->getDFSResult()->getSubtreeLevel(
                        DAG->getDFSResult()->getSubtreeID(SU)) << '\n'
                 << "Scheduling " << *SU->getInstr());
    return SU;
  }

  /// \brief Scheduler callback to notify that a new subtree is scheduled.
  void scheduleTree(unsigned SubtreeID) override {
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Callback after a node is scheduled. Mark a newly scheduled tree, notify
  /// DFSResults, and resort the priority Q.
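  /// (The ScheduleDAG itself marks each newly scheduled subtree and fires the
  /// scheduleTree() callback above; here it is enough to check that
  /// scheduling is proceeding bottom-up.)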
  void schedNode(SUnit *SU, bool IsTopNode) override {
    assert(!IsTopNode && "SchedDFSResult needs bottom-up");
  }

  void releaseTopNode(SUnit *) override { /*only called for top roots*/ }

  void releaseBottomNode(SUnit *SU) override {
    ReadyQ.push_back(SU);
    std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }
};
} // namespace

static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(true));
}
static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(false));
}
static MachineSchedRegistry ILPMaxRegistry(
  "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
static MachineSchedRegistry ILPMinRegistry(
  "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);

//===----------------------------------------------------------------------===//
// Machine Instruction Shuffler for Correctness Testing
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace {
/// Apply a less-than relation on the node order, which corresponds to the
/// instruction order prior to scheduling. IsReverse implements greater-than.
template<bool IsReverse>
struct SUnitOrder {
  bool operator()(SUnit *A, SUnit *B) const {
    if (IsReverse)
      return A->NodeNum > B->NodeNum;
    else
      return A->NodeNum < B->NodeNum;
  }
};

/// Reorder instructions as much as possible.
class InstructionShuffler : public MachineSchedStrategy {
  bool IsAlternating;
  bool IsTopDown;

  // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
  // gives nodes with a higher number higher priority, causing the latest
  // instructions to be scheduled first.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
    TopQ;
  // When scheduling bottom-up, use greater-than as the queue priority.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
    BottomQ;
public:
  InstructionShuffler(bool alternate, bool topdown)
    : IsAlternating(alternate), IsTopDown(topdown) {}

  void initialize(ScheduleDAGMI*) override {
    TopQ.clear();
    BottomQ.clear();
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  SUnit *pickNode(bool &IsTopNode) override {
    SUnit *SU;
    if (IsTopDown) {
      do {
        if (TopQ.empty()) return nullptr;
        SU = TopQ.top();
        TopQ.pop();
      } while (SU->isScheduled);
      IsTopNode = true;
    } else {
      do {
        if (BottomQ.empty()) return nullptr;
        SU = BottomQ.top();
        BottomQ.pop();
      } while (SU->isScheduled);
      IsTopNode = false;
    }
    if (IsAlternating)
      IsTopDown = !IsTopDown;
    return SU;
  }

  void schedNode(SUnit *SU, bool IsTopNode) override {}

  void releaseTopNode(SUnit *SU) override {
    TopQ.push(SU);
  }
  void releaseBottomNode(SUnit *SU) override {
    BottomQ.push(SU);
  }
};
} // namespace

static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
  bool Alternate = !ForceTopDown && !ForceBottomUp;
  bool TopDown = !ForceBottomUp;
  assert((TopDown || !ForceTopDown) &&
         "-misched-topdown incompatible with -misched-bottomup");
  return new ScheduleDAGMILive(
      C, make_unique<InstructionShuffler>(Alternate, TopDown));
}
static MachineSchedRegistry ShufflerRegistry(
  "shuffle", "Shuffle machine instructions alternating directions",
  createInstructionShuffler);
#endif // !NDEBUG

//===----------------------------------------------------------------------===//
// GraphWriter support for ScheduleDAGMILive.
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace llvm {

template<> struct GraphTraits<
  ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};

template<>
struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {

  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getGraphName(const ScheduleDAG *G) {
    return G->MF.getName();
  }

  static bool renderGraphFromBottomUp() {
    return true;
  }

  static bool isNodeHidden(const SUnit *Node) {
    return (Node->Preds.size() > 10 || Node->Succs.size() > 10);
  }

  static bool hasNodeAddressLabel(const SUnit *Node,
                                  const ScheduleDAG *Graph) {
    return false;
  }

  /// If you want to override the dot attributes printed for a particular
  /// edge, override this method.
  static std::string getEdgeAttributes(const SUnit *Node,
                                       SUnitIterator EI,
                                       const ScheduleDAG *Graph) {
    if (EI.isArtificialDep())
      return "color=cyan,style=dashed";
    if (EI.isCtrlDep())
      return "color=blue,style=dashed";
    return "";
  }

  static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
    std::string Str;
    raw_string_ostream SS(Str);
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    SS << "SU:" << SU->NodeNum;
    if (DFS)
      SS << " I:" << DFS->getNumInstrs(SU);
    return SS.str();
  }

  static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
    return G->getGraphNodeLabel(SU);
  }

  static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
    std::string Str("shape=Mrecord");
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    if (DFS) {
      Str += ",style=filled,fillcolor=\"#";
      Str += DOT::getColorString(DFS->getSubtreeID(N));
      Str += '"';
    }
    return Str;
  }
};
} // namespace llvm
#endif // NDEBUG

/// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
/// rendered using 'dot'.
///
void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
#ifndef NDEBUG
  ViewGraph(this, Name, false, Title);
#else
  errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}

/// Out-of-line implementation with no arguments is handy for gdb.
void ScheduleDAGMI::viewGraph() {
  viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
}
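
// The strategies registered in this file ("converge", "ilpmax", "ilpmin",
// and, in debug builds, "shuffle") are chosen through MachineSchedRegistry.
// A minimal sketch of exercising them, assuming the standard -misched=<name>
// registry option and a debug build of llc:
//
//   llc -misched=ilpmax input.ll -o -
//   llc -misched=shuffle -misched-topdown input.ll -o -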