//===- MachineScheduler.cpp - Machine Instruction Scheduler --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include <queue>

using namespace llvm;

#define DEBUG_TYPE "misched"

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
}

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));

static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
  cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
  cl::desc("Only schedule this MBB#"));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
  cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(true));

static cl::opt<bool> EnableLoadCluster("misched-cluster", cl::Hidden,
  cl::desc("Enable load clustering."), cl::init(true));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}
void ScheduleDAGMutation::anchor() {}

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
    MF(nullptr), MLI(nullptr), MDT(nullptr), PassConfig(nullptr), AA(nullptr),
    LIS(nullptr) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// Base class for a machine scheduler class that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}

  void print(raw_ostream &O, const Module* = nullptr) const override;

protected:
  void scheduleRegions(ScheduleDAGInstrs &Scheduler);
};

/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createMachineScheduler();
};

/// PostMachineScheduler runs shortly before code emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
  PostMachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createPostMachineScheduler();
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "misched",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "misched",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineSchedulerBase(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

char PostMachineScheduler::ID = 0;

char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;

INITIALIZE_PASS(PostMachineScheduler, "postmisched",
                "PostRA Machine Instruction Scheduler", false, false)

PostMachineScheduler::PostMachineScheduler()
: MachineSchedulerBase(ID) {
  initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
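// For illustration: the usual way a target supplies a custom scheduler to
// this pass is the TargetPassConfig hook queried by createMachineScheduler()
// below. A minimal sketch, assuming a hypothetical MyTargetPassConfig and
// MyStrategy (the hook itself is real; constructor shapes are approximate):
//
//   ScheduleDAGInstrs *
//   MyTargetPassConfig::createMachineScheduler(MachineSchedContext *C) const {
//     // Return a DAG driver wired to a custom strategy, or nullptr to defer
//     // to the generic scheduler.
//     return new ScheduleDAGMILive(C, make_unique<MyStrategy>(C));
//   }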
MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return nullptr;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C);
static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C);

/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return const_cast<MachineInstr*>(
    &*priorNonDebug(MachineBasicBlock::const_iterator(I), Beg));
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for(; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  // Cast the return value to nonconst MachineInstr, then cast to an
  // instr_iterator, which does not check for null, finally return a
  // bundle_iterator.
  return MachineBasicBlock::instr_iterator(
    const_cast<MachineInstr*>(
      &*nextIfDebug(MachineBasicBlock::const_iterator(I), End)));
}

/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor != useDefaultMachineSched)
    return Ctor(this);

  // Get the default scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedLive(this);
}
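// For illustration: experimental or out-of-tree schedulers can also register
// themselves with MachineSchedRegistry, which makes them selectable via
// -misched=<name> through MachineSchedOpt above. A minimal sketch with
// hypothetical names:
//
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, make_unique<MyStrategy>(C));
//   }
//   static MachineSchedRegistry
//   MySchedRegistry("my-sched", "Run my experimental scheduler.",
//                   createMySched);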
/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AliasAnalysis>();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler);

  DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipOptnoneFunction(*mf.getFunction()))
    return false;

  const TargetSubtargetInfo &ST =
    mf.getTarget().getSubtarget<TargetSubtargetInfo>();
  if (!ST.enablePostMachineScheduler()) {
    DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
    return false;
  }
  DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  PassConfig = &getAnalysis<TargetPassConfig>();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  scheduleRegions(*Scheduler);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}
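// For illustration: post-RA machine scheduling is gated by the subtarget hook
// checked above. A target opts in with an override along these lines
// (MySubtarget and isInOrderCore are hypothetical; the hook is real):
//
//   bool MySubtarget::enablePostMachineScheduler() const {
//     // Typically enabled for in-order cores, where scheduling after
//     // register allocation still pays off.
//     return isInOrderCore();
//   }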
/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need isCall to enforce
/// the boundary, but there would be no benefit to postRA scheduling across
/// calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
                            MachineBasicBlock *MBB,
                            MachineFunction *MF,
                            const TargetInstrInfo *TII,
                            bool IsPostRA) {
  return MI->isCall() || TII->isSchedulingBoundary(MI, MBB, *MF);
}

/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler) {
  const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
  bool IsPostRA = Scheduler.isPostRA();

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(MBB);

#ifndef NDEBUG
    if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
      continue;
    if (SchedOnlyBlock.getNumOccurrences()
        && (int)SchedOnlyBlock != MBB->getNumber())
      continue;
#endif

    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    //
    // MBB::size() uses instr_iterator to count. Here we need a bundle to count
    // as a single instruction.
    unsigned RemainingInstrs = std::distance(MBB->begin(), MBB->end());
    for(MachineBasicBlock::iterator RegionEnd = MBB->end();
        RegionEnd != MBB->begin(); RegionEnd = Scheduler.begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end() ||
          isSchedBoundary(std::prev(RegionEnd), MBB, MF, TII, IsPostRA)) {
        --RegionEnd;
        // Count the boundary instruction.
        --RemainingInstrs;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      unsigned NumRegionInstrs = 0;
      MachineBasicBlock::iterator I = RegionEnd;
      for(;I != MBB->begin(); --I, --RemainingInstrs, ++NumRegionInstrs) {
        if (isSchedBoundary(std::prev(I), MBB, MF, TII, IsPostRA))
          break;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == std::prev(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** " << ((Scheduler.isPostRA()) ? "PostRA " : "")
            << "MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n From: " << *I << " To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " RegionInstrs: " << NumRegionInstrs
            << " Remaining: " << RemainingInstrs << "\n");

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler.begin();
    }
    assert(RemainingInstrs == 0 && "Instruction count mismatch!");
    Scheduler.finishBlock();
    if (Scheduler.isPostRA()) {
      // FIXME: Ideally, no further passes should rely on kill flags. However,
      // thumb2 size reduction is currently an exception.
      Scheduler.fixupKills(MBB);
    }
  }
  Scheduler.finalizeSchedule();
}
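// Worked example for the region loop above: given a block
// "a; b; <call>; c; d; <terminator>", the driver first forms the bottom
// region and schedules {c, d}, then forms the next region and schedules
// {a, b}. The call and the terminator act as boundaries: they are counted in
// RemainingInstrs but never reordered.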
void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

LLVM_DUMP_METHOD
void ReadyQueue::dump() {
  dbgs() << Name << ": ";
  for (unsigned i = 0, e = Queue.size(); i < e; ++i)
    dbgs() << Queue[i]->NodeNum << " ";
  dbgs() << "\n";
}

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
// virtual registers.
//===----------------------------------------------------------------------===//

// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() {
}

bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}
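// For illustration: canAddEdge()/addEdge() are the interface that
// ScheduleDAGMutation subclasses use to constrain the DAG (see
// LoadClusterMutation, MacroFusion, and CopyConstrain below). A minimal
// sketch of a hypothetical mutation, purely for illustration:
//
//   struct ClusterLeavesMutation : ScheduleDAGMutation {
//     void apply(ScheduleDAGMI *DAG) override {
//       // Add a weak cluster edge from every DAG leaf to ExitSU so that
//       // bottom-up strategies prefer to pick the leaves early.
//       for (SUnit &SU : DAG->SUnits)
//         if (SU.Succs.empty())
//           DAG->addEdge(&DAG->ExitSU, SDep(&SU, SDep::Cluster));
//     }
//   };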
/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
    SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();

  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
    PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();

  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}
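// Worked example for the ready-cycle updates above: if SU was scheduled with
// TopReadyCycle 3 and its edge to SuccSU has latency 2, then
// SuccSU->TopReadyCycle is raised to at least 5. Once SuccSU's last strong
// predecessor is released, releaseTopNode() hands it to the strategy, which
// can compare the ready cycle against the current cycle when deciding how
// soon the node may issue.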
/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  SchedImpl->initPolicy(begin, end, regioninstrs);
}

/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
  MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals
  if (LIS)
    LIS->handleMove(MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}

/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  // Build the DAG.
  buildSchedGraph(AA);

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    }
    else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
    }
    // Notify the scheduling strategy before updating the DAG.
    // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
    // runs, it can then use the accurate ReadyCycle time to determine whether
    // newly released nodes can move to the readyQ.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
    Mutations[i]->apply(this);
  }
}

void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                      SmallVectorImpl<SUnit*> &BotRoots) {
  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    SUnit *SU = &(*I);
    assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU->biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!I->NumPredsLeft)
      TopRoots.push_back(SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!I->NumSuccsLeft)
      BotRoots.push_back(SU);
  }
  ExitSU.biasCriticalPath();
}
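// For illustration: the schedule() driver above is parameterized entirely by
// SchedImpl, so a new scheduling algorithm usually only needs a
// MachineSchedStrategy. A deliberately trivial sketch (hypothetical, not a
// useful heuristic) that always schedules top-down from a LIFO ready list:
//
//   struct LIFOStrategy : MachineSchedStrategy {
//     std::vector<SUnit*> Ready;
//     void initialize(ScheduleDAGMI *) override {}
//     void releaseTopNode(SUnit *SU) override { Ready.push_back(SU); }
//     void releaseBottomNode(SUnit *) override {}
//     SUnit *pickNode(bool &IsTopNode) override {
//       IsTopNode = true;
//       if (Ready.empty())
//         return nullptr;
//       SUnit *SU = Ready.back();
//       Ready.pop_back();
//       return SU;
//     }
//     void schedNode(SUnit *, bool) override {}
//   };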
/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = nullptr;
  NextClusterPred = nullptr;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
    SchedImpl->releaseTopNode(*I);
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == std::prev(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif
//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);

  SUPressureDiffs.clear();

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
}

// Set up the register pressure trackers for the top scheduled and bottom
// scheduled regions.
void ScheduleDAGMILive::initRegPressure() {
  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    DEBUG(dbgs() << "Live Thru: ";
          dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // For each live out vreg reduce the pressure change associated with other
  // uses of the same vreg below the live-out reaching def.
  updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd) {
    SmallVector<unsigned, 8> LiveUses;
    BotRPTracker.recede(&LiveUses);
    updatePressureDiffs(LiveUses);
  }

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      DEBUG(dbgs() << TRI->getRegPressureSetName(i)
            << " Limit " << Limit
            << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureChange(i));
    }
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].getPSet()) << " ";
        dbgs() << "\n");
}
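// Worked example for the excess-pressure bookkeeping above: if the target
// reports a limit of, say, 14 for some integer pressure set and RPTracker
// computes a MaxSetPressure of 17 for this region, that set lands in
// RegionCriticalPSets, and the strategy can bias its picks to shrink live
// ranges in that set. The numbers are illustrative; real limits come from
// RegClassInfo->getRegPressureSetLimit().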
void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
                        const std::vector<unsigned> &NewMaxPressure) {
  const PressureDiff &PDiff = getPressureDiff(SU);
  unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
  for (PressureDiff::const_iterator I = PDiff.begin(), E = PDiff.end();
       I != E; ++I) {
    if (!I->isValid())
      break;
    unsigned ID = I->getPSet();
    while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
      ++CritIdx;
    if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
      if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
          && NewMaxPressure[ID] <= INT16_MAX)
        RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
    }
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
    if (NewMaxPressure[ID] >= Limit - 2) {
      DEBUG(dbgs() << " " << TRI->getRegPressureSetName(ID) << ": "
            << NewMaxPressure[ID] << " > " << Limit << "(+ "
            << BotRPTracker.getLiveThru()[ID] << " livethru)\n");
    }
  }
}
/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(ArrayRef<unsigned> LiveUses) {
  for (unsigned LUIdx = 0, LUEnd = LiveUses.size(); LUIdx != LUEnd; ++LUIdx) {
    /// FIXME: Currently assuming single-use physregs.
    unsigned Reg = LiveUses[LUIdx];
    DEBUG(dbgs() << " LiveReg: " << PrintVRegOrUnit(Reg, TRI) << "\n");
    if (!TRI->isVirtualRegister(Reg))
      continue;

    // This may be called before CurrentBottom has been initialized. However,
    // BotRPTracker must have a valid position. We want the value live into the
    // instruction or live out of the block, so ask for the previous
    // instruction's live-out.
    const LiveInterval &LI = LIS->getInterval(Reg);
    VNInfo *VNI;
    MachineBasicBlock::const_iterator I =
      nextIfDebug(BotRPTracker.getPos(), BB->end());
    if (I == BB->end())
      VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    else {
      LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(I));
      VNI = LRQ.valueIn();
    }
    // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
    assert(VNI && "No live value at use.");
    for (VReg2UseMap::iterator
           UI = VRegUses.find(Reg); UI != VRegUses.end(); ++UI) {
      SUnit *SU = UI->SU;
      DEBUG(dbgs() << " UpdateRegP: SU(" << SU->NodeNum << ") "
            << *SU->getInstr());
      // If this use comes before the reaching def, it cannot be a last use, so
      // decrease its pressure change.
      if (!SU->isScheduled && SU != &ExitSU) {
        LiveQueryResult LRQ
          = LI.Query(LIS->getInstructionIndex(SU->getInstr()));
        if (LRQ.valueIn() == VNI)
          getPressureDiff(SU).addPressureChange(Reg, true, &MRI);
      }
    }
  }
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMILive then it will want to override this virtual method in order
/// to update any specialized state.
void ScheduleDAGMILive::schedule() {
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  if (ShouldTrackPressure) {
    assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
    TopRPTracker.setPos(CurrentTop);
  }

  bool IsTopNode = false;
  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    updateQueues(SU, IsTopNode);

    if (DFSResult) {
      unsigned SubtreeID = DFSResult->getSubtreeID(SU);
      if (!ScheduledTrees.test(SubtreeID)) {
        ScheduledTrees.set(SubtreeID);
        DFSResult->scheduleTree(SubtreeID);
        SchedImpl->scheduleTree(SubtreeID);
      }
    }

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Build the DAG and setup three register pressure trackers.
void ScheduleDAGMILive::buildDAGWithRegPressure() {
  if (!ShouldTrackPressure) {
    RPTracker.reset();
    RegionCriticalPSets.clear();
    buildSchedGraph(AA);
    return;
  }

  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker, &SUPressureDiffs);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}
void ScheduleDAGMILive::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single block loops. To handle loops that
/// span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes. For example, consider
/// the following instruction sequence where each instruction has unit latency
/// and defines an eponymous virtual register:
///
/// a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
///
/// This could be relevant to PostRA scheduling, but is currently implemented
/// assuming LiveIntervals.
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
  // This only applies to single block loops.
  if (!BB->isSuccessor(BB))
    return 0;

  unsigned MaxCyclicLatency = 0;
  // Visit each live out vreg def to find def/use pairs that cross iterations.
  ArrayRef<unsigned> LiveOuts = RPTracker.getPressure().LiveOutRegs;
  for (ArrayRef<unsigned>::iterator RI = LiveOuts.begin(), RE = LiveOuts.end();
       RI != RE; ++RI) {
    unsigned Reg = *RI;
    if (!TRI->isVirtualRegister(Reg))
      continue;
    const LiveInterval &LI = LIS->getInterval(Reg);
    const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    if (!DefVNI)
      continue;

    MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
    const SUnit *DefSU = getSUnit(DefMI);
    if (!DefSU)
      continue;

    unsigned LiveOutHeight = DefSU->getHeight();
    unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
    // Visit all local users of the vreg def.
    for (VReg2UseMap::iterator
           UI = VRegUses.find(Reg); UI != VRegUses.end(); ++UI) {
      if (UI->SU == &ExitSU)
        continue;

      // Only consider uses of the phi.
      LiveQueryResult LRQ =
        LI.Query(LIS->getInstructionIndex(UI->SU->getInstr()));
      if (!LRQ.valueIn()->isPHIDef())
        continue;

      // Assume that a path spanning two iterations is a cycle, which could
      // overestimate in strange cases. This allows cyclic latency to be
      // estimated as the minimum slack of the vreg's depth or height.
      unsigned CyclicLatency = 0;
      if (LiveOutDepth > UI->SU->getDepth())
        CyclicLatency = LiveOutDepth - UI->SU->getDepth();

      unsigned LiveInHeight = UI->SU->getHeight() + DefSU->Latency;
      if (LiveInHeight > LiveOutHeight) {
        if (LiveInHeight - LiveOutHeight < CyclicLatency)
          CyclicLatency = LiveInHeight - LiveOutHeight;
      }
      else
        CyclicLatency = 0;

      DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
            << UI->SU->NodeNum << ") = " << CyclicLatency << "c\n");
      if (CyclicLatency > MaxCyclicLatency)
        MaxCyclicLatency = CyclicLatency;
    }
  }
  DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
  return MaxCyclicLatency;
}
/// Move an instruction and update register pressure.
void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    if (ShouldTrackPressure) {
      // Update top scheduled pressure.
      TopRPTracker.advance();
      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
      updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
    }
  }
  else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
    }
    if (ShouldTrackPressure) {
      // Update bottom scheduled pressure.
      SmallVector<unsigned, 8> LiveUses;
      BotRPTracker.recede(&LiveUses);
      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
      updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
      updatePressureDiffs(LiveUses);
    }
  }
}
//===----------------------------------------------------------------------===//
// LoadClusterMutation - DAG post-processing to cluster loads.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between neighboring
/// loads.
class LoadClusterMutation : public ScheduleDAGMutation {
  struct LoadInfo {
    SUnit *SU;
    unsigned BaseReg;
    unsigned Offset;
    LoadInfo(SUnit *su, unsigned reg, unsigned ofs)
      : SU(su), BaseReg(reg), Offset(ofs) {}

    bool operator<(const LoadInfo &RHS) const {
      return std::tie(BaseReg, Offset) < std::tie(RHS.BaseReg, RHS.Offset);
    }
  };

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
public:
  LoadClusterMutation(const TargetInstrInfo *tii,
                      const TargetRegisterInfo *tri)
    : TII(tii), TRI(tri) {}

  void apply(ScheduleDAGMI *DAG) override;
protected:
  void clusterNeighboringLoads(ArrayRef<SUnit*> Loads, ScheduleDAGMI *DAG);
};
} // anonymous

void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
                                                  ScheduleDAGMI *DAG) {
  SmallVector<LoadClusterMutation::LoadInfo,32> LoadRecords;
  for (unsigned Idx = 0, End = Loads.size(); Idx != End; ++Idx) {
    SUnit *SU = Loads[Idx];
    unsigned BaseReg;
    unsigned Offset;
    if (TII->getLdStBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
      LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset));
  }
  if (LoadRecords.size() < 2)
    return;
  std::sort(LoadRecords.begin(), LoadRecords.end());
  unsigned ClusterLength = 1;
  for (unsigned Idx = 0, End = LoadRecords.size(); Idx < (End - 1); ++Idx) {
    if (LoadRecords[Idx].BaseReg != LoadRecords[Idx+1].BaseReg) {
      ClusterLength = 1;
      continue;
    }

    SUnit *SUa = LoadRecords[Idx].SU;
    SUnit *SUb = LoadRecords[Idx+1].SU;
    if (TII->shouldClusterLoads(SUa->getInstr(), SUb->getInstr(), ClusterLength)
        && DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {

      DEBUG(dbgs() << "Cluster loads SU(" << SUa->NodeNum << ") - SU("
            << SUb->NodeNum << ")\n");
      // Copy successor edges from SUa to SUb. Interleaving computation
      // dependent on SUa can prevent load combining due to register reuse.
      // Predecessor edges do not need to be copied from SUb to SUa since nearby
      // loads should have effectively the same inputs.
      for (SUnit::const_succ_iterator
             SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
        if (SI->getSUnit() == SUb)
          continue;
        DEBUG(dbgs() << " Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
        DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
      }
      ++ClusterLength;
    }
    else
      ClusterLength = 1;
  }
}
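// Worked example for the clustering loop above: loads with (base, offset)
// pairs (x0, 8), (x0, 0), and (x1, 4) sort to [(x0, 0), (x0, 8), (x1, 4)];
// the two x0 loads get a cluster edge, subject to shouldClusterLoads(), and
// the run restarts at the x1 load. Register names are illustrative only.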
/// \brief Callback from DAG postProcessing to create cluster edges for loads.
void LoadClusterMutation::apply(ScheduleDAGMI *DAG) {
  // Map DAG NodeNum to store chain ID.
  DenseMap<unsigned, unsigned> StoreChainIDs;
  // Map each store chain to a set of dependent loads.
  SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->mayLoad())
      continue;
    unsigned ChainPredID = DAG->SUnits.size();
    for (SUnit::const_pred_iterator
           PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
      if (PI->isCtrl()) {
        ChainPredID = PI->getSUnit()->NodeNum;
        break;
      }
    }
    // Check if this chain-like pred has been seen
    // before. ChainPredID==MaxNodeID for loads at the top of the schedule.
    unsigned NumChains = StoreChainDependents.size();
    std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
      StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
    if (Result.second)
      StoreChainDependents.resize(NumChains + 1);
    StoreChainDependents[Result.first->second].push_back(SU);
  }
  // Iterate over the store chains.
  for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
    clusterNeighboringLoads(StoreChainDependents[Idx], DAG);
}

//===----------------------------------------------------------------------===//
// MacroFusion - DAG post-processing to encourage fusion of macro ops.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between instructions
/// that may be fused by the processor into a single operation.
class MacroFusion : public ScheduleDAGMutation {
  const TargetInstrInfo *TII;
public:
  MacroFusion(const TargetInstrInfo *tii): TII(tii) {}

  void apply(ScheduleDAGMI *DAG) override;
};
} // anonymous

/// \brief Callback from DAG postProcessing to create cluster edges to encourage
/// fused operations.
void MacroFusion::apply(ScheduleDAGMI *DAG) {
  // For now, assume targets can only fuse with the branch.
  MachineInstr *Branch = DAG->ExitSU.getInstr();
  if (!Branch)
    return;

  for (unsigned Idx = DAG->SUnits.size(); Idx > 0;) {
    SUnit *SU = &DAG->SUnits[--Idx];
    if (!TII->shouldScheduleAdjacent(SU->getInstr(), Branch))
      continue;

    // Create a single weak edge from SU to ExitSU. The only effect is to cause
    // bottom-up scheduling to heavily prioritize the clustered SU. There is no
    // need to copy predecessor edges from ExitSU to SU, since top-down
    // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
    // of SU, we could create an artificial edge from the deepest root, but it
    // hasn't been needed yet.
    bool Success = DAG->addEdge(&DAG->ExitSU, SDep(SU, SDep::Cluster));
    (void)Success;
    assert(Success && "No DAG nodes should be reachable from ExitSU");

    DEBUG(dbgs() << "Macro Fuse SU(" << SU->NodeNum << ")\n");
    break;
  }
}
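// For illustration: which pairs fuse is a target decision made by
// TII->shouldScheduleAdjacent() above. A sketch of the kind of test a target
// might implement (MyISA opcodes are hypothetical; the hook is real):
//
//   bool MyInstrInfo::shouldScheduleAdjacent(MachineInstr *First,
//                                            MachineInstr *Second) const {
//     // E.g. fuse a compare with the conditional branch that consumes it.
//     return First->getOpcode() == MyISA::CMPri &&
//            Second->getOpcode() == MyISA::Bcc;
//   }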
//===----------------------------------------------------------------------===//
// CopyConstrain - DAG post-processing to encourage copy elimination.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create weak edges from all uses of a copy to
/// the one use that defines the copy's source vreg, most likely an induction
/// variable increment.
class CopyConstrain : public ScheduleDAGMutation {
  // Transient state.
  SlotIndex RegionBeginIdx;
  // RegionEndIdx is the slot index of the last non-debug instruction in the
  // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
  SlotIndex RegionEndIdx;
public:
  CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}

  void apply(ScheduleDAGMI *DAG) override;

protected:
  void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
};
} // anonymous

/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
/// I0:     = dst
/// I1: src = ...
/// I2:     = dst
/// I3: dst = src (copy)
/// (create pred->succ edges I0->I1, I2->I1)
///
/// 2) Local copy:
/// I0: dst = src (copy)
/// I1:     = dst
/// I2: src = ...
/// I3:     = dst
/// (create pred->succ edges I1->I2, I3->I2)
///
/// Although the MachineScheduler is currently constrained to single blocks,
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
  LiveIntervals *LIS = DAG->getLIS();
  MachineInstr *Copy = CopySU->getInstr();

  // Check for pure vreg copies.
  unsigned SrcReg = Copy->getOperand(1).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
    return;

  unsigned DstReg = Copy->getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(DstReg))
    return;

  // Check if either the dest or source is local. If it's live across a back
  // edge, it's not local. Note that if both vregs are live across the back
  // edge, we cannot successfully constrain the copy without cyclic scheduling.
  unsigned LocalReg = DstReg;
  unsigned GlobalReg = SrcReg;
  LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
  if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
    LocalReg = SrcReg;
    GlobalReg = DstReg;
    LocalLI = &LIS->getInterval(LocalReg);
    if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
      return;
  }
  LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);

  // Find the global segment after the start of the local LI.
  LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
  // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
  // local live range. We could create edges from other global uses to the local
  // start, but the coalescer should have already eliminated these cases, so
  // don't bother dealing with it.
  if (GlobalSegment == GlobalLI->end())
    return;

  // If GlobalSegment is killed at the LocalLI->start, the call to find()
  // returned the next global segment. But if GlobalSegment overlaps with
  // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
  // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
  if (GlobalSegment->contains(LocalLI->beginIndex()))
    ++GlobalSegment;

  if (GlobalSegment == GlobalLI->end())
    return;

  // Check if GlobalLI contains a hole in the vicinity of LocalLI.
  if (GlobalSegment != GlobalLI->begin()) {
    // Two address defs have no hole.
    if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
                               GlobalSegment->start)) {
      return;
    }
    // If the prior global segment may be defined by the same two-address
    // instruction that also defines LocalLI, then we can't make a hole here.
    if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
                               LocalLI->beginIndex())) {
      return;
    }
    // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
    // it would be a disconnected component in the live range.
    assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
           "Disconnected LRG within the scheduling region.");
  }
  MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
  if (!GlobalDef)
    return;

  SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
  if (!GlobalSU)
    return;

  // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
  // constraining the uses of the last local def to precede GlobalDef.
  SmallVector<SUnit*,8> LocalUses;
  const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
  MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
  SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
  for (SUnit::const_succ_iterator
         I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
       I != E; ++I) {
    if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
      continue;
    if (I->getSUnit() == GlobalSU)
      continue;
    if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
      return;
    LocalUses.push_back(I->getSUnit());
  }
  // Open the top of the GlobalLI hole by constraining any earlier global uses
  // to precede the start of LocalLI.
  SmallVector<SUnit*,8> GlobalUses;
  MachineInstr *FirstLocalDef =
    LIS->getInstructionFromIndex(LocalLI->beginIndex());
  SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
  for (SUnit::const_pred_iterator
         I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
    if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
      continue;
    if (I->getSUnit() == FirstLocalSU)
      continue;
    if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
      return;
    GlobalUses.push_back(I->getSUnit());
  }
  DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
  // Add the weak edges.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << " Local use SU(" << (*I)->NodeNum << ") -> SU("
          << GlobalSU->NodeNum << ")\n");
    DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
  }
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << " Global use SU(" << (*I)->NodeNum << ") -> SU("
          << FirstLocalSU->NodeNum << ")\n");
    DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
  }
}

/// \brief Callback from DAG postProcessing to create weak edges to encourage
/// copy elimination.
void CopyConstrain::apply(ScheduleDAGMI *DAG) {
  assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");

  MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
  if (FirstPos == DAG->end())
    return;
  RegionBeginIdx = DAG->getLIS()->getInstructionIndex(&*FirstPos);
  RegionEndIdx = DAG->getLIS()->getInstructionIndex(
    &*priorNonDebug(DAG->end(), DAG->begin()));

  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->isCopy())
      continue;

    constrainLocalCopy(SU, static_cast<ScheduleDAGMILive*>(DAG));
  }
}

//===----------------------------------------------------------------------===//
// MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
//===----------------------------------------------------------------------===//
// MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
// and possibly other custom schedulers.
//===----------------------------------------------------------------------===//

static const unsigned InvalidCycle = ~0U;

SchedBoundary::~SchedBoundary() { delete HazardRec; }

void SchedBoundary::reset() {
  // A new HazardRec is created for each DAG and owned by SchedBoundary.
  // Destroying and reconstructing it is very expensive though. So keep
  // invalid, placeholder HazardRecs.
  if (HazardRec && HazardRec->isEnabled()) {
    delete HazardRec;
    HazardRec = nullptr;
  }
  Available.clear();
  Pending.clear();
  CheckPending = false;
  NextSUs.clear();
  CurrCycle = 0;
  CurrMOps = 0;
  MinReadyCycle = UINT_MAX;
  ExpectedLatency = 0;
  DependentLatency = 0;
  RetiredMOps = 0;
  MaxExecutedResCount = 0;
  ZoneCritResIdx = 0;
  IsResourceLimited = false;
  ReservedCycles.clear();
#ifndef NDEBUG
  // Track the maximum number of stall cycles that could arise either from the
  // latency of a DAG edge or the number of cycles that a processor resource
  // is reserved (SchedBoundary::ReservedCycles).
  MaxObservedStall = 0;
#endif
  // Reserve a zero-count for invalid CritResIdx.
  ExecutedResCounts.resize(1);
  assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
}

void SchedRemainder::
init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
  reset();
  if (!SchedModel->hasInstrSchedModel())
    return;
  RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
  for (std::vector<SUnit>::iterator
         I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
    RemIssueCount += SchedModel->getNumMicroOps(I->getInstr(), SC)
      * SchedModel->getMicroOpFactor();
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned PIdx = PI->ProcResourceIdx;
      unsigned Factor = SchedModel->getResourceFactor(PIdx);
      RemainingCounts[PIdx] += (Factor * PI->Cycles);
    }
  }
}

void SchedBoundary::
init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
  reset();
  DAG = dag;
  SchedModel = smodel;
  Rem = rem;
  if (SchedModel->hasInstrSchedModel()) {
    ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds());
    ReservedCycles.resize(SchedModel->getNumProcResourceKinds(), InvalidCycle);
  }
}

/// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
/// these "soft stalls" differently than the hard stall cycles based on CPU
/// resources and computed by checkHazard(). A fully in-order model
/// (MicroOpBufferSize==0) will not make use of this since instructions are not
/// available for scheduling until they are ready. However, a weaker in-order
/// model may use this for heuristics. For example, if a processor has in-order
/// behavior when reading certain resources, this may come into play.
unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
  if (!SU->isUnbuffered)
    return 0;

  unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
  if (ReadyCycle > CurrCycle)
    return ReadyCycle - CurrCycle;
  return 0;
}
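
// For illustration, a hypothetical walk through getLatencyStallCycles: if an
// unbuffered SU becomes ready at cycle 7 while the zone sits at CurrCycle 5,
// the heuristics see a 2-cycle soft stall; a buffered SU in the same position
// reports 0 because the out-of-order window is assumed to hide the wait.
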
/// Compute the next cycle at which the given processor resource can be
/// scheduled.
unsigned SchedBoundary::
getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
  unsigned NextUnreserved = ReservedCycles[PIdx];
  // If this resource has never been used, always return cycle zero.
  if (NextUnreserved == InvalidCycle)
    return 0;
  // For bottom-up scheduling add the cycles needed for the current operation.
  if (!isTop())
    NextUnreserved += Cycles;
  return NextUnreserved;
}

/// Does this SU have a hazard within the current instruction group?
///
/// The scheduler supports two modes of hazard recognition. The first is the
/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
/// supports highly complicated in-order reservation tables
/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
///
/// The second is a streamlined mechanism that checks for hazards based on
/// simple counters that the scheduler itself maintains. It explicitly checks
/// for instruction dispatch limitations, including the number of micro-ops
/// that can dispatch per cycle.
///
/// TODO: Also check whether the SU must start a new group.
bool SchedBoundary::checkHazard(SUnit *SU) {
  if (HazardRec->isEnabled()
      && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
    return true;
  }
  unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
  if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
    DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
          << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
    return true;
  }
  if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned NRCycle = getNextResourceCycle(PI->ProcResourceIdx, PI->Cycles);
      if (NRCycle > CurrCycle) {
#ifndef NDEBUG
        MaxObservedStall = std::max(NRCycle - CurrCycle, MaxObservedStall);
#endif
        DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") "
              << SchedModel->getResourceName(PI->ProcResourceIdx)
              << "=" << NRCycle << "c\n");
        return true;
      }
    }
  }
  return false;
}
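
// An illustrative (hypothetical) dispatch-group hazard: on a 4-wide machine
// (IssueWidth == 4), if 3 micro-ops have already issued this cycle
// (CurrMOps == 3), a 2-uop candidate trips the CurrMOps + uops > IssueWidth
// test above and is held back until bumpCycle() opens the next group.
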
// Find the unscheduled node in ReadySUs with the highest latency.
unsigned SchedBoundary::
findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
  SUnit *LateSU = nullptr;
  unsigned RemLatency = 0;
  for (ArrayRef<SUnit*>::iterator I = ReadySUs.begin(), E = ReadySUs.end();
       I != E; ++I) {
    unsigned L = getUnscheduledLatency(*I);
    if (L > RemLatency) {
      RemLatency = L;
      LateSU = *I;
    }
  }
  if (LateSU) {
    DEBUG(dbgs() << Available.getName() << " RemLatency SU("
          << LateSU->NodeNum << ") " << RemLatency << "c\n");
  }
  return RemLatency;
}

// Count resources in this zone and the remaining unscheduled
// instructions. Return the max count, scaled. Set OtherCritIdx to the critical
// resource index, or zero if the zone is issue limited.
unsigned SchedBoundary::
getOtherResourceCount(unsigned &OtherCritIdx) {
  OtherCritIdx = 0;
  if (!SchedModel->hasInstrSchedModel())
    return 0;

  unsigned OtherCritCount = Rem->RemIssueCount
    + (RetiredMOps * SchedModel->getMicroOpFactor());
  DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
        << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
  for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
       PIdx != PEnd; ++PIdx) {
    unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
    if (OtherCount > OtherCritCount) {
      OtherCritCount = OtherCount;
      OtherCritIdx = PIdx;
    }
  }
  if (OtherCritIdx) {
    DEBUG(dbgs() << "  " << Available.getName() << " + Remain CritRes: "
          << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
          << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
  }
  return OtherCritCount;
}

void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) {
  assert(SU->getInstr() && "Scheduled SUnit must have instr");

#ifndef NDEBUG
  // ReadyCycle had been bumped up to the CurrCycle when this node was
  // scheduled, but CurrCycle may have been eagerly advanced immediately after
  // scheduling, so may now be greater than ReadyCycle.
  if (ReadyCycle > CurrCycle)
    MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
#endif

  if (ReadyCycle < MinReadyCycle)
    MinReadyCycle = ReadyCycle;

  // Check for interlocks first. For the purpose of other heuristics, an
  // instruction that cannot issue appears as if it's not in the ReadyQueue.
  bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
  if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU))
    Pending.push(SU);
  else
    Available.push(SU);

  // Record this node as an immediate dependent of the scheduled node.
  NextSUs.insert(SU);
}

void SchedBoundary::releaseTopNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  releaseNode(SU, SU->TopReadyCycle);
}

void SchedBoundary::releaseBottomNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  releaseNode(SU, SU->BotReadyCycle);
}

/// Move the boundary of scheduled code by one cycle.
void SchedBoundary::bumpCycle(unsigned NextCycle) {
  if (SchedModel->getMicroOpBufferSize() == 0) {
    assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
    if (MinReadyCycle > NextCycle)
      NextCycle = MinReadyCycle;
  }
  // Update the current micro-ops, which will issue in the next cycle.
  unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
  CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;

  // Decrement DependentLatency based on the next cycle.
  if ((NextCycle - CurrCycle) > DependentLatency)
    DependentLatency = 0;
  else
    DependentLatency -= (NextCycle - CurrCycle);

  if (!HazardRec->isEnabled()) {
    // Bypass HazardRec virtual calls.
    CurrCycle = NextCycle;
  }
  else {
    // Bypass getHazardType calls in case of long latency.
    for (; CurrCycle != NextCycle; ++CurrCycle) {
      if (isTop())
        HazardRec->AdvanceCycle();
      else
        HazardRec->RecedeCycle();
    }
  }
  CheckPending = true;
  unsigned LFactor = SchedModel->getLatencyFactor();
  IsResourceLimited =
    (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
    > (int)LFactor;

  DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n');
}

void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
  ExecutedResCounts[PIdx] += Count;
  if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
    MaxExecutedResCount = ExecutedResCounts[PIdx];
}

/// Add the given processor resource to this scheduled zone.
///
/// \param Cycles indicates the number of consecutive (non-pipelined) cycles
/// during which this resource is consumed.
///
/// \return the next cycle at which the instruction may execute without
/// oversubscribing resources.
unsigned SchedBoundary::
countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
  unsigned Factor = SchedModel->getResourceFactor(PIdx);
  unsigned Count = Factor * Cycles;
  DEBUG(dbgs() << "  " << SchedModel->getResourceName(PIdx)
        << " +" << Cycles << "x" << Factor << "u\n");

  // Update Executed resources counts.
  incExecutedResources(PIdx, Count);
  assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
  Rem->RemainingCounts[PIdx] -= Count;

  // Check if this resource exceeds the current critical resource. If so, it
  // becomes the critical resource.
  if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
    ZoneCritResIdx = PIdx;
    DEBUG(dbgs() << "  *** Critical resource "
          << SchedModel->getResourceName(PIdx) << ": "
          << getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
  }
  // For reserved resources, record the highest cycle using the resource.
  unsigned NextAvailable = getNextResourceCycle(PIdx, Cycles);
  if (NextAvailable > CurrCycle) {
    DEBUG(dbgs() << "  Resource conflict: "
          << SchedModel->getProcResource(PIdx)->Name << " reserved until @"
          << NextAvailable << "\n");
  }
  return NextAvailable;
}
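
// Resource counts above are kept in scaled "units" so that resources of
// different widths compare directly: a resource with more parallel units gets
// a smaller per-cycle factor, and dividing an accumulated count by the
// latency factor (as the DEBUG output does) converts it back to cycles. As a
// hypothetical instance, with a resource factor of 2 and a latency factor of
// 2, an operation holding the unit for 3 cycles contributes 3x2 = 6 units,
// which prints as 6/2 = 3 cycles.
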
/// Move the boundary of scheduled code by one SUnit.
void SchedBoundary::bumpNode(SUnit *SU) {
  // Update the reservation table.
  if (HazardRec->isEnabled()) {
    if (!isTop() && SU->isCall) {
      // Calls are scheduled with their preceding instructions. For bottom-up
      // scheduling, clear the pipeline state before emitting.
      HazardRec->Reset();
    }
    HazardRec->EmitInstruction(SU);
  }
  // checkHazard should prevent scheduling multiple instructions per cycle that
  // exceed the issue width.
  const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
  unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
  assert(
      (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
      "Cannot schedule this instruction's MicroOps in the current cycle.");

  unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
  DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");

  unsigned NextCycle = CurrCycle;
  switch (SchedModel->getMicroOpBufferSize()) {
  case 0:
    assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
    break;
  case 1:
    if (ReadyCycle > NextCycle) {
      NextCycle = ReadyCycle;
      DEBUG(dbgs() << "  *** Stall until: " << ReadyCycle << "\n");
    }
    break;
  default:
    // We don't currently model the OOO reorder buffer, so consider all
    // scheduled MOps to be "retired". We do loosely model in-order resource
    // latency. If this instruction uses an in-order resource, account for any
    // likely stall cycles.
    if (SU->isUnbuffered && ReadyCycle > NextCycle)
      NextCycle = ReadyCycle;
    break;
  }
  RetiredMOps += IncMOps;

  // Update resource counts and critical resource.
  if (SchedModel->hasInstrSchedModel()) {
    unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
    assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
    Rem->RemIssueCount -= DecRemIssue;
    if (ZoneCritResIdx) {
      // Scale scheduled micro-ops for comparing with the critical resource.
      unsigned ScaledMOps =
        RetiredMOps * SchedModel->getMicroOpFactor();

      // If scaled micro-ops are now more than the previous critical resource
      // by a full cycle, then micro-ops issue becomes critical.
      if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
          >= (int)SchedModel->getLatencyFactor()) {
        ZoneCritResIdx = 0;
        DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
              << ScaledMOps / SchedModel->getLatencyFactor() << "c\n");
      }
    }
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned RCycle =
        countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
      if (RCycle > NextCycle)
        NextCycle = RCycle;
    }
    if (SU->hasReservedResource) {
      // For reserved resources, record the highest cycle using the resource.
      // For top-down scheduling, this is the cycle in which we schedule this
      // instruction plus the number of cycles the operation reserves the
      // resource. For bottom-up, it is simply the instruction's cycle.
      for (TargetSchedModel::ProcResIter
             PI = SchedModel->getWriteProcResBegin(SC),
             PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
        unsigned PIdx = PI->ProcResourceIdx;
        if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
          ReservedCycles[PIdx] = isTop() ? NextCycle + PI->Cycles : NextCycle;
#ifndef NDEBUG
          MaxObservedStall = std::max(PI->Cycles, MaxObservedStall);
#endif
        }
      }
    }
  }
  // Update ExpectedLatency and DependentLatency.
  unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
  unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
  if (SU->getDepth() > TopLatency) {
    TopLatency = SU->getDepth();
    DEBUG(dbgs() << "  " << Available.getName()
          << " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n");
  }
  if (SU->getHeight() > BotLatency) {
    BotLatency = SU->getHeight();
    DEBUG(dbgs() << "  " << Available.getName()
          << " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n");
  }
  // If we stall for any reason, bump the cycle.
  if (NextCycle > CurrCycle) {
    bumpCycle(NextCycle);
  }
  else {
    // After updating ZoneCritResIdx and ExpectedLatency, check if we're
    // resource limited. If a stall occurred, bumpCycle does this.
    unsigned LFactor = SchedModel->getLatencyFactor();
    IsResourceLimited =
      (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
      > (int)LFactor;
  }
  // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
  // resets CurrMOps. Loop to handle instructions with more MOps than issue in
  // one cycle. Since we commonly reach the max MOps here, opportunistically
  // bump the cycle to avoid uselessly checking everything in the readyQ.
  CurrMOps += IncMOps;
  while (CurrMOps >= SchedModel->getIssueWidth()) {
    DEBUG(dbgs() << "  *** Max MOps " << CurrMOps
          << " at cycle " << CurrCycle << '\n');
    bumpCycle(++NextCycle);
  }
  DEBUG(dumpScheduledState());
}

/// Release pending ready nodes into the available queue. This makes them
/// visible to heuristics.
void SchedBoundary::releasePending() {
  // If the available queue is empty, it is safe to reset MinReadyCycle.
  if (Available.empty())
    MinReadyCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
  for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
    SUnit *SU = *(Pending.begin()+i);
    unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;

    if (ReadyCycle < MinReadyCycle)
      MinReadyCycle = ReadyCycle;

    if (!IsBuffered && ReadyCycle > CurrCycle)
      continue;

    if (checkHazard(SU))
      continue;

    Available.push(SU);
    Pending.remove(Pending.begin()+i);
    --i; --e;
  }
  DEBUG(if (!Pending.empty()) Pending.dump());
  CheckPending = false;
}

/// Remove SU from the ready set for this boundary.
void SchedBoundary::removeReady(SUnit *SU) {
  if (Available.isInQueue(SU))
    Available.remove(Available.find(SU));
  else {
    assert(Pending.isInQueue(SU) && "bad ready count");
    Pending.remove(Pending.find(SU));
  }
}

/// If this queue only has one ready candidate, return it. As a side effect,
/// defer any nodes that now hit a hazard, and advance the cycle until at least
/// one node is ready. If multiple instructions are ready, return NULL.
SUnit *SchedBoundary::pickOnlyChoice() {
  if (CheckPending)
    releasePending();

  if (CurrMOps > 0) {
    // Defer any ready instrs that now have a hazard.
    for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
      if (checkHazard(*I)) {
        Pending.push(*I);
        I = Available.remove(I);
        continue;
      }
      ++I;
    }
  }
  for (unsigned i = 0; Available.empty(); ++i) {
    assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
           "permanent hazard"); (void)i;
    bumpCycle(CurrCycle + 1);
    releasePending();
  }
  if (Available.size() == 1)
    return *Available.begin();
  return nullptr;
}
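
// Sketch of how a strategy typically consumes pickOnlyChoice() (hypothetical
// caller, mirroring GenericScheduler::pickNode below): take the forced pick
// when exactly one instruction is available, and only fall back to the full
// heuristic comparison when the zone offers a real choice.
//
//   if (SUnit *SU = Zone.pickOnlyChoice())
//     return SU;            // no competition; skip the heuristics
//   // otherwise run pickNodeFromQueue() over Zone.Available ...
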
#ifndef NDEBUG
// This is useful information to dump after bumpNode.
// Note that the Queue contents are more useful before pickNodeFromQueue.
void SchedBoundary::dumpScheduledState() {
  unsigned ResFactor;
  unsigned ResCount;
  if (ZoneCritResIdx) {
    ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
    ResCount = getResourceCount(ZoneCritResIdx);
  }
  else {
    ResFactor = SchedModel->getMicroOpFactor();
    ResCount = RetiredMOps * SchedModel->getMicroOpFactor();
  }
  unsigned LFactor = SchedModel->getLatencyFactor();
  dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
         << "  Retired: " << RetiredMOps;
  dbgs() << "\n  Executed: " << getExecutedCount() / LFactor << "c";
  dbgs() << "\n  Critical: " << ResCount / LFactor << "c, "
         << ResCount / ResFactor << " "
         << SchedModel->getResourceName(ZoneCritResIdx)
         << "\n  ExpectedLatency: " << ExpectedLatency << "c\n"
         << (IsResourceLimited ? "  - Resource" : "  - Latency")
         << " limited.\n";
}
#endif

//===----------------------------------------------------------------------===//
// GenericScheduler - Generic implementation of MachineSchedStrategy.
//===----------------------------------------------------------------------===//

void GenericSchedulerBase::SchedCandidate::
initResourceDelta(const ScheduleDAGMI *DAG,
                  const TargetSchedModel *SchedModel) {
  if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
    return;

  const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
  for (TargetSchedModel::ProcResIter
         PI = SchedModel->getWriteProcResBegin(SC),
         PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
    if (PI->ProcResourceIdx == Policy.ReduceResIdx)
      ResDelta.CritResources += PI->Cycles;
    if (PI->ProcResourceIdx == Policy.DemandResIdx)
      ResDelta.DemandedResources += PI->Cycles;
  }
}

/// Set the CandPolicy for a scheduling zone given the current resources and
/// latencies inside and outside the zone.
void GenericSchedulerBase::setPolicy(CandPolicy &Policy,
                                     bool IsPostRA,
                                     SchedBoundary &CurrZone,
                                     SchedBoundary *OtherZone) {
  // Apply preemptive heuristics based on the total latency and resources
  // inside and outside this zone. Potential stalls should be considered before
  // following this policy.

  // Compute remaining latency. We need this both to determine whether the
  // overall schedule has become latency-limited and whether the instructions
  // outside this zone are resource or latency limited.
  //
  // The "dependent" latency is updated incrementally during scheduling as the
  // max height/depth of scheduled nodes minus the cycles since it was
  // scheduled:
  //   DLat = max (N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
  //
  // The "independent" latency is the max ready queue depth:
  //   ILat = max N.depth for N in Available|Pending
  //
  // RemainingLatency is the greater of independent and dependent latency.
  unsigned RemLatency = CurrZone.getDependentLatency();
  RemLatency = std::max(RemLatency,
                        CurrZone.findMaxLatency(CurrZone.Available.elements()));
  RemLatency = std::max(RemLatency,
                        CurrZone.findMaxLatency(CurrZone.Pending.elements()));

  // Compute the critical resource outside the zone.
  unsigned OtherCritIdx = 0;
  unsigned OtherCount =
    OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;

  bool OtherResLimited = false;
  if (SchedModel->hasInstrSchedModel()) {
    unsigned LFactor = SchedModel->getLatencyFactor();
    OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
  }
  // Schedule aggressively for latency in PostRA mode. We don't check for
  // acyclic latency during PostRA, and highly out-of-order processors will
  // skip PostRA scheduling.
  if (!OtherResLimited) {
    if (IsPostRA || (RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath)) {
      Policy.ReduceLatency |= true;
      DEBUG(dbgs() << "  " << CurrZone.Available.getName()
            << " RemainingLatency " << RemLatency << " + "
            << CurrZone.getCurrCycle() << "c > CritPath "
            << Rem.CriticalPath << "\n");
    }
  }
  // If the same resource is limiting inside and outside the zone, do nothing.
  if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
    return;

  DEBUG(
    if (CurrZone.isResourceLimited()) {
      dbgs() << "  " << CurrZone.Available.getName() << " ResourceLimited: "
             << SchedModel->getResourceName(CurrZone.getZoneCritResIdx())
             << "\n";
    }
    if (OtherResLimited)
      dbgs() << "  RemainingLimit: "
             << SchedModel->getResourceName(OtherCritIdx) << "\n";
    if (!CurrZone.isResourceLimited() && !OtherResLimited)
      dbgs() << "  Latency limited both directions.\n");

  if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
    Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();

  if (OtherResLimited)
    Policy.DemandResIdx = OtherCritIdx;
}
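
// A hypothetical reading of the latency test above: with CriticalPath = 20,
// CurrCycle = 12, and RemLatency = 10, the zone would finish at cycle
// 22 > 20, so ReduceLatency is set and the comparators below start favoring
// depth/height reduction; if RemLatency were 6 instead, the slack keeps the
// policy focused on resources.
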
#ifndef NDEBUG
const char *GenericSchedulerBase::getReasonStr(
  GenericSchedulerBase::CandReason Reason) {
  switch (Reason) {
  case NoCand:         return "NOCAND    ";
  case PhysRegCopy:    return "PREG-COPY ";
  case RegExcess:      return "REG-EXCESS";
  case RegCritical:    return "REG-CRIT  ";
  case Stall:          return "STALL     ";
  case Cluster:        return "CLUSTER   ";
  case Weak:           return "WEAK      ";
  case RegMax:         return "REG-MAX   ";
  case ResourceReduce: return "RES-REDUCE";
  case ResourceDemand: return "RES-DEMAND";
  case TopDepthReduce: return "TOP-DEPTH ";
  case TopPathReduce:  return "TOP-PATH  ";
  case BotHeightReduce:return "BOT-HEIGHT";
  case BotPathReduce:  return "BOT-PATH  ";
  case NextDefUse:     return "DEF-USE   ";
  case NodeOrder:      return "ORDER     ";
  }
  llvm_unreachable("Unknown reason!");
}

void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
  PressureChange P;
  unsigned ResIdx = 0;
  unsigned Latency = 0;
  switch (Cand.Reason) {
  default:
    break;
  case RegExcess:
    P = Cand.RPDelta.Excess;
    break;
  case RegCritical:
    P = Cand.RPDelta.CriticalMax;
    break;
  case RegMax:
    P = Cand.RPDelta.CurrentMax;
    break;
  case ResourceReduce:
    ResIdx = Cand.Policy.ReduceResIdx;
    break;
  case ResourceDemand:
    ResIdx = Cand.Policy.DemandResIdx;
    break;
  case TopDepthReduce:
    Latency = Cand.SU->getDepth();
    break;
  case TopPathReduce:
    Latency = Cand.SU->getHeight();
    break;
  case BotHeightReduce:
    Latency = Cand.SU->getHeight();
    break;
  case BotPathReduce:
    Latency = Cand.SU->getDepth();
    break;
  }
  dbgs() << "  SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
  if (P.isValid())
    dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
           << ":" << P.getUnitInc() << " ";
  else
    dbgs() << "      ";
  if (ResIdx)
    dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
  else
    dbgs() << "         ";
  if (Latency)
    dbgs() << " " << Latency << " cycles ";
  else
    dbgs() << "          ";
  dbgs() << '\n';
}
#endif

/// Return true if this heuristic determines order.
static bool tryLess(int TryVal, int CandVal,
                    GenericSchedulerBase::SchedCandidate &TryCand,
                    GenericSchedulerBase::SchedCandidate &Cand,
                    GenericSchedulerBase::CandReason Reason) {
  if (TryVal < CandVal) {
    TryCand.Reason = Reason;
    return true;
  }
  if (TryVal > CandVal) {
    if (Cand.Reason > Reason)
      Cand.Reason = Reason;
    return true;
  }
  Cand.setRepeat(Reason);
  return false;
}

static bool tryGreater(int TryVal, int CandVal,
                       GenericSchedulerBase::SchedCandidate &TryCand,
                       GenericSchedulerBase::SchedCandidate &Cand,
                       GenericSchedulerBase::CandReason Reason) {
  if (TryVal > CandVal) {
    TryCand.Reason = Reason;
    return true;
  }
  if (TryVal < CandVal) {
    if (Cand.Reason > Reason)
      Cand.Reason = Reason;
    return true;
  }
  Cand.setRepeat(Reason);
  return false;
}

static bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
                       GenericSchedulerBase::SchedCandidate &Cand,
                       SchedBoundary &Zone) {
  if (Zone.isTop()) {
    if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
      if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
                  TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
        return true;
    }
    if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
                   TryCand, Cand, GenericSchedulerBase::TopPathReduce))
      return true;
  }
  else {
    if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
      if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
                  TryCand, Cand, GenericSchedulerBase::BotHeightReduce))
        return true;
    }
    if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
                   TryCand, Cand, GenericSchedulerBase::BotPathReduce))
      return true;
  }
  return false;
}

static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand,
                      bool IsTop) {
  DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
        << GenericSchedulerBase::getReasonStr(Cand.Reason) << '\n');
}
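
// Example of the three-way comparators above (values are hypothetical):
// tryLess(2, 5, TryCand, Cand, Stall) marks TryCand the winner with
// Reason = Stall and returns true; tryLess(5, 2, ...) keeps Cand but lowers
// Cand.Reason to Stall if that reason is more important (numerically
// smaller); a tie records a repeat and returns false so the next heuristic
// in the hierarchy can decide.
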
void GenericScheduler::initialize(ScheduleDAGMI *dag) {
  assert(dag->hasVRegLiveness() &&
         "(PreRA)GenericScheduler needs vreg liveness");
  DAG = static_cast<ScheduleDAGMILive*>(dag);
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  Bot.init(DAG, SchedModel, &Rem);

  // Initialize resource counts.

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
  // or are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  const TargetMachine &TM = DAG->MF.getTarget();
  if (!Top.HazardRec) {
    Top.HazardRec =
      TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
  }
  if (!Bot.HazardRec) {
    Bot.HazardRec =
      TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
  }
}

/// Initialize the per-region scheduling policy.
void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
                                  MachineBasicBlock::iterator End,
                                  unsigned NumRegionInstrs) {
  const TargetMachine &TM = Context->MF->getTarget();
  const TargetLowering *TLI = TM.getTargetLowering();

  // Avoid setting up the register pressure tracker for small regions to save
  // compile time. As a rough heuristic, only track pressure when the number of
  // schedulable instructions exceeds half the integer register file.
  RegionPolicy.ShouldTrackPressure = true;
  for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
    MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
    if (TLI->isTypeLegal(LegalIntVT)) {
      unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
        TLI->getRegClassFor(LegalIntVT));
      RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
    }
  }

  // For generic targets, we default to bottom-up, because it's simpler and
  // more compile-time optimizations have been implemented in that direction.
  RegionPolicy.OnlyBottomUp = true;

  // Allow the subtarget to override default policy.
  const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
  ST.overrideSchedPolicy(RegionPolicy, Begin, End, NumRegionInstrs);

  // After subtarget overrides, apply command line options.
  if (!EnableRegPressure)
    RegionPolicy.ShouldTrackPressure = false;

  // Check whether -misched-topdown/bottomup can force or unforce the
  // scheduling direction. e.g. -misched-bottomup=false allows scheduling in
  // both directions.
  assert((!ForceTopDown || !ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
  if (ForceBottomUp.getNumOccurrences() > 0) {
    RegionPolicy.OnlyBottomUp = ForceBottomUp;
    if (RegionPolicy.OnlyBottomUp)
      RegionPolicy.OnlyTopDown = false;
  }
  if (ForceTopDown.getNumOccurrences() > 0) {
    RegionPolicy.OnlyTopDown = ForceTopDown;
    if (RegionPolicy.OnlyTopDown)
      RegionPolicy.OnlyBottomUp = false;
  }
}
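
// For a concrete (hypothetical) instance of the pressure heuristic above: on
// a target whose only legal integer type is i32 with 30 allocatable GPRs,
// pressure tracking is enabled only for regions of more than 15 instructions;
// a 10-instruction region skips the tracker unless the subtarget override or
// -misched-regpressure says otherwise.
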
/// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
/// critical path by more cycles than it takes to drain the instruction buffer.
/// We estimate an upper bound on in-flight instructions as:
///
///   CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
///   InFlightIterations = AcyclicPath / CyclesPerIteration
///   InFlightResources = InFlightIterations * LoopResources
///
/// TODO: Check execution resources in addition to IssueCount.
void GenericScheduler::checkAcyclicLatency() {
  if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
    return;

  // Scaled number of cycles per loop iteration.
  unsigned IterCount =
    std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
             Rem.RemIssueCount);
  // Scaled acyclic critical path.
  unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
  // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
  unsigned InFlightCount =
    (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
  unsigned BufferLimit =
    SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();

  Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;

  DEBUG(dbgs() << "IssueCycles="
        << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
        << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
        << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
        << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
        << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
        if (Rem.IsAcyclicLatencyLimited)
          dbgs() << "  ACYCLIC LATENCY LIMIT\n");
}

void GenericScheduler::registerRoots() {
  Rem.CriticalPath = DAG->ExitSU.getDepth();

  // Some roots may not feed into ExitSU. Check all of them in case.
  for (std::vector<SUnit*>::const_iterator
         I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
    if ((*I)->getDepth() > Rem.CriticalPath)
      Rem.CriticalPath = (*I)->getDepth();
  }
  DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n');

  if (EnableCyclicPath) {
    Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
    checkAcyclicLatency();
  }
}

static bool tryPressure(const PressureChange &TryP,
                        const PressureChange &CandP,
                        GenericSchedulerBase::SchedCandidate &TryCand,
                        GenericSchedulerBase::SchedCandidate &Cand,
                        GenericSchedulerBase::CandReason Reason) {
  int TryRank = TryP.getPSetOrMax();
  int CandRank = CandP.getPSetOrMax();
  // If both candidates affect the same set, go with the smallest increase.
  if (TryRank == CandRank) {
    return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
                   Reason);
  }
  // If one candidate decreases and the other increases, go with it.
  // Invalid candidates have UnitInc==0.
  if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
                 Reason)) {
    return true;
  }
  // If the candidates are decreasing pressure, reverse priority.
  if (TryP.getUnitInc() < 0)
    std::swap(TryRank, CandRank);
  return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
}
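
// A hypothetical trace through tryPressure: if TryCand and Cand both raise
// the same pressure set, by +2 and +1 units respectively, the equal-rank
// branch runs tryLess(2, 1, ...) and Cand keeps winning. If instead TryCand
// lowered the set (UnitInc < 0) while Cand raised it, the sign test decides
// for TryCand before the set ranks are ever compared.
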
static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
  return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
}

/// Minimize physical register live ranges. Regalloc wants them adjacent to
/// their physreg def/use.
///
/// FIXME: This is an unnecessary check on the critical path. Most are
/// root/leaf copies which can be prescheduled. The rest (e.g. x86 MUL) could
/// be bundled with the operation that produces or consumes the physreg. We'll
/// do this when regalloc has support for parallel copies.
static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
  const MachineInstr *MI = SU->getInstr();
  if (!MI->isCopy())
    return 0;

  unsigned ScheduledOper = isTop ? 1 : 0;
  unsigned UnscheduledOper = isTop ? 0 : 1;
  // If we have already scheduled the physreg producer/consumer, immediately
  // schedule the copy.
  if (TargetRegisterInfo::isPhysicalRegister(
        MI->getOperand(ScheduledOper).getReg()))
    return 1;
  // If the physreg is at the boundary, defer it. Otherwise schedule it
  // immediately to free the dependent. We can hoist the copy later.
  bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
  if (TargetRegisterInfo::isPhysicalRegister(
        MI->getOperand(UnscheduledOper).getReg()))
    return AtBoundary ? -1 : 1;
  return 0;
}

/// Apply a set of heuristics to a new candidate. Heuristics are currently
/// hierarchical. This may be more efficient than a graduated cost model
/// because we don't need to evaluate all aspects of the model for each node
/// in the queue. But it's really done to make the heuristics easier to debug
/// and statistically analyze.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
/// \param Zone describes the scheduled zone that we are extending.
/// \param RPTracker describes reg pressure within the scheduled zone.
/// \param TempTracker is a scratch pressure tracker to reuse in queries.
void GenericScheduler::tryCandidate(SchedCandidate &Cand,
                                    SchedCandidate &TryCand,
                                    SchedBoundary &Zone,
                                    const RegPressureTracker &RPTracker,
                                    RegPressureTracker &TempTracker) {

  if (DAG->isTrackingPressure()) {
    // Always initialize TryCand's RPDelta.
    if (Zone.isTop()) {
      TempTracker.getMaxDownwardPressureDelta(
        TryCand.SU->getInstr(),
        TryCand.RPDelta,
        DAG->getRegionCriticalPSets(),
        DAG->getRegPressure().MaxSetPressure);
    }
    else {
      if (VerifyScheduling) {
        TempTracker.getMaxUpwardPressureDelta(
          TryCand.SU->getInstr(),
          &DAG->getPressureDiff(TryCand.SU),
          TryCand.RPDelta,
          DAG->getRegionCriticalPSets(),
          DAG->getRegPressure().MaxSetPressure);
      }
      else {
        RPTracker.getUpwardPressureDelta(
          TryCand.SU->getInstr(),
          DAG->getPressureDiff(TryCand.SU),
          TryCand.RPDelta,
          DAG->getRegionCriticalPSets(),
          DAG->getRegPressure().MaxSetPressure);
      }
    }
  }
  DEBUG(if (TryCand.RPDelta.Excess.isValid())
          dbgs() << "  SU(" << TryCand.SU->NodeNum << ") "
                 << TRI->getRegPressureSetName(TryCand.RPDelta.Excess.getPSet())
                 << ":" << TryCand.RPDelta.Excess.getUnitInc() << "\n");

  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return;
  }

  if (tryGreater(biasPhysRegCopy(TryCand.SU, Zone.isTop()),
                 biasPhysRegCopy(Cand.SU, Zone.isTop()),
                 TryCand, Cand, PhysRegCopy))
    return;

  // Avoid exceeding the target's limit. If signed PSetID is negative, it is
  // invalid; convert it to INT_MAX to give it lowest priority.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
                                               Cand.RPDelta.Excess,
                                               TryCand, Cand, RegExcess))
    return;

  // Avoid increasing the max critical pressure in the scheduled region.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
                                               Cand.RPDelta.CriticalMax,
                                               TryCand, Cand, RegCritical))
    return;

  // For loops that are acyclic path limited, aggressively schedule for
  // latency. This can result in very long dependence chains scheduled in
  // sequence, so once every cycle (when CurrMOps == 0), switch to normal
  // heuristics.
  if (Rem.IsAcyclicLatencyLimited && !Zone.getCurrMOps()
      && tryLatency(TryCand, Cand, Zone))
    return;

  // Prioritize instructions that read unbuffered resources by stall cycles.
  if (tryLess(Zone.getLatencyStallCycles(TryCand.SU),
              Zone.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
    return;

  // Keep clustered nodes together to encourage downstream peephole
  // optimizations which may reduce resource requirements.
  //
  // This is a best effort to set things up for a post-RA pass. Optimizations
  // like generating loads of multiple registers should ideally be done within
  // the scheduler pass by combining the loads during DAG postprocessing.
  const SUnit *NextClusterSU =
    Zone.isTop() ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
  if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU,
                 TryCand, Cand, Cluster))
    return;

  // Weak edges are for clustering and other constraints.
  if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()),
              getWeakLeft(Cand.SU, Zone.isTop()),
              TryCand, Cand, Weak)) {
    return;
  }
  // Avoid increasing the max pressure of the entire region.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
                                               Cand.RPDelta.CurrentMax,
                                               TryCand, Cand, RegMax))
    return;

  // Avoid critical resource consumption and balance the schedule.
  TryCand.initResourceDelta(DAG, SchedModel);
  if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
              TryCand, Cand, ResourceReduce))
    return;
  if (tryGreater(TryCand.ResDelta.DemandedResources,
                 Cand.ResDelta.DemandedResources,
                 TryCand, Cand, ResourceDemand))
    return;

  // Avoid serializing long latency dependence chains.
  // For acyclic path limited loops, latency was already checked above.
  if (Cand.Policy.ReduceLatency && !Rem.IsAcyclicLatencyLimited
      && tryLatency(TryCand, Cand, Zone)) {
    return;
  }

  // Prefer immediate defs/users of the last scheduled instruction. This is a
  // local pressure avoidance strategy that also makes the machine code
  // readable.
  if (tryGreater(Zone.isNextSU(TryCand.SU), Zone.isNextSU(Cand.SU),
                 TryCand, Cand, NextDefUse))
    return;

  // Fall through to original instruction order.
  if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
      || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
    TryCand.Reason = NodeOrder;
  }
}
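
// The hierarchy above, summarized in decreasing priority (this ordering is
// exactly what the early returns encode): PhysRegCopy, RegExcess, RegCritical,
// acyclic-limited latency, Stall, Cluster, Weak, RegMax, ResourceReduce,
// ResourceDemand, latency, NextDefUse, and finally NodeOrder as the
// tie-breaker. The first heuristic with an opinion wins; later ones never run.
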
/// Pick the best candidate from the queue.
///
/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit
/// during DAG building. To adjust for the current scheduling location we need
/// to maintain the number of vreg uses remaining to be top-scheduled.
void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
                                         const RegPressureTracker &RPTracker,
                                         SchedCandidate &Cand) {
  ReadyQueue &Q = Zone.Available;

  DEBUG(Q.dump());

  // getMaxPressureDelta temporarily modifies the tracker.
  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);

  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {

    SchedCandidate TryCand(Cand.Policy);
    TryCand.SU = *I;
    tryCandidate(Cand, TryCand, Zone, RPTracker, TempTracker);
    if (TryCand.Reason != NoCand) {
      // Initialize resource delta if needed in case future heuristics query it.
      if (TryCand.ResDelta == SchedResourceDelta())
        TryCand.initResourceDelta(DAG, SchedModel);
      Cand.setBest(TryCand);
      DEBUG(traceCandidate(Cand));
    }
  }
}

/// Pick the best candidate node from either the top or bottom queue.
SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (SUnit *SU = Bot.pickOnlyChoice()) {
    IsTopNode = false;
    DEBUG(dbgs() << "Pick Bot NOCAND\n");
    return SU;
  }
  if (SUnit *SU = Top.pickOnlyChoice()) {
    IsTopNode = true;
    DEBUG(dbgs() << "Pick Top NOCAND\n");
    return SU;
  }
  CandPolicy NoPolicy;
  SchedCandidate BotCand(NoPolicy);
  SchedCandidate TopCand(NoPolicy);
  // Set the bottom-up policy based on the state of the current bottom zone
  // and the instructions outside the zone, including the top zone.
  setPolicy(BotCand.Policy, /*IsPostRA=*/false, Bot, &Top);
  // Set the top-down policy based on the state of the current top zone and
  // the instructions outside the zone, including the bottom zone.
  setPolicy(TopCand.Policy, /*IsPostRA=*/false, Top, &Bot);

  // Prefer bottom scheduling when heuristics are silent.
  pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
  assert(BotCand.Reason != NoCand && "failed to find the first candidate");

  // If either Q has a single candidate that provides the least increase in
  // Excess pressure, we can immediately schedule from that Q.
  //
  // RegionCriticalPSets summarizes the pressure within the scheduled region
  // and affects picking from either Q. If scheduling in one direction must
  // increase pressure for one of the excess PSets, then schedule in that
  // direction first to provide more freedom in the other direction.
  if ((BotCand.Reason == RegExcess && !BotCand.isRepeat(RegExcess))
      || (BotCand.Reason == RegCritical
          && !BotCand.isRepeat(RegCritical)))
  {
    IsTopNode = false;
    tracePick(BotCand, IsTopNode);
    return BotCand.SU;
  }
  // Check if the top Q has a better candidate.
  pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
  assert(TopCand.Reason != NoCand && "failed to find the first candidate");

  // Choose the queue with the most important (lowest enum) reason.
  if (TopCand.Reason < BotCand.Reason) {
    IsTopNode = true;
    tracePick(TopCand, IsTopNode);
    return TopCand.SU;
  }
  // Otherwise prefer the bottom candidate, in node order if all else failed.
  IsTopNode = false;
  tracePick(BotCand, IsTopNode);
  return BotCand.SU;
}

/// Pick the best node to balance the schedule. Implements
/// MachineSchedStrategy.
SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    if (RegionPolicy.OnlyTopDown) {
      SU = Top.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        SchedCandidate TopCand(NoPolicy);
        pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
        assert(TopCand.Reason != NoCand && "failed to find a candidate");
        tracePick(TopCand, true);
        SU = TopCand.SU;
      }
      IsTopNode = true;
    }
    else if (RegionPolicy.OnlyBottomUp) {
      SU = Bot.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        SchedCandidate BotCand(NoPolicy);
        pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
        assert(BotCand.Reason != NoCand && "failed to find a candidate");
        tracePick(BotCand, false);
        SU = BotCand.SU;
      }
      IsTopNode = false;
    }
    else {
      SU = pickNodeBidirectional(IsTopNode);
    }
  } while (SU->isScheduled);

  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
  return SU;
}

void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {

  MachineBasicBlock::iterator InsertPos = SU->getInstr();
  if (!isTop)
    ++InsertPos;
  SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;

  // Find already scheduled copies with a single physreg dependence and move
  // them just above the scheduled instruction.
  for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end();
       I != E; ++I) {
    if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg()))
      continue;
    SUnit *DepSU = I->getSUnit();
    if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
      continue;
    MachineInstr *Copy = DepSU->getInstr();
    if (!Copy->isCopy())
      continue;
    DEBUG(dbgs() << "  Rescheduling physreg copy ";
          I->getSUnit()->dump(DAG));
    DAG->moveInstruction(Copy, InsertPos);
  }
}

/// Update the scheduler's state after scheduling a node. This is the same
/// node that was just returned by pickNode(). However, ScheduleDAGMILive
/// needs to update its state based on the current cycle before
/// MachineSchedStrategy does.
///
/// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
/// them here. See comments in biasPhysRegCopy.
void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  if (IsTopNode) {
    SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
    Top.bumpNode(SU);
    if (SU->hasPhysRegUses)
      reschedulePhysRegCopies(SU, true);
  }
  else {
    SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
    Bot.bumpNode(SU);
    if (SU->hasPhysRegDefs)
      reschedulePhysRegCopies(SU, false);
  }
}

/// Create the standard converging machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
    new ScheduleDAGMILive(C, make_unique<GenericScheduler>(C));
  // Register DAG post-processors.
  //
  // FIXME: extend the mutation API to allow earlier mutations to instantiate
  // data and pass it to later mutations. Have a single mutation that gathers
  // the interesting nodes in one pass.
  DAG->addMutation(make_unique<CopyConstrain>(DAG->TII, DAG->TRI));
  if (EnableLoadCluster && DAG->TII->enableClusterLoads())
    DAG->addMutation(make_unique<LoadClusterMutation>(DAG->TII, DAG->TRI));
  if (EnableMacroFusion)
    DAG->addMutation(make_unique<MacroFusion>(DAG->TII));
  return DAG;
}

static MachineSchedRegistry
GenericSchedRegistry("converge", "Standard converging scheduler.",
                     createGenericSchedLive);
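
// A sketch of how an out-of-tree strategy would plug into the same registry
// (the names MySchedStrategy/createMySched are hypothetical; the pattern
// mirrors GenericSchedRegistry above):
//
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, make_unique<MySchedStrategy>(C));
//   }
//   static MachineSchedRegistry
//   MySchedRegistry("my-sched", "Custom strategy.", createMySched);
//
// The registered name becomes selectable on the command line in the same way
// as the "converge", "ilpmax", and "shuffle" schedulers in this file.
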
//===----------------------------------------------------------------------===//
// PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
//===----------------------------------------------------------------------===//

void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
  DAG = Dag;
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  BotRoots.clear();

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
  // or are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  const TargetMachine &TM = DAG->MF.getTarget();
  if (!Top.HazardRec) {
    Top.HazardRec =
      TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
  }
}

void PostGenericScheduler::registerRoots() {
  Rem.CriticalPath = DAG->ExitSU.getDepth();

  // Some roots may not feed into ExitSU. Check all of them in case.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = BotRoots.begin(), E = BotRoots.end(); I != E; ++I) {
    if ((*I)->getDepth() > Rem.CriticalPath)
      Rem.CriticalPath = (*I)->getDepth();
  }
  DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n');
}

/// Apply a set of heuristics to a new candidate for PostRA scheduling.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
void PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
                                        SchedCandidate &TryCand) {

  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return;
  }

  // Prioritize instructions that read unbuffered resources by stall cycles.
  if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
              Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
    return;

  // Avoid critical resource consumption and balance the schedule.
  if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
              TryCand, Cand, ResourceReduce))
    return;
  if (tryGreater(TryCand.ResDelta.DemandedResources,
                 Cand.ResDelta.DemandedResources,
                 TryCand, Cand, ResourceDemand))
    return;

  // Avoid serializing long latency dependence chains.
  if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
    return;
  }

  // Fall through to original instruction order.
  if (TryCand.SU->NodeNum < Cand.SU->NodeNum)
    TryCand.Reason = NodeOrder;
}

void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
  ReadyQueue &Q = Top.Available;

  DEBUG(Q.dump());

  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
    SchedCandidate TryCand(Cand.Policy);
    TryCand.SU = *I;
    TryCand.initResourceDelta(DAG, SchedModel);
    tryCandidate(Cand, TryCand);
    if (TryCand.Reason != NoCand) {
      Cand.setBest(TryCand);
      DEBUG(traceCandidate(Cand));
    }
  }
}

/// Pick the next node to schedule.
SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    SU = Top.pickOnlyChoice();
    if (!SU) {
      CandPolicy NoPolicy;
      SchedCandidate TopCand(NoPolicy);
      // Set the top-down policy based on the state of the current top zone
      // and the instructions outside the zone, including the bottom zone.
      setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
      pickNodeFromQueue(TopCand);
      assert(TopCand.Reason != NoCand && "failed to find a candidate");
      tracePick(TopCand, true);
      SU = TopCand.SU;
    }
  } while (SU->isScheduled);

  IsTopNode = true;
  Top.removeReady(SU);

  DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
  return SU;
}

/// Called after ScheduleDAGMI has scheduled an instruction and updated
/// scheduled/remaining flags in the DAG nodes.
void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
  Top.bumpNode(SU);
}

/// Create a generic scheduler with no vreg liveness or DAG mutation passes.
static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C) {
  return new ScheduleDAGMI(C, make_unique<PostGenericScheduler>(C),
                           /*IsPostRA=*/true);
}

//===----------------------------------------------------------------------===//
// ILP Scheduler. Currently for experimental analysis of heuristics.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Order nodes by the ILP metric.
struct ILPOrder {
  const SchedDFSResult *DFSResult;
  const BitVector *ScheduledTrees;
  bool MaximizeILP;

  ILPOrder(bool MaxILP)
    : DFSResult(nullptr), ScheduledTrees(nullptr), MaximizeILP(MaxILP) {}

  /// \brief Apply a less-than relation on node priority.
  ///
  /// (Return true if A comes after B in the Q.)
  bool operator()(const SUnit *A, const SUnit *B) const {
    unsigned SchedTreeA = DFSResult->getSubtreeID(A);
    unsigned SchedTreeB = DFSResult->getSubtreeID(B);
    if (SchedTreeA != SchedTreeB) {
      // Unscheduled trees have lower priority.
      if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
        return ScheduledTrees->test(SchedTreeB);

      // Trees with shallower connections have lower priority.
      if (DFSResult->getSubtreeLevel(SchedTreeA)
          != DFSResult->getSubtreeLevel(SchedTreeB)) {
        return DFSResult->getSubtreeLevel(SchedTreeA)
          < DFSResult->getSubtreeLevel(SchedTreeB);
      }
    }
    if (MaximizeILP)
      return DFSResult->getILP(A) < DFSResult->getILP(B);
    else
      return DFSResult->getILP(A) > DFSResult->getILP(B);
  }
};
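
// To make the comparator concrete (hypothetical values): with MaximizeILP
// set, a node whose subtree has ILP 4 outranks one with ILP 2 in the heap,
// because operator() reports the lower-ILP node as "less". The subtree checks
// run first, so a node in an already-scheduled tree always beats one in an
// unscheduled tree regardless of the ILP values.
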
//===----------------------------------------------------------------------===//
// Machine Instruction Shuffler for Correctness Testing
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace {
/// Apply a less-than relation on the node order, which corresponds to the
/// instruction order prior to scheduling. IsReverse implements greater-than.
template<bool IsReverse>
struct SUnitOrder {
  bool operator()(SUnit *A, SUnit *B) const {
    if (IsReverse)
      return A->NodeNum > B->NodeNum;
    else
      return A->NodeNum < B->NodeNum;
  }
};

/// Reorder instructions as much as possible.
class InstructionShuffler : public MachineSchedStrategy {
  bool IsAlternating;
  bool IsTopDown;

  // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
  // gives nodes with a higher number higher priority, causing the latest
  // instructions to be scheduled first.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
    TopQ;
  // When scheduling bottom-up, use greater-than as the queue priority.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
    BottomQ;
public:
  InstructionShuffler(bool alternate, bool topdown)
    : IsAlternating(alternate), IsTopDown(topdown) {}

  void initialize(ScheduleDAGMI*) override {
    TopQ.clear();
    BottomQ.clear();
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  SUnit *pickNode(bool &IsTopNode) override {
    SUnit *SU;
    if (IsTopDown) {
      do {
        if (TopQ.empty()) return nullptr;
        SU = TopQ.top();
        TopQ.pop();
      } while (SU->isScheduled);
      IsTopNode = true;
    } else {
      do {
        if (BottomQ.empty()) return nullptr;
        SU = BottomQ.top();
        BottomQ.pop();
      } while (SU->isScheduled);
      IsTopNode = false;
    }
    if (IsAlternating)
      IsTopDown = !IsTopDown;
    return SU;
  }

  void schedNode(SUnit *SU, bool IsTopNode) override {}

  void releaseTopNode(SUnit *SU) override {
    TopQ.push(SU);
  }
  void releaseBottomNode(SUnit *SU) override {
    BottomQ.push(SU);
  }
};
} // namespace

static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
  bool Alternate = !ForceTopDown && !ForceBottomUp;
  bool TopDown = !ForceBottomUp;
  assert((TopDown || !ForceTopDown) &&
         "-misched-topdown incompatible with -misched-bottomup");
  return new ScheduleDAGMILive(
      C, make_unique<InstructionShuffler>(Alternate, TopDown));
}
static MachineSchedRegistry ShufflerRegistry(
  "shuffle", "Shuffle machine instructions alternating directions",
  createInstructionShuffler);
#endif // !NDEBUG
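// The shuffler is a stress-testing aid rather than a real strategy: it picks
// deliberately extreme orders to shake out liveness-update and dependence
// bugs. Since the registry is only compiled into asserts-enabled (!NDEBUG)
// builds, an invocation looks like (illustrative placeholders):
//
//   llc -misched=shuffle foo.ll -o foo.s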
//===----------------------------------------------------------------------===//
// GraphWriter support for ScheduleDAGMILive.
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace llvm {

template<> struct GraphTraits<
  ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};

template<>
struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {

  DOTGraphTraits(bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getGraphName(const ScheduleDAG *G) {
    return G->MF.getName();
  }

  static bool renderGraphFromBottomUp() {
    return true;
  }

  static bool isNodeHidden(const SUnit *Node) {
    return (Node->Preds.size() > 10 || Node->Succs.size() > 10);
  }

  static bool hasNodeAddressLabel(const SUnit *Node,
                                  const ScheduleDAG *Graph) {
    return false;
  }

  /// If you want to override the dot attributes printed for a particular
  /// edge, override this method.
  static std::string getEdgeAttributes(const SUnit *Node,
                                       SUnitIterator EI,
                                       const ScheduleDAG *Graph) {
    if (EI.isArtificialDep())
      return "color=cyan,style=dashed";
    if (EI.isCtrlDep())
      return "color=blue,style=dashed";
    return "";
  }

  static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
    std::string Str;
    raw_string_ostream SS(Str);
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    SS << "SU:" << SU->NodeNum;
    if (DFS)
      SS << " I:" << DFS->getNumInstrs(SU);
    return SS.str();
  }

  static std::string getNodeDescription(const SUnit *SU,
                                        const ScheduleDAG *G) {
    return G->getGraphNodeLabel(SU);
  }

  static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
    std::string Str("shape=Mrecord");
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    if (DFS) {
      Str += ",style=filled,fillcolor=\"#";
      Str += DOT::getColorString(DFS->getSubtreeID(N));
      Str += '"';
    }
    return Str;
  }
};
} // namespace llvm
#endif // NDEBUG
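// Rendering notes for the traits above: data dependences use GraphWriter's
// default edge style, artificial edges render cyan/dashed, and control
// (ordering) edges render blue/dashed. When DFS results are available, a
// node's record looks roughly like the following DOT fragment (illustrative
// only; the exact node names and colors are chosen by GraphWriter and
// DOT::getColorString):
//
//   <node> [shape=Mrecord,style=filled,fillcolor="#...",label="SU:42 I:3"];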
/// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
/// rendered using 'dot'.
///
void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
#ifndef NDEBUG
  ViewGraph(this, Name, false, Title);
#else
  errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}

/// Out-of-line implementation with no arguments is handy for gdb.
void ScheduleDAGMI::viewGraph() {
  viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
}
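// Typical ways to trigger these viewers (illustrative; both require a debug
// build with Graphviz or gv installed):
//
//   llc -view-misched-dags ...   // pop up each region's DAG as it is scheduled
//   (gdb) p DAG->viewGraph()     // from a breakpoint inside the scheduler,
//                                // where 'DAG' names a ScheduleDAGMI*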