1 //===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // MachineScheduler schedules machine instructions after phi elimination. It 11 // preserves LiveIntervals so it can be invoked before register allocation. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "llvm/CodeGen/MachineScheduler.h" 16 #include "llvm/ADT/ArrayRef.h" 17 #include "llvm/ADT/BitVector.h" 18 #include "llvm/ADT/DenseMap.h" 19 #include "llvm/ADT/PriorityQueue.h" 20 #include "llvm/ADT/STLExtras.h" 21 #include "llvm/ADT/SmallVector.h" 22 #include "llvm/ADT/iterator_range.h" 23 #include "llvm/Analysis/AliasAnalysis.h" 24 #include "llvm/CodeGen/LiveInterval.h" 25 #include "llvm/CodeGen/LiveIntervals.h" 26 #include "llvm/CodeGen/MachineBasicBlock.h" 27 #include "llvm/CodeGen/MachineDominators.h" 28 #include "llvm/CodeGen/MachineFunction.h" 29 #include "llvm/CodeGen/MachineFunctionPass.h" 30 #include "llvm/CodeGen/MachineInstr.h" 31 #include "llvm/CodeGen/MachineLoopInfo.h" 32 #include "llvm/CodeGen/MachineOperand.h" 33 #include "llvm/CodeGen/MachinePassRegistry.h" 34 #include "llvm/CodeGen/MachineRegisterInfo.h" 35 #include "llvm/CodeGen/Passes.h" 36 #include "llvm/CodeGen/RegisterClassInfo.h" 37 #include "llvm/CodeGen/RegisterPressure.h" 38 #include "llvm/CodeGen/ScheduleDAG.h" 39 #include "llvm/CodeGen/ScheduleDAGInstrs.h" 40 #include "llvm/CodeGen/ScheduleDAGMutation.h" 41 #include "llvm/CodeGen/ScheduleDFS.h" 42 #include "llvm/CodeGen/ScheduleHazardRecognizer.h" 43 #include "llvm/CodeGen/SlotIndexes.h" 44 #include "llvm/CodeGen/TargetInstrInfo.h" 45 #include "llvm/CodeGen/TargetLowering.h" 46 #include "llvm/CodeGen/TargetPassConfig.h" 47 #include "llvm/CodeGen/TargetRegisterInfo.h" 48 #include "llvm/CodeGen/TargetSchedule.h" 49 #include "llvm/CodeGen/TargetSubtargetInfo.h" 50 #include "llvm/Config/llvm-config.h" 51 #include "llvm/MC/LaneBitmask.h" 52 #include "llvm/Pass.h" 53 #include "llvm/Support/CommandLine.h" 54 #include "llvm/Support/Compiler.h" 55 #include "llvm/Support/Debug.h" 56 #include "llvm/Support/ErrorHandling.h" 57 #include "llvm/Support/GraphWriter.h" 58 #include "llvm/Support/MachineValueType.h" 59 #include "llvm/Support/raw_ostream.h" 60 #include <algorithm> 61 #include <cassert> 62 #include <cstdint> 63 #include <iterator> 64 #include <limits> 65 #include <memory> 66 #include <string> 67 #include <tuple> 68 #include <utility> 69 #include <vector> 70 71 using namespace llvm; 72 73 #define DEBUG_TYPE "machine-scheduler" 74 75 namespace llvm { 76 77 cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden, 78 cl::desc("Force top-down list scheduling")); 79 cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden, 80 cl::desc("Force bottom-up list scheduling")); 81 cl::opt<bool> 82 DumpCriticalPathLength("misched-dcpl", cl::Hidden, 83 cl::desc("Print critical path length to stdout")); 84 85 } // end namespace llvm 86 87 #ifndef NDEBUG 88 static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden, 89 cl::desc("Pop up a window to show MISched dags after they are processed")); 90 91 /// In some situations a few uninteresting nodes depend on nearly all other 92 /// nodes in the graph, provide a cutoff to hide them. 93 static cl::opt<unsigned> ViewMISchedCutoff("view-misched-cutoff", cl::Hidden, 94 cl::desc("Hide nodes with more predecessor/successor than cutoff")); 95 96 static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden, 97 cl::desc("Stop scheduling after N instructions"), cl::init(~0U)); 98 99 static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden, 100 cl::desc("Only schedule this function")); 101 static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden, 102 cl::desc("Only schedule this MBB#")); 103 #else 104 static bool ViewMISchedDAGs = false; 105 #endif // NDEBUG 106 107 /// Avoid quadratic complexity in unusually large basic blocks by limiting the 108 /// size of the ready lists. 109 static cl::opt<unsigned> ReadyListLimit("misched-limit", cl::Hidden, 110 cl::desc("Limit ready list to N instructions"), cl::init(256)); 111 112 static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden, 113 cl::desc("Enable register pressure scheduling."), cl::init(true)); 114 115 static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden, 116 cl::desc("Enable cyclic critical path analysis."), cl::init(true)); 117 118 static cl::opt<bool> EnableMemOpCluster("misched-cluster", cl::Hidden, 119 cl::desc("Enable memop clustering."), 120 cl::init(true)); 121 122 static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden, 123 cl::desc("Verify machine instrs before and after machine scheduling")); 124 125 // DAG subtrees must have at least this many nodes. 126 static const unsigned MinSubtreeSize = 8; 127 128 // Pin the vtables to this file. 129 void MachineSchedStrategy::anchor() {} 130 131 void ScheduleDAGMutation::anchor() {} 132 133 //===----------------------------------------------------------------------===// 134 // Machine Instruction Scheduling Pass and Registry 135 //===----------------------------------------------------------------------===// 136 137 MachineSchedContext::MachineSchedContext() { 138 RegClassInfo = new RegisterClassInfo(); 139 } 140 141 MachineSchedContext::~MachineSchedContext() { 142 delete RegClassInfo; 143 } 144 145 namespace { 146 147 /// Base class for a machine scheduler class that can run at any point. 148 class MachineSchedulerBase : public MachineSchedContext, 149 public MachineFunctionPass { 150 public: 151 MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {} 152 153 void print(raw_ostream &O, const Module* = nullptr) const override; 154 155 protected: 156 void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags); 157 }; 158 159 /// MachineScheduler runs after coalescing and before register allocation. 160 class MachineScheduler : public MachineSchedulerBase { 161 public: 162 MachineScheduler(); 163 164 void getAnalysisUsage(AnalysisUsage &AU) const override; 165 166 bool runOnMachineFunction(MachineFunction&) override; 167 168 static char ID; // Class identification, replacement for typeinfo 169 170 protected: 171 ScheduleDAGInstrs *createMachineScheduler(); 172 }; 173 174 /// PostMachineScheduler runs after shortly before code emission. 175 class PostMachineScheduler : public MachineSchedulerBase { 176 public: 177 PostMachineScheduler(); 178 179 void getAnalysisUsage(AnalysisUsage &AU) const override; 180 181 bool runOnMachineFunction(MachineFunction&) override; 182 183 static char ID; // Class identification, replacement for typeinfo 184 185 protected: 186 ScheduleDAGInstrs *createPostMachineScheduler(); 187 }; 188 189 } // end anonymous namespace 190 191 char MachineScheduler::ID = 0; 192 193 char &llvm::MachineSchedulerID = MachineScheduler::ID; 194 195 INITIALIZE_PASS_BEGIN(MachineScheduler, DEBUG_TYPE, 196 "Machine Instruction Scheduler", false, false) 197 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 198 INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo) 199 INITIALIZE_PASS_DEPENDENCY(SlotIndexes) 200 INITIALIZE_PASS_DEPENDENCY(LiveIntervals) 201 INITIALIZE_PASS_END(MachineScheduler, DEBUG_TYPE, 202 "Machine Instruction Scheduler", false, false) 203 204 MachineScheduler::MachineScheduler() : MachineSchedulerBase(ID) { 205 initializeMachineSchedulerPass(*PassRegistry::getPassRegistry()); 206 } 207 208 void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const { 209 AU.setPreservesCFG(); 210 AU.addRequiredID(MachineDominatorsID); 211 AU.addRequired<MachineLoopInfo>(); 212 AU.addRequired<AAResultsWrapperPass>(); 213 AU.addRequired<TargetPassConfig>(); 214 AU.addRequired<SlotIndexes>(); 215 AU.addPreserved<SlotIndexes>(); 216 AU.addRequired<LiveIntervals>(); 217 AU.addPreserved<LiveIntervals>(); 218 MachineFunctionPass::getAnalysisUsage(AU); 219 } 220 221 char PostMachineScheduler::ID = 0; 222 223 char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID; 224 225 INITIALIZE_PASS(PostMachineScheduler, "postmisched", 226 "PostRA Machine Instruction Scheduler", false, false) 227 228 PostMachineScheduler::PostMachineScheduler() : MachineSchedulerBase(ID) { 229 initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry()); 230 } 231 232 void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const { 233 AU.setPreservesCFG(); 234 AU.addRequiredID(MachineDominatorsID); 235 AU.addRequired<MachineLoopInfo>(); 236 AU.addRequired<TargetPassConfig>(); 237 MachineFunctionPass::getAnalysisUsage(AU); 238 } 239 240 MachinePassRegistry MachineSchedRegistry::Registry; 241 242 /// A dummy default scheduler factory indicates whether the scheduler 243 /// is overridden on the command line. 244 static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) { 245 return nullptr; 246 } 247 248 /// MachineSchedOpt allows command line selection of the scheduler. 249 static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false, 250 RegisterPassParser<MachineSchedRegistry>> 251 MachineSchedOpt("misched", 252 cl::init(&useDefaultMachineSched), cl::Hidden, 253 cl::desc("Machine instruction scheduler to use")); 254 255 static MachineSchedRegistry 256 DefaultSchedRegistry("default", "Use the target's default scheduler choice.", 257 useDefaultMachineSched); 258 259 static cl::opt<bool> EnableMachineSched( 260 "enable-misched", 261 cl::desc("Enable the machine instruction scheduling pass."), cl::init(true), 262 cl::Hidden); 263 264 static cl::opt<bool> EnablePostRAMachineSched( 265 "enable-post-misched", 266 cl::desc("Enable the post-ra machine instruction scheduling pass."), 267 cl::init(true), cl::Hidden); 268 269 /// Decrement this iterator until reaching the top or a non-debug instr. 270 static MachineBasicBlock::const_iterator 271 priorNonDebug(MachineBasicBlock::const_iterator I, 272 MachineBasicBlock::const_iterator Beg) { 273 assert(I != Beg && "reached the top of the region, cannot decrement"); 274 while (--I != Beg) { 275 if (!I->isDebugInstr()) 276 break; 277 } 278 return I; 279 } 280 281 /// Non-const version. 282 static MachineBasicBlock::iterator 283 priorNonDebug(MachineBasicBlock::iterator I, 284 MachineBasicBlock::const_iterator Beg) { 285 return priorNonDebug(MachineBasicBlock::const_iterator(I), Beg) 286 .getNonConstIterator(); 287 } 288 289 /// If this iterator is a debug value, increment until reaching the End or a 290 /// non-debug instruction. 291 static MachineBasicBlock::const_iterator 292 nextIfDebug(MachineBasicBlock::const_iterator I, 293 MachineBasicBlock::const_iterator End) { 294 for(; I != End; ++I) { 295 if (!I->isDebugInstr()) 296 break; 297 } 298 return I; 299 } 300 301 /// Non-const version. 302 static MachineBasicBlock::iterator 303 nextIfDebug(MachineBasicBlock::iterator I, 304 MachineBasicBlock::const_iterator End) { 305 return nextIfDebug(MachineBasicBlock::const_iterator(I), End) 306 .getNonConstIterator(); 307 } 308 309 /// Instantiate a ScheduleDAGInstrs that will be owned by the caller. 310 ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() { 311 // Select the scheduler, or set the default. 312 MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt; 313 if (Ctor != useDefaultMachineSched) 314 return Ctor(this); 315 316 // Get the default scheduler set by the target for this function. 317 ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this); 318 if (Scheduler) 319 return Scheduler; 320 321 // Default to GenericScheduler. 322 return createGenericSchedLive(this); 323 } 324 325 /// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by 326 /// the caller. We don't have a command line option to override the postRA 327 /// scheduler. The Target must configure it. 328 ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() { 329 // Get the postRA scheduler set by the target for this function. 330 ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this); 331 if (Scheduler) 332 return Scheduler; 333 334 // Default to GenericScheduler. 335 return createGenericSchedPostRA(this); 336 } 337 338 /// Top-level MachineScheduler pass driver. 339 /// 340 /// Visit blocks in function order. Divide each block into scheduling regions 341 /// and visit them bottom-up. Visiting regions bottom-up is not required, but is 342 /// consistent with the DAG builder, which traverses the interior of the 343 /// scheduling regions bottom-up. 344 /// 345 /// This design avoids exposing scheduling boundaries to the DAG builder, 346 /// simplifying the DAG builder's support for "special" target instructions. 347 /// At the same time the design allows target schedulers to operate across 348 /// scheduling boundaries, for example to bundle the boundary instructions 349 /// without reordering them. This creates complexity, because the target 350 /// scheduler must update the RegionBegin and RegionEnd positions cached by 351 /// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler 352 /// design would be to split blocks at scheduling boundaries, but LLVM has a 353 /// general bias against block splitting purely for implementation simplicity. 354 bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) { 355 if (skipFunction(mf.getFunction())) 356 return false; 357 358 if (EnableMachineSched.getNumOccurrences()) { 359 if (!EnableMachineSched) 360 return false; 361 } else if (!mf.getSubtarget().enableMachineScheduler()) 362 return false; 363 364 LLVM_DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs())); 365 366 // Initialize the context of the pass. 367 MF = &mf; 368 MLI = &getAnalysis<MachineLoopInfo>(); 369 MDT = &getAnalysis<MachineDominatorTree>(); 370 PassConfig = &getAnalysis<TargetPassConfig>(); 371 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 372 373 LIS = &getAnalysis<LiveIntervals>(); 374 375 if (VerifyScheduling) { 376 LLVM_DEBUG(LIS->dump()); 377 MF->verify(this, "Before machine scheduling."); 378 } 379 RegClassInfo->runOnMachineFunction(*MF); 380 381 // Instantiate the selected scheduler for this target, function, and 382 // optimization level. 383 std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler()); 384 scheduleRegions(*Scheduler, false); 385 386 LLVM_DEBUG(LIS->dump()); 387 if (VerifyScheduling) 388 MF->verify(this, "After machine scheduling."); 389 return true; 390 } 391 392 bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) { 393 if (skipFunction(mf.getFunction())) 394 return false; 395 396 if (EnablePostRAMachineSched.getNumOccurrences()) { 397 if (!EnablePostRAMachineSched) 398 return false; 399 } else if (!mf.getSubtarget().enablePostRAScheduler()) { 400 LLVM_DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n"); 401 return false; 402 } 403 LLVM_DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs())); 404 405 // Initialize the context of the pass. 406 MF = &mf; 407 MLI = &getAnalysis<MachineLoopInfo>(); 408 PassConfig = &getAnalysis<TargetPassConfig>(); 409 410 if (VerifyScheduling) 411 MF->verify(this, "Before post machine scheduling."); 412 413 // Instantiate the selected scheduler for this target, function, and 414 // optimization level. 415 std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler()); 416 scheduleRegions(*Scheduler, true); 417 418 if (VerifyScheduling) 419 MF->verify(this, "After post machine scheduling."); 420 return true; 421 } 422 423 /// Return true of the given instruction should not be included in a scheduling 424 /// region. 425 /// 426 /// MachineScheduler does not currently support scheduling across calls. To 427 /// handle calls, the DAG builder needs to be modified to create register 428 /// anti/output dependencies on the registers clobbered by the call's regmask 429 /// operand. In PreRA scheduling, the stack pointer adjustment already prevents 430 /// scheduling across calls. In PostRA scheduling, we need the isCall to enforce 431 /// the boundary, but there would be no benefit to postRA scheduling across 432 /// calls this late anyway. 433 static bool isSchedBoundary(MachineBasicBlock::iterator MI, 434 MachineBasicBlock *MBB, 435 MachineFunction *MF, 436 const TargetInstrInfo *TII) { 437 return MI->isCall() || TII->isSchedulingBoundary(*MI, MBB, *MF); 438 } 439 440 /// A region of an MBB for scheduling. 441 namespace { 442 struct SchedRegion { 443 /// RegionBegin is the first instruction in the scheduling region, and 444 /// RegionEnd is either MBB->end() or the scheduling boundary after the 445 /// last instruction in the scheduling region. These iterators cannot refer 446 /// to instructions outside of the identified scheduling region because 447 /// those may be reordered before scheduling this region. 448 MachineBasicBlock::iterator RegionBegin; 449 MachineBasicBlock::iterator RegionEnd; 450 unsigned NumRegionInstrs; 451 452 SchedRegion(MachineBasicBlock::iterator B, MachineBasicBlock::iterator E, 453 unsigned N) : 454 RegionBegin(B), RegionEnd(E), NumRegionInstrs(N) {} 455 }; 456 } // end anonymous namespace 457 458 using MBBRegionsVector = SmallVector<SchedRegion, 16>; 459 460 static void 461 getSchedRegions(MachineBasicBlock *MBB, 462 MBBRegionsVector &Regions, 463 bool RegionsTopDown) { 464 MachineFunction *MF = MBB->getParent(); 465 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); 466 467 MachineBasicBlock::iterator I = nullptr; 468 for(MachineBasicBlock::iterator RegionEnd = MBB->end(); 469 RegionEnd != MBB->begin(); RegionEnd = I) { 470 471 // Avoid decrementing RegionEnd for blocks with no terminator. 472 if (RegionEnd != MBB->end() || 473 isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII)) { 474 --RegionEnd; 475 } 476 477 // The next region starts above the previous region. Look backward in the 478 // instruction stream until we find the nearest boundary. 479 unsigned NumRegionInstrs = 0; 480 I = RegionEnd; 481 for (;I != MBB->begin(); --I) { 482 MachineInstr &MI = *std::prev(I); 483 if (isSchedBoundary(&MI, &*MBB, MF, TII)) 484 break; 485 if (!MI.isDebugInstr()) 486 // MBB::size() uses instr_iterator to count. Here we need a bundle to 487 // count as a single instruction. 488 ++NumRegionInstrs; 489 } 490 491 Regions.push_back(SchedRegion(I, RegionEnd, NumRegionInstrs)); 492 } 493 494 if (RegionsTopDown) 495 std::reverse(Regions.begin(), Regions.end()); 496 } 497 498 /// Main driver for both MachineScheduler and PostMachineScheduler. 499 void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler, 500 bool FixKillFlags) { 501 // Visit all machine basic blocks. 502 // 503 // TODO: Visit blocks in global postorder or postorder within the bottom-up 504 // loop tree. Then we can optionally compute global RegPressure. 505 for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end(); 506 MBB != MBBEnd; ++MBB) { 507 508 Scheduler.startBlock(&*MBB); 509 510 #ifndef NDEBUG 511 if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName()) 512 continue; 513 if (SchedOnlyBlock.getNumOccurrences() 514 && (int)SchedOnlyBlock != MBB->getNumber()) 515 continue; 516 #endif 517 518 // Break the block into scheduling regions [I, RegionEnd). RegionEnd 519 // points to the scheduling boundary at the bottom of the region. The DAG 520 // does not include RegionEnd, but the region does (i.e. the next 521 // RegionEnd is above the previous RegionBegin). If the current block has 522 // no terminator then RegionEnd == MBB->end() for the bottom region. 523 // 524 // All the regions of MBB are first found and stored in MBBRegions, which 525 // will be processed (MBB) top-down if initialized with true. 526 // 527 // The Scheduler may insert instructions during either schedule() or 528 // exitRegion(), even for empty regions. So the local iterators 'I' and 529 // 'RegionEnd' are invalid across these calls. Instructions must not be 530 // added to other regions than the current one without updating MBBRegions. 531 532 MBBRegionsVector MBBRegions; 533 getSchedRegions(&*MBB, MBBRegions, Scheduler.doMBBSchedRegionsTopDown()); 534 for (MBBRegionsVector::iterator R = MBBRegions.begin(); 535 R != MBBRegions.end(); ++R) { 536 MachineBasicBlock::iterator I = R->RegionBegin; 537 MachineBasicBlock::iterator RegionEnd = R->RegionEnd; 538 unsigned NumRegionInstrs = R->NumRegionInstrs; 539 540 // Notify the scheduler of the region, even if we may skip scheduling 541 // it. Perhaps it still needs to be bundled. 542 Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs); 543 544 // Skip empty scheduling regions (0 or 1 schedulable instructions). 545 if (I == RegionEnd || I == std::prev(RegionEnd)) { 546 // Close the current region. Bundle the terminator if needed. 547 // This invalidates 'RegionEnd' and 'I'. 548 Scheduler.exitRegion(); 549 continue; 550 } 551 LLVM_DEBUG(dbgs() << "********** MI Scheduling **********\n"); 552 LLVM_DEBUG(dbgs() << MF->getName() << ":" << printMBBReference(*MBB) 553 << " " << MBB->getName() << "\n From: " << *I 554 << " To: "; 555 if (RegionEnd != MBB->end()) dbgs() << *RegionEnd; 556 else dbgs() << "End"; 557 dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n'); 558 if (DumpCriticalPathLength) { 559 errs() << MF->getName(); 560 errs() << ":%bb. " << MBB->getNumber(); 561 errs() << " " << MBB->getName() << " \n"; 562 } 563 564 // Schedule a region: possibly reorder instructions. 565 // This invalidates the original region iterators. 566 Scheduler.schedule(); 567 568 // Close the current region. 569 Scheduler.exitRegion(); 570 } 571 Scheduler.finishBlock(); 572 // FIXME: Ideally, no further passes should rely on kill flags. However, 573 // thumb2 size reduction is currently an exception, so the PostMIScheduler 574 // needs to do this. 575 if (FixKillFlags) 576 Scheduler.fixupKills(*MBB); 577 } 578 Scheduler.finalizeSchedule(); 579 } 580 581 void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const { 582 // unimplemented 583 } 584 585 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 586 LLVM_DUMP_METHOD void ReadyQueue::dump() const { 587 dbgs() << "Queue " << Name << ": "; 588 for (const SUnit *SU : Queue) 589 dbgs() << SU->NodeNum << " "; 590 dbgs() << "\n"; 591 } 592 #endif 593 594 //===----------------------------------------------------------------------===// 595 // ScheduleDAGMI - Basic machine instruction scheduling. This is 596 // independent of PreRA/PostRA scheduling and involves no extra book-keeping for 597 // virtual registers. 598 // ===----------------------------------------------------------------------===/ 599 600 // Provide a vtable anchor. 601 ScheduleDAGMI::~ScheduleDAGMI() = default; 602 603 bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) { 604 return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU); 605 } 606 607 bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) { 608 if (SuccSU != &ExitSU) { 609 // Do not use WillCreateCycle, it assumes SD scheduling. 610 // If Pred is reachable from Succ, then the edge creates a cycle. 611 if (Topo.IsReachable(PredDep.getSUnit(), SuccSU)) 612 return false; 613 Topo.AddPred(SuccSU, PredDep.getSUnit()); 614 } 615 SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial()); 616 // Return true regardless of whether a new edge needed to be inserted. 617 return true; 618 } 619 620 /// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When 621 /// NumPredsLeft reaches zero, release the successor node. 622 /// 623 /// FIXME: Adjust SuccSU height based on MinLatency. 624 void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) { 625 SUnit *SuccSU = SuccEdge->getSUnit(); 626 627 if (SuccEdge->isWeak()) { 628 --SuccSU->WeakPredsLeft; 629 if (SuccEdge->isCluster()) 630 NextClusterSucc = SuccSU; 631 return; 632 } 633 #ifndef NDEBUG 634 if (SuccSU->NumPredsLeft == 0) { 635 dbgs() << "*** Scheduling failed! ***\n"; 636 dumpNode(*SuccSU); 637 dbgs() << " has been released too many times!\n"; 638 llvm_unreachable(nullptr); 639 } 640 #endif 641 // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However, 642 // CurrCycle may have advanced since then. 643 if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency()) 644 SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency(); 645 646 --SuccSU->NumPredsLeft; 647 if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU) 648 SchedImpl->releaseTopNode(SuccSU); 649 } 650 651 /// releaseSuccessors - Call releaseSucc on each of SU's successors. 652 void ScheduleDAGMI::releaseSuccessors(SUnit *SU) { 653 for (SDep &Succ : SU->Succs) 654 releaseSucc(SU, &Succ); 655 } 656 657 /// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When 658 /// NumSuccsLeft reaches zero, release the predecessor node. 659 /// 660 /// FIXME: Adjust PredSU height based on MinLatency. 661 void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) { 662 SUnit *PredSU = PredEdge->getSUnit(); 663 664 if (PredEdge->isWeak()) { 665 --PredSU->WeakSuccsLeft; 666 if (PredEdge->isCluster()) 667 NextClusterPred = PredSU; 668 return; 669 } 670 #ifndef NDEBUG 671 if (PredSU->NumSuccsLeft == 0) { 672 dbgs() << "*** Scheduling failed! ***\n"; 673 dumpNode(*PredSU); 674 dbgs() << " has been released too many times!\n"; 675 llvm_unreachable(nullptr); 676 } 677 #endif 678 // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However, 679 // CurrCycle may have advanced since then. 680 if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency()) 681 PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency(); 682 683 --PredSU->NumSuccsLeft; 684 if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) 685 SchedImpl->releaseBottomNode(PredSU); 686 } 687 688 /// releasePredecessors - Call releasePred on each of SU's predecessors. 689 void ScheduleDAGMI::releasePredecessors(SUnit *SU) { 690 for (SDep &Pred : SU->Preds) 691 releasePred(SU, &Pred); 692 } 693 694 void ScheduleDAGMI::startBlock(MachineBasicBlock *bb) { 695 ScheduleDAGInstrs::startBlock(bb); 696 SchedImpl->enterMBB(bb); 697 } 698 699 void ScheduleDAGMI::finishBlock() { 700 SchedImpl->leaveMBB(); 701 ScheduleDAGInstrs::finishBlock(); 702 } 703 704 /// enterRegion - Called back from MachineScheduler::runOnMachineFunction after 705 /// crossing a scheduling boundary. [begin, end) includes all instructions in 706 /// the region, including the boundary itself and single-instruction regions 707 /// that don't get scheduled. 708 void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb, 709 MachineBasicBlock::iterator begin, 710 MachineBasicBlock::iterator end, 711 unsigned regioninstrs) 712 { 713 ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs); 714 715 SchedImpl->initPolicy(begin, end, regioninstrs); 716 } 717 718 /// This is normally called from the main scheduler loop but may also be invoked 719 /// by the scheduling strategy to perform additional code motion. 720 void ScheduleDAGMI::moveInstruction( 721 MachineInstr *MI, MachineBasicBlock::iterator InsertPos) { 722 // Advance RegionBegin if the first instruction moves down. 723 if (&*RegionBegin == MI) 724 ++RegionBegin; 725 726 // Update the instruction stream. 727 BB->splice(InsertPos, BB, MI); 728 729 // Update LiveIntervals 730 if (LIS) 731 LIS->handleMove(*MI, /*UpdateFlags=*/true); 732 733 // Recede RegionBegin if an instruction moves above the first. 734 if (RegionBegin == InsertPos) 735 RegionBegin = MI; 736 } 737 738 bool ScheduleDAGMI::checkSchedLimit() { 739 #ifndef NDEBUG 740 if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) { 741 CurrentTop = CurrentBottom; 742 return false; 743 } 744 ++NumInstrsScheduled; 745 #endif 746 return true; 747 } 748 749 /// Per-region scheduling driver, called back from 750 /// MachineScheduler::runOnMachineFunction. This is a simplified driver that 751 /// does not consider liveness or register pressure. It is useful for PostRA 752 /// scheduling and potentially other custom schedulers. 753 void ScheduleDAGMI::schedule() { 754 LLVM_DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n"); 755 LLVM_DEBUG(SchedImpl->dumpPolicy()); 756 757 // Build the DAG. 758 buildSchedGraph(AA); 759 760 Topo.InitDAGTopologicalSorting(); 761 762 postprocessDAG(); 763 764 SmallVector<SUnit*, 8> TopRoots, BotRoots; 765 findRootsAndBiasEdges(TopRoots, BotRoots); 766 767 LLVM_DEBUG(dump()); 768 if (ViewMISchedDAGs) viewGraph(); 769 770 // Initialize the strategy before modifying the DAG. 771 // This may initialize a DFSResult to be used for queue priority. 772 SchedImpl->initialize(this); 773 774 // Initialize ready queues now that the DAG and priority data are finalized. 775 initQueues(TopRoots, BotRoots); 776 777 bool IsTopNode = false; 778 while (true) { 779 LLVM_DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n"); 780 SUnit *SU = SchedImpl->pickNode(IsTopNode); 781 if (!SU) break; 782 783 assert(!SU->isScheduled && "Node already scheduled"); 784 if (!checkSchedLimit()) 785 break; 786 787 MachineInstr *MI = SU->getInstr(); 788 if (IsTopNode) { 789 assert(SU->isTopReady() && "node still has unscheduled dependencies"); 790 if (&*CurrentTop == MI) 791 CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom); 792 else 793 moveInstruction(MI, CurrentTop); 794 } else { 795 assert(SU->isBottomReady() && "node still has unscheduled dependencies"); 796 MachineBasicBlock::iterator priorII = 797 priorNonDebug(CurrentBottom, CurrentTop); 798 if (&*priorII == MI) 799 CurrentBottom = priorII; 800 else { 801 if (&*CurrentTop == MI) 802 CurrentTop = nextIfDebug(++CurrentTop, priorII); 803 moveInstruction(MI, CurrentBottom); 804 CurrentBottom = MI; 805 } 806 } 807 // Notify the scheduling strategy before updating the DAG. 808 // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues 809 // runs, it can then use the accurate ReadyCycle time to determine whether 810 // newly released nodes can move to the readyQ. 811 SchedImpl->schedNode(SU, IsTopNode); 812 813 updateQueues(SU, IsTopNode); 814 } 815 assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone."); 816 817 placeDebugValues(); 818 819 LLVM_DEBUG({ 820 dbgs() << "*** Final schedule for " 821 << printMBBReference(*begin()->getParent()) << " ***\n"; 822 dumpSchedule(); 823 dbgs() << '\n'; 824 }); 825 } 826 827 /// Apply each ScheduleDAGMutation step in order. 828 void ScheduleDAGMI::postprocessDAG() { 829 for (auto &m : Mutations) 830 m->apply(this); 831 } 832 833 void ScheduleDAGMI:: 834 findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots, 835 SmallVectorImpl<SUnit*> &BotRoots) { 836 for (SUnit &SU : SUnits) { 837 assert(!SU.isBoundaryNode() && "Boundary node should not be in SUnits"); 838 839 // Order predecessors so DFSResult follows the critical path. 840 SU.biasCriticalPath(); 841 842 // A SUnit is ready to top schedule if it has no predecessors. 843 if (!SU.NumPredsLeft) 844 TopRoots.push_back(&SU); 845 // A SUnit is ready to bottom schedule if it has no successors. 846 if (!SU.NumSuccsLeft) 847 BotRoots.push_back(&SU); 848 } 849 ExitSU.biasCriticalPath(); 850 } 851 852 /// Identify DAG roots and setup scheduler queues. 853 void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots, 854 ArrayRef<SUnit*> BotRoots) { 855 NextClusterSucc = nullptr; 856 NextClusterPred = nullptr; 857 858 // Release all DAG roots for scheduling, not including EntrySU/ExitSU. 859 // 860 // Nodes with unreleased weak edges can still be roots. 861 // Release top roots in forward order. 862 for (SUnit *SU : TopRoots) 863 SchedImpl->releaseTopNode(SU); 864 865 // Release bottom roots in reverse order so the higher priority nodes appear 866 // first. This is more natural and slightly more efficient. 867 for (SmallVectorImpl<SUnit*>::const_reverse_iterator 868 I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) { 869 SchedImpl->releaseBottomNode(*I); 870 } 871 872 releaseSuccessors(&EntrySU); 873 releasePredecessors(&ExitSU); 874 875 SchedImpl->registerRoots(); 876 877 // Advance past initial DebugValues. 878 CurrentTop = nextIfDebug(RegionBegin, RegionEnd); 879 CurrentBottom = RegionEnd; 880 } 881 882 /// Update scheduler queues after scheduling an instruction. 883 void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) { 884 // Release dependent instructions for scheduling. 885 if (IsTopNode) 886 releaseSuccessors(SU); 887 else 888 releasePredecessors(SU); 889 890 SU->isScheduled = true; 891 } 892 893 /// Reinsert any remaining debug_values, just like the PostRA scheduler. 894 void ScheduleDAGMI::placeDebugValues() { 895 // If first instruction was a DBG_VALUE then put it back. 896 if (FirstDbgValue) { 897 BB->splice(RegionBegin, BB, FirstDbgValue); 898 RegionBegin = FirstDbgValue; 899 } 900 901 for (std::vector<std::pair<MachineInstr *, MachineInstr *>>::iterator 902 DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) { 903 std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI); 904 MachineInstr *DbgValue = P.first; 905 MachineBasicBlock::iterator OrigPrevMI = P.second; 906 if (&*RegionBegin == DbgValue) 907 ++RegionBegin; 908 BB->splice(++OrigPrevMI, BB, DbgValue); 909 if (OrigPrevMI == std::prev(RegionEnd)) 910 RegionEnd = DbgValue; 911 } 912 DbgValues.clear(); 913 FirstDbgValue = nullptr; 914 } 915 916 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 917 LLVM_DUMP_METHOD void ScheduleDAGMI::dumpSchedule() const { 918 for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) { 919 if (SUnit *SU = getSUnit(&(*MI))) 920 dumpNode(*SU); 921 else 922 dbgs() << "Missing SUnit\n"; 923 } 924 } 925 #endif 926 927 //===----------------------------------------------------------------------===// 928 // ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals 929 // preservation. 930 //===----------------------------------------------------------------------===// 931 932 ScheduleDAGMILive::~ScheduleDAGMILive() { 933 delete DFSResult; 934 } 935 936 void ScheduleDAGMILive::collectVRegUses(SUnit &SU) { 937 const MachineInstr &MI = *SU.getInstr(); 938 for (const MachineOperand &MO : MI.operands()) { 939 if (!MO.isReg()) 940 continue; 941 if (!MO.readsReg()) 942 continue; 943 if (TrackLaneMasks && !MO.isUse()) 944 continue; 945 946 unsigned Reg = MO.getReg(); 947 if (!TargetRegisterInfo::isVirtualRegister(Reg)) 948 continue; 949 950 // Ignore re-defs. 951 if (TrackLaneMasks) { 952 bool FoundDef = false; 953 for (const MachineOperand &MO2 : MI.operands()) { 954 if (MO2.isReg() && MO2.isDef() && MO2.getReg() == Reg && !MO2.isDead()) { 955 FoundDef = true; 956 break; 957 } 958 } 959 if (FoundDef) 960 continue; 961 } 962 963 // Record this local VReg use. 964 VReg2SUnitMultiMap::iterator UI = VRegUses.find(Reg); 965 for (; UI != VRegUses.end(); ++UI) { 966 if (UI->SU == &SU) 967 break; 968 } 969 if (UI == VRegUses.end()) 970 VRegUses.insert(VReg2SUnit(Reg, LaneBitmask::getNone(), &SU)); 971 } 972 } 973 974 /// enterRegion - Called back from MachineScheduler::runOnMachineFunction after 975 /// crossing a scheduling boundary. [begin, end) includes all instructions in 976 /// the region, including the boundary itself and single-instruction regions 977 /// that don't get scheduled. 978 void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb, 979 MachineBasicBlock::iterator begin, 980 MachineBasicBlock::iterator end, 981 unsigned regioninstrs) 982 { 983 // ScheduleDAGMI initializes SchedImpl's per-region policy. 984 ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs); 985 986 // For convenience remember the end of the liveness region. 987 LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd); 988 989 SUPressureDiffs.clear(); 990 991 ShouldTrackPressure = SchedImpl->shouldTrackPressure(); 992 ShouldTrackLaneMasks = SchedImpl->shouldTrackLaneMasks(); 993 994 assert((!ShouldTrackLaneMasks || ShouldTrackPressure) && 995 "ShouldTrackLaneMasks requires ShouldTrackPressure"); 996 } 997 998 // Setup the register pressure trackers for the top scheduled top and bottom 999 // scheduled regions. 1000 void ScheduleDAGMILive::initRegPressure() { 1001 VRegUses.clear(); 1002 VRegUses.setUniverse(MRI.getNumVirtRegs()); 1003 for (SUnit &SU : SUnits) 1004 collectVRegUses(SU); 1005 1006 TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin, 1007 ShouldTrackLaneMasks, false); 1008 BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd, 1009 ShouldTrackLaneMasks, false); 1010 1011 // Close the RPTracker to finalize live ins. 1012 RPTracker.closeRegion(); 1013 1014 LLVM_DEBUG(RPTracker.dump()); 1015 1016 // Initialize the live ins and live outs. 1017 TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs); 1018 BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs); 1019 1020 // Close one end of the tracker so we can call 1021 // getMaxUpward/DownwardPressureDelta before advancing across any 1022 // instructions. This converts currently live regs into live ins/outs. 1023 TopRPTracker.closeTop(); 1024 BotRPTracker.closeBottom(); 1025 1026 BotRPTracker.initLiveThru(RPTracker); 1027 if (!BotRPTracker.getLiveThru().empty()) { 1028 TopRPTracker.initLiveThru(BotRPTracker.getLiveThru()); 1029 LLVM_DEBUG(dbgs() << "Live Thru: "; 1030 dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI)); 1031 }; 1032 1033 // For each live out vreg reduce the pressure change associated with other 1034 // uses of the same vreg below the live-out reaching def. 1035 updatePressureDiffs(RPTracker.getPressure().LiveOutRegs); 1036 1037 // Account for liveness generated by the region boundary. 1038 if (LiveRegionEnd != RegionEnd) { 1039 SmallVector<RegisterMaskPair, 8> LiveUses; 1040 BotRPTracker.recede(&LiveUses); 1041 updatePressureDiffs(LiveUses); 1042 } 1043 1044 LLVM_DEBUG(dbgs() << "Top Pressure:\n"; 1045 dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI); 1046 dbgs() << "Bottom Pressure:\n"; 1047 dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);); 1048 1049 assert((BotRPTracker.getPos() == RegionEnd || 1050 (RegionEnd->isDebugInstr() && 1051 BotRPTracker.getPos() == priorNonDebug(RegionEnd, RegionBegin))) && 1052 "Can't find the region bottom"); 1053 1054 // Cache the list of excess pressure sets in this region. This will also track 1055 // the max pressure in the scheduled code for these sets. 1056 RegionCriticalPSets.clear(); 1057 const std::vector<unsigned> &RegionPressure = 1058 RPTracker.getPressure().MaxSetPressure; 1059 for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) { 1060 unsigned Limit = RegClassInfo->getRegPressureSetLimit(i); 1061 if (RegionPressure[i] > Limit) { 1062 LLVM_DEBUG(dbgs() << TRI->getRegPressureSetName(i) << " Limit " << Limit 1063 << " Actual " << RegionPressure[i] << "\n"); 1064 RegionCriticalPSets.push_back(PressureChange(i)); 1065 } 1066 } 1067 LLVM_DEBUG(dbgs() << "Excess PSets: "; 1068 for (const PressureChange &RCPS 1069 : RegionCriticalPSets) dbgs() 1070 << TRI->getRegPressureSetName(RCPS.getPSet()) << " "; 1071 dbgs() << "\n"); 1072 } 1073 1074 void ScheduleDAGMILive:: 1075 updateScheduledPressure(const SUnit *SU, 1076 const std::vector<unsigned> &NewMaxPressure) { 1077 const PressureDiff &PDiff = getPressureDiff(SU); 1078 unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size(); 1079 for (const PressureChange &PC : PDiff) { 1080 if (!PC.isValid()) 1081 break; 1082 unsigned ID = PC.getPSet(); 1083 while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID) 1084 ++CritIdx; 1085 if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) { 1086 if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc() 1087 && NewMaxPressure[ID] <= (unsigned)std::numeric_limits<int16_t>::max()) 1088 RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]); 1089 } 1090 unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID); 1091 if (NewMaxPressure[ID] >= Limit - 2) { 1092 LLVM_DEBUG(dbgs() << " " << TRI->getRegPressureSetName(ID) << ": " 1093 << NewMaxPressure[ID] 1094 << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ") 1095 << Limit << "(+ " << BotRPTracker.getLiveThru()[ID] 1096 << " livethru)\n"); 1097 } 1098 } 1099 } 1100 1101 /// Update the PressureDiff array for liveness after scheduling this 1102 /// instruction. 1103 void ScheduleDAGMILive::updatePressureDiffs( 1104 ArrayRef<RegisterMaskPair> LiveUses) { 1105 for (const RegisterMaskPair &P : LiveUses) { 1106 unsigned Reg = P.RegUnit; 1107 /// FIXME: Currently assuming single-use physregs. 1108 if (!TRI->isVirtualRegister(Reg)) 1109 continue; 1110 1111 if (ShouldTrackLaneMasks) { 1112 // If the register has just become live then other uses won't change 1113 // this fact anymore => decrement pressure. 1114 // If the register has just become dead then other uses make it come 1115 // back to life => increment pressure. 1116 bool Decrement = P.LaneMask.any(); 1117 1118 for (const VReg2SUnit &V2SU 1119 : make_range(VRegUses.find(Reg), VRegUses.end())) { 1120 SUnit &SU = *V2SU.SU; 1121 if (SU.isScheduled || &SU == &ExitSU) 1122 continue; 1123 1124 PressureDiff &PDiff = getPressureDiff(&SU); 1125 PDiff.addPressureChange(Reg, Decrement, &MRI); 1126 LLVM_DEBUG(dbgs() << " UpdateRegP: SU(" << SU.NodeNum << ") " 1127 << printReg(Reg, TRI) << ':' 1128 << PrintLaneMask(P.LaneMask) << ' ' << *SU.getInstr(); 1129 dbgs() << " to "; PDiff.dump(*TRI);); 1130 } 1131 } else { 1132 assert(P.LaneMask.any()); 1133 LLVM_DEBUG(dbgs() << " LiveReg: " << printVRegOrUnit(Reg, TRI) << "\n"); 1134 // This may be called before CurrentBottom has been initialized. However, 1135 // BotRPTracker must have a valid position. We want the value live into the 1136 // instruction or live out of the block, so ask for the previous 1137 // instruction's live-out. 1138 const LiveInterval &LI = LIS->getInterval(Reg); 1139 VNInfo *VNI; 1140 MachineBasicBlock::const_iterator I = 1141 nextIfDebug(BotRPTracker.getPos(), BB->end()); 1142 if (I == BB->end()) 1143 VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB)); 1144 else { 1145 LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*I)); 1146 VNI = LRQ.valueIn(); 1147 } 1148 // RegisterPressureTracker guarantees that readsReg is true for LiveUses. 1149 assert(VNI && "No live value at use."); 1150 for (const VReg2SUnit &V2SU 1151 : make_range(VRegUses.find(Reg), VRegUses.end())) { 1152 SUnit *SU = V2SU.SU; 1153 // If this use comes before the reaching def, it cannot be a last use, 1154 // so decrease its pressure change. 1155 if (!SU->isScheduled && SU != &ExitSU) { 1156 LiveQueryResult LRQ = 1157 LI.Query(LIS->getInstructionIndex(*SU->getInstr())); 1158 if (LRQ.valueIn() == VNI) { 1159 PressureDiff &PDiff = getPressureDiff(SU); 1160 PDiff.addPressureChange(Reg, true, &MRI); 1161 LLVM_DEBUG(dbgs() << " UpdateRegP: SU(" << SU->NodeNum << ") " 1162 << *SU->getInstr(); 1163 dbgs() << " to "; PDiff.dump(*TRI);); 1164 } 1165 } 1166 } 1167 } 1168 } 1169 } 1170 1171 void ScheduleDAGMILive::dump() const { 1172 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1173 if (EntrySU.getInstr() != nullptr) 1174 dumpNodeAll(EntrySU); 1175 for (const SUnit &SU : SUnits) { 1176 dumpNodeAll(SU); 1177 if (ShouldTrackPressure) { 1178 dbgs() << " Pressure Diff : "; 1179 getPressureDiff(&SU).dump(*TRI); 1180 } 1181 dbgs() << " Single Issue : "; 1182 if (SchedModel.mustBeginGroup(SU.getInstr()) && 1183 SchedModel.mustEndGroup(SU.getInstr())) 1184 dbgs() << "true;"; 1185 else 1186 dbgs() << "false;"; 1187 dbgs() << '\n'; 1188 } 1189 if (ExitSU.getInstr() != nullptr) 1190 dumpNodeAll(ExitSU); 1191 #endif 1192 } 1193 1194 /// schedule - Called back from MachineScheduler::runOnMachineFunction 1195 /// after setting up the current scheduling region. [RegionBegin, RegionEnd) 1196 /// only includes instructions that have DAG nodes, not scheduling boundaries. 1197 /// 1198 /// This is a skeletal driver, with all the functionality pushed into helpers, 1199 /// so that it can be easily extended by experimental schedulers. Generally, 1200 /// implementing MachineSchedStrategy should be sufficient to implement a new 1201 /// scheduling algorithm. However, if a scheduler further subclasses 1202 /// ScheduleDAGMILive then it will want to override this virtual method in order 1203 /// to update any specialized state. 1204 void ScheduleDAGMILive::schedule() { 1205 LLVM_DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n"); 1206 LLVM_DEBUG(SchedImpl->dumpPolicy()); 1207 buildDAGWithRegPressure(); 1208 1209 Topo.InitDAGTopologicalSorting(); 1210 1211 postprocessDAG(); 1212 1213 SmallVector<SUnit*, 8> TopRoots, BotRoots; 1214 findRootsAndBiasEdges(TopRoots, BotRoots); 1215 1216 // Initialize the strategy before modifying the DAG. 1217 // This may initialize a DFSResult to be used for queue priority. 1218 SchedImpl->initialize(this); 1219 1220 LLVM_DEBUG(dump()); 1221 if (ViewMISchedDAGs) viewGraph(); 1222 1223 // Initialize ready queues now that the DAG and priority data are finalized. 1224 initQueues(TopRoots, BotRoots); 1225 1226 bool IsTopNode = false; 1227 while (true) { 1228 LLVM_DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n"); 1229 SUnit *SU = SchedImpl->pickNode(IsTopNode); 1230 if (!SU) break; 1231 1232 assert(!SU->isScheduled && "Node already scheduled"); 1233 if (!checkSchedLimit()) 1234 break; 1235 1236 scheduleMI(SU, IsTopNode); 1237 1238 if (DFSResult) { 1239 unsigned SubtreeID = DFSResult->getSubtreeID(SU); 1240 if (!ScheduledTrees.test(SubtreeID)) { 1241 ScheduledTrees.set(SubtreeID); 1242 DFSResult->scheduleTree(SubtreeID); 1243 SchedImpl->scheduleTree(SubtreeID); 1244 } 1245 } 1246 1247 // Notify the scheduling strategy after updating the DAG. 1248 SchedImpl->schedNode(SU, IsTopNode); 1249 1250 updateQueues(SU, IsTopNode); 1251 } 1252 assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone."); 1253 1254 placeDebugValues(); 1255 1256 LLVM_DEBUG({ 1257 dbgs() << "*** Final schedule for " 1258 << printMBBReference(*begin()->getParent()) << " ***\n"; 1259 dumpSchedule(); 1260 dbgs() << '\n'; 1261 }); 1262 } 1263 1264 /// Build the DAG and setup three register pressure trackers. 1265 void ScheduleDAGMILive::buildDAGWithRegPressure() { 1266 if (!ShouldTrackPressure) { 1267 RPTracker.reset(); 1268 RegionCriticalPSets.clear(); 1269 buildSchedGraph(AA); 1270 return; 1271 } 1272 1273 // Initialize the register pressure tracker used by buildSchedGraph. 1274 RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd, 1275 ShouldTrackLaneMasks, /*TrackUntiedDefs=*/true); 1276 1277 // Account for liveness generate by the region boundary. 1278 if (LiveRegionEnd != RegionEnd) 1279 RPTracker.recede(); 1280 1281 // Build the DAG, and compute current register pressure. 1282 buildSchedGraph(AA, &RPTracker, &SUPressureDiffs, LIS, ShouldTrackLaneMasks); 1283 1284 // Initialize top/bottom trackers after computing region pressure. 1285 initRegPressure(); 1286 } 1287 1288 void ScheduleDAGMILive::computeDFSResult() { 1289 if (!DFSResult) 1290 DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize); 1291 DFSResult->clear(); 1292 ScheduledTrees.clear(); 1293 DFSResult->resize(SUnits.size()); 1294 DFSResult->compute(SUnits); 1295 ScheduledTrees.resize(DFSResult->getNumSubtrees()); 1296 } 1297 1298 /// Compute the max cyclic critical path through the DAG. The scheduling DAG 1299 /// only provides the critical path for single block loops. To handle loops that 1300 /// span blocks, we could use the vreg path latencies provided by 1301 /// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently 1302 /// available for use in the scheduler. 1303 /// 1304 /// The cyclic path estimation identifies a def-use pair that crosses the back 1305 /// edge and considers the depth and height of the nodes. For example, consider 1306 /// the following instruction sequence where each instruction has unit latency 1307 /// and defines an epomymous virtual register: 1308 /// 1309 /// a->b(a,c)->c(b)->d(c)->exit 1310 /// 1311 /// The cyclic critical path is a two cycles: b->c->b 1312 /// The acyclic critical path is four cycles: a->b->c->d->exit 1313 /// LiveOutHeight = height(c) = len(c->d->exit) = 2 1314 /// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3 1315 /// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4 1316 /// LiveInDepth = depth(b) = len(a->b) = 1 1317 /// 1318 /// LiveOutDepth - LiveInDepth = 3 - 1 = 2 1319 /// LiveInHeight - LiveOutHeight = 4 - 2 = 2 1320 /// CyclicCriticalPath = min(2, 2) = 2 1321 /// 1322 /// This could be relevant to PostRA scheduling, but is currently implemented 1323 /// assuming LiveIntervals. 1324 unsigned ScheduleDAGMILive::computeCyclicCriticalPath() { 1325 // This only applies to single block loop. 1326 if (!BB->isSuccessor(BB)) 1327 return 0; 1328 1329 unsigned MaxCyclicLatency = 0; 1330 // Visit each live out vreg def to find def/use pairs that cross iterations. 1331 for (const RegisterMaskPair &P : RPTracker.getPressure().LiveOutRegs) { 1332 unsigned Reg = P.RegUnit; 1333 if (!TRI->isVirtualRegister(Reg)) 1334 continue; 1335 const LiveInterval &LI = LIS->getInterval(Reg); 1336 const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB)); 1337 if (!DefVNI) 1338 continue; 1339 1340 MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def); 1341 const SUnit *DefSU = getSUnit(DefMI); 1342 if (!DefSU) 1343 continue; 1344 1345 unsigned LiveOutHeight = DefSU->getHeight(); 1346 unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency; 1347 // Visit all local users of the vreg def. 1348 for (const VReg2SUnit &V2SU 1349 : make_range(VRegUses.find(Reg), VRegUses.end())) { 1350 SUnit *SU = V2SU.SU; 1351 if (SU == &ExitSU) 1352 continue; 1353 1354 // Only consider uses of the phi. 1355 LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*SU->getInstr())); 1356 if (!LRQ.valueIn()->isPHIDef()) 1357 continue; 1358 1359 // Assume that a path spanning two iterations is a cycle, which could 1360 // overestimate in strange cases. This allows cyclic latency to be 1361 // estimated as the minimum slack of the vreg's depth or height. 1362 unsigned CyclicLatency = 0; 1363 if (LiveOutDepth > SU->getDepth()) 1364 CyclicLatency = LiveOutDepth - SU->getDepth(); 1365 1366 unsigned LiveInHeight = SU->getHeight() + DefSU->Latency; 1367 if (LiveInHeight > LiveOutHeight) { 1368 if (LiveInHeight - LiveOutHeight < CyclicLatency) 1369 CyclicLatency = LiveInHeight - LiveOutHeight; 1370 } else 1371 CyclicLatency = 0; 1372 1373 LLVM_DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU(" 1374 << SU->NodeNum << ") = " << CyclicLatency << "c\n"); 1375 if (CyclicLatency > MaxCyclicLatency) 1376 MaxCyclicLatency = CyclicLatency; 1377 } 1378 } 1379 LLVM_DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n"); 1380 return MaxCyclicLatency; 1381 } 1382 1383 /// Release ExitSU predecessors and setup scheduler queues. Re-position 1384 /// the Top RP tracker in case the region beginning has changed. 1385 void ScheduleDAGMILive::initQueues(ArrayRef<SUnit*> TopRoots, 1386 ArrayRef<SUnit*> BotRoots) { 1387 ScheduleDAGMI::initQueues(TopRoots, BotRoots); 1388 if (ShouldTrackPressure) { 1389 assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker"); 1390 TopRPTracker.setPos(CurrentTop); 1391 } 1392 } 1393 1394 /// Move an instruction and update register pressure. 1395 void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) { 1396 // Move the instruction to its new location in the instruction stream. 1397 MachineInstr *MI = SU->getInstr(); 1398 1399 if (IsTopNode) { 1400 assert(SU->isTopReady() && "node still has unscheduled dependencies"); 1401 if (&*CurrentTop == MI) 1402 CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom); 1403 else { 1404 moveInstruction(MI, CurrentTop); 1405 TopRPTracker.setPos(MI); 1406 } 1407 1408 if (ShouldTrackPressure) { 1409 // Update top scheduled pressure. 1410 RegisterOperands RegOpers; 1411 RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false); 1412 if (ShouldTrackLaneMasks) { 1413 // Adjust liveness and add missing dead+read-undef flags. 1414 SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot(); 1415 RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI); 1416 } else { 1417 // Adjust for missing dead-def flags. 1418 RegOpers.detectDeadDefs(*MI, *LIS); 1419 } 1420 1421 TopRPTracker.advance(RegOpers); 1422 assert(TopRPTracker.getPos() == CurrentTop && "out of sync"); 1423 LLVM_DEBUG(dbgs() << "Top Pressure:\n"; dumpRegSetPressure( 1424 TopRPTracker.getRegSetPressureAtPos(), TRI);); 1425 1426 updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure); 1427 } 1428 } else { 1429 assert(SU->isBottomReady() && "node still has unscheduled dependencies"); 1430 MachineBasicBlock::iterator priorII = 1431 priorNonDebug(CurrentBottom, CurrentTop); 1432 if (&*priorII == MI) 1433 CurrentBottom = priorII; 1434 else { 1435 if (&*CurrentTop == MI) { 1436 CurrentTop = nextIfDebug(++CurrentTop, priorII); 1437 TopRPTracker.setPos(CurrentTop); 1438 } 1439 moveInstruction(MI, CurrentBottom); 1440 CurrentBottom = MI; 1441 BotRPTracker.setPos(CurrentBottom); 1442 } 1443 if (ShouldTrackPressure) { 1444 RegisterOperands RegOpers; 1445 RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false); 1446 if (ShouldTrackLaneMasks) { 1447 // Adjust liveness and add missing dead+read-undef flags. 1448 SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot(); 1449 RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI); 1450 } else { 1451 // Adjust for missing dead-def flags. 1452 RegOpers.detectDeadDefs(*MI, *LIS); 1453 } 1454 1455 if (BotRPTracker.getPos() != CurrentBottom) 1456 BotRPTracker.recedeSkipDebugValues(); 1457 SmallVector<RegisterMaskPair, 8> LiveUses; 1458 BotRPTracker.recede(RegOpers, &LiveUses); 1459 assert(BotRPTracker.getPos() == CurrentBottom && "out of sync"); 1460 LLVM_DEBUG(dbgs() << "Bottom Pressure:\n"; dumpRegSetPressure( 1461 BotRPTracker.getRegSetPressureAtPos(), TRI);); 1462 1463 updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure); 1464 updatePressureDiffs(LiveUses); 1465 } 1466 } 1467 } 1468 1469 //===----------------------------------------------------------------------===// 1470 // BaseMemOpClusterMutation - DAG post-processing to cluster loads or stores. 1471 //===----------------------------------------------------------------------===// 1472 1473 namespace { 1474 1475 /// Post-process the DAG to create cluster edges between neighboring 1476 /// loads or between neighboring stores. 1477 class BaseMemOpClusterMutation : public ScheduleDAGMutation { 1478 struct MemOpInfo { 1479 SUnit *SU; 1480 unsigned BaseReg; 1481 int64_t Offset; 1482 1483 MemOpInfo(SUnit *su, unsigned reg, int64_t ofs) 1484 : SU(su), BaseReg(reg), Offset(ofs) {} 1485 1486 bool operator<(const MemOpInfo&RHS) const { 1487 return std::tie(BaseReg, Offset, SU->NodeNum) < 1488 std::tie(RHS.BaseReg, RHS.Offset, RHS.SU->NodeNum); 1489 } 1490 }; 1491 1492 const TargetInstrInfo *TII; 1493 const TargetRegisterInfo *TRI; 1494 bool IsLoad; 1495 1496 public: 1497 BaseMemOpClusterMutation(const TargetInstrInfo *tii, 1498 const TargetRegisterInfo *tri, bool IsLoad) 1499 : TII(tii), TRI(tri), IsLoad(IsLoad) {} 1500 1501 void apply(ScheduleDAGInstrs *DAGInstrs) override; 1502 1503 protected: 1504 void clusterNeighboringMemOps(ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG); 1505 }; 1506 1507 class StoreClusterMutation : public BaseMemOpClusterMutation { 1508 public: 1509 StoreClusterMutation(const TargetInstrInfo *tii, 1510 const TargetRegisterInfo *tri) 1511 : BaseMemOpClusterMutation(tii, tri, false) {} 1512 }; 1513 1514 class LoadClusterMutation : public BaseMemOpClusterMutation { 1515 public: 1516 LoadClusterMutation(const TargetInstrInfo *tii, const TargetRegisterInfo *tri) 1517 : BaseMemOpClusterMutation(tii, tri, true) {} 1518 }; 1519 1520 } // end anonymous namespace 1521 1522 namespace llvm { 1523 1524 std::unique_ptr<ScheduleDAGMutation> 1525 createLoadClusterDAGMutation(const TargetInstrInfo *TII, 1526 const TargetRegisterInfo *TRI) { 1527 return EnableMemOpCluster ? llvm::make_unique<LoadClusterMutation>(TII, TRI) 1528 : nullptr; 1529 } 1530 1531 std::unique_ptr<ScheduleDAGMutation> 1532 createStoreClusterDAGMutation(const TargetInstrInfo *TII, 1533 const TargetRegisterInfo *TRI) { 1534 return EnableMemOpCluster ? llvm::make_unique<StoreClusterMutation>(TII, TRI) 1535 : nullptr; 1536 } 1537 1538 } // end namespace llvm 1539 1540 void BaseMemOpClusterMutation::clusterNeighboringMemOps( 1541 ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG) { 1542 SmallVector<MemOpInfo, 32> MemOpRecords; 1543 for (SUnit *SU : MemOps) { 1544 unsigned BaseReg; 1545 int64_t Offset; 1546 if (TII->getMemOpBaseRegImmOfs(*SU->getInstr(), BaseReg, Offset, TRI)) 1547 MemOpRecords.push_back(MemOpInfo(SU, BaseReg, Offset)); 1548 } 1549 if (MemOpRecords.size() < 2) 1550 return; 1551 1552 llvm::sort(MemOpRecords.begin(), MemOpRecords.end()); 1553 unsigned ClusterLength = 1; 1554 for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) { 1555 SUnit *SUa = MemOpRecords[Idx].SU; 1556 SUnit *SUb = MemOpRecords[Idx+1].SU; 1557 if (TII->shouldClusterMemOps(*SUa->getInstr(), MemOpRecords[Idx].BaseReg, 1558 *SUb->getInstr(), MemOpRecords[Idx+1].BaseReg, 1559 ClusterLength) && 1560 DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) { 1561 LLVM_DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU(" 1562 << SUb->NodeNum << ")\n"); 1563 // Copy successor edges from SUa to SUb. Interleaving computation 1564 // dependent on SUa can prevent load combining due to register reuse. 1565 // Predecessor edges do not need to be copied from SUb to SUa since nearby 1566 // loads should have effectively the same inputs. 1567 for (const SDep &Succ : SUa->Succs) { 1568 if (Succ.getSUnit() == SUb) 1569 continue; 1570 LLVM_DEBUG(dbgs() << " Copy Succ SU(" << Succ.getSUnit()->NodeNum 1571 << ")\n"); 1572 DAG->addEdge(Succ.getSUnit(), SDep(SUb, SDep::Artificial)); 1573 } 1574 ++ClusterLength; 1575 } else 1576 ClusterLength = 1; 1577 } 1578 } 1579 1580 /// Callback from DAG postProcessing to create cluster edges for loads. 1581 void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAGInstrs) { 1582 ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs); 1583 1584 // Map DAG NodeNum to store chain ID. 1585 DenseMap<unsigned, unsigned> StoreChainIDs; 1586 // Map each store chain to a set of dependent MemOps. 1587 SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents; 1588 for (SUnit &SU : DAG->SUnits) { 1589 if ((IsLoad && !SU.getInstr()->mayLoad()) || 1590 (!IsLoad && !SU.getInstr()->mayStore())) 1591 continue; 1592 1593 unsigned ChainPredID = DAG->SUnits.size(); 1594 for (const SDep &Pred : SU.Preds) { 1595 if (Pred.isCtrl()) { 1596 ChainPredID = Pred.getSUnit()->NodeNum; 1597 break; 1598 } 1599 } 1600 // Check if this chain-like pred has been seen 1601 // before. ChainPredID==MaxNodeID at the top of the schedule. 1602 unsigned NumChains = StoreChainDependents.size(); 1603 std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result = 1604 StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains)); 1605 if (Result.second) 1606 StoreChainDependents.resize(NumChains + 1); 1607 StoreChainDependents[Result.first->second].push_back(&SU); 1608 } 1609 1610 // Iterate over the store chains. 1611 for (auto &SCD : StoreChainDependents) 1612 clusterNeighboringMemOps(SCD, DAG); 1613 } 1614 1615 //===----------------------------------------------------------------------===// 1616 // CopyConstrain - DAG post-processing to encourage copy elimination. 1617 //===----------------------------------------------------------------------===// 1618 1619 namespace { 1620 1621 /// Post-process the DAG to create weak edges from all uses of a copy to 1622 /// the one use that defines the copy's source vreg, most likely an induction 1623 /// variable increment. 1624 class CopyConstrain : public ScheduleDAGMutation { 1625 // Transient state. 1626 SlotIndex RegionBeginIdx; 1627 1628 // RegionEndIdx is the slot index of the last non-debug instruction in the 1629 // scheduling region. So we may have RegionBeginIdx == RegionEndIdx. 1630 SlotIndex RegionEndIdx; 1631 1632 public: 1633 CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {} 1634 1635 void apply(ScheduleDAGInstrs *DAGInstrs) override; 1636 1637 protected: 1638 void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG); 1639 }; 1640 1641 } // end anonymous namespace 1642 1643 namespace llvm { 1644 1645 std::unique_ptr<ScheduleDAGMutation> 1646 createCopyConstrainDAGMutation(const TargetInstrInfo *TII, 1647 const TargetRegisterInfo *TRI) { 1648 return llvm::make_unique<CopyConstrain>(TII, TRI); 1649 } 1650 1651 } // end namespace llvm 1652 1653 /// constrainLocalCopy handles two possibilities: 1654 /// 1) Local src: 1655 /// I0: = dst 1656 /// I1: src = ... 1657 /// I2: = dst 1658 /// I3: dst = src (copy) 1659 /// (create pred->succ edges I0->I1, I2->I1) 1660 /// 1661 /// 2) Local copy: 1662 /// I0: dst = src (copy) 1663 /// I1: = dst 1664 /// I2: src = ... 1665 /// I3: = dst 1666 /// (create pred->succ edges I1->I2, I3->I2) 1667 /// 1668 /// Although the MachineScheduler is currently constrained to single blocks, 1669 /// this algorithm should handle extended blocks. An EBB is a set of 1670 /// contiguously numbered blocks such that the previous block in the EBB is 1671 /// always the single predecessor. 1672 void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) { 1673 LiveIntervals *LIS = DAG->getLIS(); 1674 MachineInstr *Copy = CopySU->getInstr(); 1675 1676 // Check for pure vreg copies. 1677 const MachineOperand &SrcOp = Copy->getOperand(1); 1678 unsigned SrcReg = SrcOp.getReg(); 1679 if (!TargetRegisterInfo::isVirtualRegister(SrcReg) || !SrcOp.readsReg()) 1680 return; 1681 1682 const MachineOperand &DstOp = Copy->getOperand(0); 1683 unsigned DstReg = DstOp.getReg(); 1684 if (!TargetRegisterInfo::isVirtualRegister(DstReg) || DstOp.isDead()) 1685 return; 1686 1687 // Check if either the dest or source is local. If it's live across a back 1688 // edge, it's not local. Note that if both vregs are live across the back 1689 // edge, we cannot successfully contrain the copy without cyclic scheduling. 1690 // If both the copy's source and dest are local live intervals, then we 1691 // should treat the dest as the global for the purpose of adding 1692 // constraints. This adds edges from source's other uses to the copy. 1693 unsigned LocalReg = SrcReg; 1694 unsigned GlobalReg = DstReg; 1695 LiveInterval *LocalLI = &LIS->getInterval(LocalReg); 1696 if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) { 1697 LocalReg = DstReg; 1698 GlobalReg = SrcReg; 1699 LocalLI = &LIS->getInterval(LocalReg); 1700 if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) 1701 return; 1702 } 1703 LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg); 1704 1705 // Find the global segment after the start of the local LI. 1706 LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex()); 1707 // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a 1708 // local live range. We could create edges from other global uses to the local 1709 // start, but the coalescer should have already eliminated these cases, so 1710 // don't bother dealing with it. 1711 if (GlobalSegment == GlobalLI->end()) 1712 return; 1713 1714 // If GlobalSegment is killed at the LocalLI->start, the call to find() 1715 // returned the next global segment. But if GlobalSegment overlaps with 1716 // LocalLI->start, then advance to the next segment. If a hole in GlobalLI 1717 // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole. 1718 if (GlobalSegment->contains(LocalLI->beginIndex())) 1719 ++GlobalSegment; 1720 1721 if (GlobalSegment == GlobalLI->end()) 1722 return; 1723 1724 // Check if GlobalLI contains a hole in the vicinity of LocalLI. 1725 if (GlobalSegment != GlobalLI->begin()) { 1726 // Two address defs have no hole. 1727 if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end, 1728 GlobalSegment->start)) { 1729 return; 1730 } 1731 // If the prior global segment may be defined by the same two-address 1732 // instruction that also defines LocalLI, then can't make a hole here. 1733 if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start, 1734 LocalLI->beginIndex())) { 1735 return; 1736 } 1737 // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise 1738 // it would be a disconnected component in the live range. 1739 assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() && 1740 "Disconnected LRG within the scheduling region."); 1741 } 1742 MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start); 1743 if (!GlobalDef) 1744 return; 1745 1746 SUnit *GlobalSU = DAG->getSUnit(GlobalDef); 1747 if (!GlobalSU) 1748 return; 1749 1750 // GlobalDef is the bottom of the GlobalLI hole. Open the hole by 1751 // constraining the uses of the last local def to precede GlobalDef. 1752 SmallVector<SUnit*,8> LocalUses; 1753 const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex()); 1754 MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def); 1755 SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef); 1756 for (const SDep &Succ : LastLocalSU->Succs) { 1757 if (Succ.getKind() != SDep::Data || Succ.getReg() != LocalReg) 1758 continue; 1759 if (Succ.getSUnit() == GlobalSU) 1760 continue; 1761 if (!DAG->canAddEdge(GlobalSU, Succ.getSUnit())) 1762 return; 1763 LocalUses.push_back(Succ.getSUnit()); 1764 } 1765 // Open the top of the GlobalLI hole by constraining any earlier global uses 1766 // to precede the start of LocalLI. 1767 SmallVector<SUnit*,8> GlobalUses; 1768 MachineInstr *FirstLocalDef = 1769 LIS->getInstructionFromIndex(LocalLI->beginIndex()); 1770 SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef); 1771 for (const SDep &Pred : GlobalSU->Preds) { 1772 if (Pred.getKind() != SDep::Anti || Pred.getReg() != GlobalReg) 1773 continue; 1774 if (Pred.getSUnit() == FirstLocalSU) 1775 continue; 1776 if (!DAG->canAddEdge(FirstLocalSU, Pred.getSUnit())) 1777 return; 1778 GlobalUses.push_back(Pred.getSUnit()); 1779 } 1780 LLVM_DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n"); 1781 // Add the weak edges. 1782 for (SmallVectorImpl<SUnit*>::const_iterator 1783 I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) { 1784 LLVM_DEBUG(dbgs() << " Local use SU(" << (*I)->NodeNum << ") -> SU(" 1785 << GlobalSU->NodeNum << ")\n"); 1786 DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak)); 1787 } 1788 for (SmallVectorImpl<SUnit*>::const_iterator 1789 I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) { 1790 LLVM_DEBUG(dbgs() << " Global use SU(" << (*I)->NodeNum << ") -> SU(" 1791 << FirstLocalSU->NodeNum << ")\n"); 1792 DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak)); 1793 } 1794 } 1795 1796 /// Callback from DAG postProcessing to create weak edges to encourage 1797 /// copy elimination. 1798 void CopyConstrain::apply(ScheduleDAGInstrs *DAGInstrs) { 1799 ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs); 1800 assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals"); 1801 1802 MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end()); 1803 if (FirstPos == DAG->end()) 1804 return; 1805 RegionBeginIdx = DAG->getLIS()->getInstructionIndex(*FirstPos); 1806 RegionEndIdx = DAG->getLIS()->getInstructionIndex( 1807 *priorNonDebug(DAG->end(), DAG->begin())); 1808 1809 for (SUnit &SU : DAG->SUnits) { 1810 if (!SU.getInstr()->isCopy()) 1811 continue; 1812 1813 constrainLocalCopy(&SU, static_cast<ScheduleDAGMILive*>(DAG)); 1814 } 1815 } 1816 1817 //===----------------------------------------------------------------------===// 1818 // MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler 1819 // and possibly other custom schedulers. 1820 //===----------------------------------------------------------------------===// 1821 1822 static const unsigned InvalidCycle = ~0U; 1823 1824 SchedBoundary::~SchedBoundary() { delete HazardRec; } 1825 1826 /// Given a Count of resource usage and a Latency value, return true if a 1827 /// SchedBoundary becomes resource limited. 1828 static bool checkResourceLimit(unsigned LFactor, unsigned Count, 1829 unsigned Latency) { 1830 return (int)(Count - (Latency * LFactor)) > (int)LFactor; 1831 } 1832 1833 void SchedBoundary::reset() { 1834 // A new HazardRec is created for each DAG and owned by SchedBoundary. 1835 // Destroying and reconstructing it is very expensive though. So keep 1836 // invalid, placeholder HazardRecs. 1837 if (HazardRec && HazardRec->isEnabled()) { 1838 delete HazardRec; 1839 HazardRec = nullptr; 1840 } 1841 Available.clear(); 1842 Pending.clear(); 1843 CheckPending = false; 1844 CurrCycle = 0; 1845 CurrMOps = 0; 1846 MinReadyCycle = std::numeric_limits<unsigned>::max(); 1847 ExpectedLatency = 0; 1848 DependentLatency = 0; 1849 RetiredMOps = 0; 1850 MaxExecutedResCount = 0; 1851 ZoneCritResIdx = 0; 1852 IsResourceLimited = false; 1853 ReservedCycles.clear(); 1854 #ifndef NDEBUG 1855 // Track the maximum number of stall cycles that could arise either from the 1856 // latency of a DAG edge or the number of cycles that a processor resource is 1857 // reserved (SchedBoundary::ReservedCycles). 1858 MaxObservedStall = 0; 1859 #endif 1860 // Reserve a zero-count for invalid CritResIdx. 1861 ExecutedResCounts.resize(1); 1862 assert(!ExecutedResCounts[0] && "nonzero count for bad resource"); 1863 } 1864 1865 void SchedRemainder:: 1866 init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) { 1867 reset(); 1868 if (!SchedModel->hasInstrSchedModel()) 1869 return; 1870 RemainingCounts.resize(SchedModel->getNumProcResourceKinds()); 1871 for (SUnit &SU : DAG->SUnits) { 1872 const MCSchedClassDesc *SC = DAG->getSchedClass(&SU); 1873 RemIssueCount += SchedModel->getNumMicroOps(SU.getInstr(), SC) 1874 * SchedModel->getMicroOpFactor(); 1875 for (TargetSchedModel::ProcResIter 1876 PI = SchedModel->getWriteProcResBegin(SC), 1877 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) { 1878 unsigned PIdx = PI->ProcResourceIdx; 1879 unsigned Factor = SchedModel->getResourceFactor(PIdx); 1880 RemainingCounts[PIdx] += (Factor * PI->Cycles); 1881 } 1882 } 1883 } 1884 1885 void SchedBoundary:: 1886 init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) { 1887 reset(); 1888 DAG = dag; 1889 SchedModel = smodel; 1890 Rem = rem; 1891 if (SchedModel->hasInstrSchedModel()) { 1892 ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds()); 1893 ReservedCycles.resize(SchedModel->getNumProcResourceKinds(), InvalidCycle); 1894 } 1895 } 1896 1897 /// Compute the stall cycles based on this SUnit's ready time. Heuristics treat 1898 /// these "soft stalls" differently than the hard stall cycles based on CPU 1899 /// resources and computed by checkHazard(). A fully in-order model 1900 /// (MicroOpBufferSize==0) will not make use of this since instructions are not 1901 /// available for scheduling until they are ready. However, a weaker in-order 1902 /// model may use this for heuristics. For example, if a processor has in-order 1903 /// behavior when reading certain resources, this may come into play. 1904 unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) { 1905 if (!SU->isUnbuffered) 1906 return 0; 1907 1908 unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle); 1909 if (ReadyCycle > CurrCycle) 1910 return ReadyCycle - CurrCycle; 1911 return 0; 1912 } 1913 1914 /// Compute the next cycle at which the given processor resource can be 1915 /// scheduled. 1916 unsigned SchedBoundary:: 1917 getNextResourceCycle(unsigned PIdx, unsigned Cycles) { 1918 unsigned NextUnreserved = ReservedCycles[PIdx]; 1919 // If this resource has never been used, always return cycle zero. 1920 if (NextUnreserved == InvalidCycle) 1921 return 0; 1922 // For bottom-up scheduling add the cycles needed for the current operation. 1923 if (!isTop()) 1924 NextUnreserved += Cycles; 1925 return NextUnreserved; 1926 } 1927 1928 /// Does this SU have a hazard within the current instruction group. 1929 /// 1930 /// The scheduler supports two modes of hazard recognition. The first is the 1931 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that 1932 /// supports highly complicated in-order reservation tables 1933 /// (ScoreboardHazardRecognizer) and arbitrary target-specific logic. 1934 /// 1935 /// The second is a streamlined mechanism that checks for hazards based on 1936 /// simple counters that the scheduler itself maintains. It explicitly checks 1937 /// for instruction dispatch limitations, including the number of micro-ops that 1938 /// can dispatch per cycle. 1939 /// 1940 /// TODO: Also check whether the SU must start a new group. 1941 bool SchedBoundary::checkHazard(SUnit *SU) { 1942 if (HazardRec->isEnabled() 1943 && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) { 1944 return true; 1945 } 1946 1947 unsigned uops = SchedModel->getNumMicroOps(SU->getInstr()); 1948 if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) { 1949 LLVM_DEBUG(dbgs() << " SU(" << SU->NodeNum << ") uops=" 1950 << SchedModel->getNumMicroOps(SU->getInstr()) << '\n'); 1951 return true; 1952 } 1953 1954 if (CurrMOps > 0 && 1955 ((isTop() && SchedModel->mustBeginGroup(SU->getInstr())) || 1956 (!isTop() && SchedModel->mustEndGroup(SU->getInstr())))) { 1957 LLVM_DEBUG(dbgs() << " hazard: SU(" << SU->NodeNum << ") must " 1958 << (isTop() ? "begin" : "end") << " group\n"); 1959 return true; 1960 } 1961 1962 if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) { 1963 const MCSchedClassDesc *SC = DAG->getSchedClass(SU); 1964 for (const MCWriteProcResEntry &PE : 1965 make_range(SchedModel->getWriteProcResBegin(SC), 1966 SchedModel->getWriteProcResEnd(SC))) { 1967 unsigned ResIdx = PE.ProcResourceIdx; 1968 unsigned Cycles = PE.Cycles; 1969 unsigned NRCycle = getNextResourceCycle(ResIdx, Cycles); 1970 if (NRCycle > CurrCycle) { 1971 #ifndef NDEBUG 1972 MaxObservedStall = std::max(Cycles, MaxObservedStall); 1973 #endif 1974 LLVM_DEBUG(dbgs() << " SU(" << SU->NodeNum << ") " 1975 << SchedModel->getResourceName(ResIdx) << "=" 1976 << NRCycle << "c\n"); 1977 return true; 1978 } 1979 } 1980 } 1981 return false; 1982 } 1983 1984 // Find the unscheduled node in ReadySUs with the highest latency. 1985 unsigned SchedBoundary:: 1986 findMaxLatency(ArrayRef<SUnit*> ReadySUs) { 1987 SUnit *LateSU = nullptr; 1988 unsigned RemLatency = 0; 1989 for (SUnit *SU : ReadySUs) { 1990 unsigned L = getUnscheduledLatency(SU); 1991 if (L > RemLatency) { 1992 RemLatency = L; 1993 LateSU = SU; 1994 } 1995 } 1996 if (LateSU) { 1997 LLVM_DEBUG(dbgs() << Available.getName() << " RemLatency SU(" 1998 << LateSU->NodeNum << ") " << RemLatency << "c\n"); 1999 } 2000 return RemLatency; 2001 } 2002 2003 // Count resources in this zone and the remaining unscheduled 2004 // instruction. Return the max count, scaled. Set OtherCritIdx to the critical 2005 // resource index, or zero if the zone is issue limited. 2006 unsigned SchedBoundary:: 2007 getOtherResourceCount(unsigned &OtherCritIdx) { 2008 OtherCritIdx = 0; 2009 if (!SchedModel->hasInstrSchedModel()) 2010 return 0; 2011 2012 unsigned OtherCritCount = Rem->RemIssueCount 2013 + (RetiredMOps * SchedModel->getMicroOpFactor()); 2014 LLVM_DEBUG(dbgs() << " " << Available.getName() << " + Remain MOps: " 2015 << OtherCritCount / SchedModel->getMicroOpFactor() << '\n'); 2016 for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds(); 2017 PIdx != PEnd; ++PIdx) { 2018 unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx]; 2019 if (OtherCount > OtherCritCount) { 2020 OtherCritCount = OtherCount; 2021 OtherCritIdx = PIdx; 2022 } 2023 } 2024 if (OtherCritIdx) { 2025 LLVM_DEBUG( 2026 dbgs() << " " << Available.getName() << " + Remain CritRes: " 2027 << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx) 2028 << " " << SchedModel->getResourceName(OtherCritIdx) << "\n"); 2029 } 2030 return OtherCritCount; 2031 } 2032 2033 void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) { 2034 assert(SU->getInstr() && "Scheduled SUnit must have instr"); 2035 2036 #ifndef NDEBUG 2037 // ReadyCycle was been bumped up to the CurrCycle when this node was 2038 // scheduled, but CurrCycle may have been eagerly advanced immediately after 2039 // scheduling, so may now be greater than ReadyCycle. 2040 if (ReadyCycle > CurrCycle) 2041 MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall); 2042 #endif 2043 2044 if (ReadyCycle < MinReadyCycle) 2045 MinReadyCycle = ReadyCycle; 2046 2047 // Check for interlocks first. For the purpose of other heuristics, an 2048 // instruction that cannot issue appears as if it's not in the ReadyQueue. 2049 bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0; 2050 if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU) || 2051 Available.size() >= ReadyListLimit) 2052 Pending.push(SU); 2053 else 2054 Available.push(SU); 2055 } 2056 2057 /// Move the boundary of scheduled code by one cycle. 2058 void SchedBoundary::bumpCycle(unsigned NextCycle) { 2059 if (SchedModel->getMicroOpBufferSize() == 0) { 2060 assert(MinReadyCycle < std::numeric_limits<unsigned>::max() && 2061 "MinReadyCycle uninitialized"); 2062 if (MinReadyCycle > NextCycle) 2063 NextCycle = MinReadyCycle; 2064 } 2065 // Update the current micro-ops, which will issue in the next cycle. 2066 unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle); 2067 CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps; 2068 2069 // Decrement DependentLatency based on the next cycle. 2070 if ((NextCycle - CurrCycle) > DependentLatency) 2071 DependentLatency = 0; 2072 else 2073 DependentLatency -= (NextCycle - CurrCycle); 2074 2075 if (!HazardRec->isEnabled()) { 2076 // Bypass HazardRec virtual calls. 2077 CurrCycle = NextCycle; 2078 } else { 2079 // Bypass getHazardType calls in case of long latency. 2080 for (; CurrCycle != NextCycle; ++CurrCycle) { 2081 if (isTop()) 2082 HazardRec->AdvanceCycle(); 2083 else 2084 HazardRec->RecedeCycle(); 2085 } 2086 } 2087 CheckPending = true; 2088 IsResourceLimited = 2089 checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(), 2090 getScheduledLatency()); 2091 2092 LLVM_DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() 2093 << '\n'); 2094 } 2095 2096 void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) { 2097 ExecutedResCounts[PIdx] += Count; 2098 if (ExecutedResCounts[PIdx] > MaxExecutedResCount) 2099 MaxExecutedResCount = ExecutedResCounts[PIdx]; 2100 } 2101 2102 /// Add the given processor resource to this scheduled zone. 2103 /// 2104 /// \param Cycles indicates the number of consecutive (non-pipelined) cycles 2105 /// during which this resource is consumed. 2106 /// 2107 /// \return the next cycle at which the instruction may execute without 2108 /// oversubscribing resources. 2109 unsigned SchedBoundary:: 2110 countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) { 2111 unsigned Factor = SchedModel->getResourceFactor(PIdx); 2112 unsigned Count = Factor * Cycles; 2113 LLVM_DEBUG(dbgs() << " " << SchedModel->getResourceName(PIdx) << " +" 2114 << Cycles << "x" << Factor << "u\n"); 2115 2116 // Update Executed resources counts. 2117 incExecutedResources(PIdx, Count); 2118 assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted"); 2119 Rem->RemainingCounts[PIdx] -= Count; 2120 2121 // Check if this resource exceeds the current critical resource. If so, it 2122 // becomes the critical resource. 2123 if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) { 2124 ZoneCritResIdx = PIdx; 2125 LLVM_DEBUG(dbgs() << " *** Critical resource " 2126 << SchedModel->getResourceName(PIdx) << ": " 2127 << getResourceCount(PIdx) / SchedModel->getLatencyFactor() 2128 << "c\n"); 2129 } 2130 // For reserved resources, record the highest cycle using the resource. 2131 unsigned NextAvailable = getNextResourceCycle(PIdx, Cycles); 2132 if (NextAvailable > CurrCycle) { 2133 LLVM_DEBUG(dbgs() << " Resource conflict: " 2134 << SchedModel->getProcResource(PIdx)->Name 2135 << " reserved until @" << NextAvailable << "\n"); 2136 } 2137 return NextAvailable; 2138 } 2139 2140 /// Move the boundary of scheduled code by one SUnit. 2141 void SchedBoundary::bumpNode(SUnit *SU) { 2142 // Update the reservation table. 2143 if (HazardRec->isEnabled()) { 2144 if (!isTop() && SU->isCall) { 2145 // Calls are scheduled with their preceding instructions. For bottom-up 2146 // scheduling, clear the pipeline state before emitting. 2147 HazardRec->Reset(); 2148 } 2149 HazardRec->EmitInstruction(SU); 2150 } 2151 // checkHazard should prevent scheduling multiple instructions per cycle that 2152 // exceed the issue width. 2153 const MCSchedClassDesc *SC = DAG->getSchedClass(SU); 2154 unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr()); 2155 assert( 2156 (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) && 2157 "Cannot schedule this instruction's MicroOps in the current cycle."); 2158 2159 unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle); 2160 LLVM_DEBUG(dbgs() << " Ready @" << ReadyCycle << "c\n"); 2161 2162 unsigned NextCycle = CurrCycle; 2163 switch (SchedModel->getMicroOpBufferSize()) { 2164 case 0: 2165 assert(ReadyCycle <= CurrCycle && "Broken PendingQueue"); 2166 break; 2167 case 1: 2168 if (ReadyCycle > NextCycle) { 2169 NextCycle = ReadyCycle; 2170 LLVM_DEBUG(dbgs() << " *** Stall until: " << ReadyCycle << "\n"); 2171 } 2172 break; 2173 default: 2174 // We don't currently model the OOO reorder buffer, so consider all 2175 // scheduled MOps to be "retired". We do loosely model in-order resource 2176 // latency. If this instruction uses an in-order resource, account for any 2177 // likely stall cycles. 2178 if (SU->isUnbuffered && ReadyCycle > NextCycle) 2179 NextCycle = ReadyCycle; 2180 break; 2181 } 2182 RetiredMOps += IncMOps; 2183 2184 // Update resource counts and critical resource. 2185 if (SchedModel->hasInstrSchedModel()) { 2186 unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor(); 2187 assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted"); 2188 Rem->RemIssueCount -= DecRemIssue; 2189 if (ZoneCritResIdx) { 2190 // Scale scheduled micro-ops for comparing with the critical resource. 2191 unsigned ScaledMOps = 2192 RetiredMOps * SchedModel->getMicroOpFactor(); 2193 2194 // If scaled micro-ops are now more than the previous critical resource by 2195 // a full cycle, then micro-ops issue becomes critical. 2196 if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx)) 2197 >= (int)SchedModel->getLatencyFactor()) { 2198 ZoneCritResIdx = 0; 2199 LLVM_DEBUG(dbgs() << " *** Critical resource NumMicroOps: " 2200 << ScaledMOps / SchedModel->getLatencyFactor() 2201 << "c\n"); 2202 } 2203 } 2204 for (TargetSchedModel::ProcResIter 2205 PI = SchedModel->getWriteProcResBegin(SC), 2206 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) { 2207 unsigned RCycle = 2208 countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle); 2209 if (RCycle > NextCycle) 2210 NextCycle = RCycle; 2211 } 2212 if (SU->hasReservedResource) { 2213 // For reserved resources, record the highest cycle using the resource. 2214 // For top-down scheduling, this is the cycle in which we schedule this 2215 // instruction plus the number of cycles the operations reserves the 2216 // resource. For bottom-up is it simply the instruction's cycle. 2217 for (TargetSchedModel::ProcResIter 2218 PI = SchedModel->getWriteProcResBegin(SC), 2219 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) { 2220 unsigned PIdx = PI->ProcResourceIdx; 2221 if (SchedModel->getProcResource(PIdx)->BufferSize == 0) { 2222 if (isTop()) { 2223 ReservedCycles[PIdx] = 2224 std::max(getNextResourceCycle(PIdx, 0), NextCycle + PI->Cycles); 2225 } 2226 else 2227 ReservedCycles[PIdx] = NextCycle; 2228 } 2229 } 2230 } 2231 } 2232 // Update ExpectedLatency and DependentLatency. 2233 unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency; 2234 unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency; 2235 if (SU->getDepth() > TopLatency) { 2236 TopLatency = SU->getDepth(); 2237 LLVM_DEBUG(dbgs() << " " << Available.getName() << " TopLatency SU(" 2238 << SU->NodeNum << ") " << TopLatency << "c\n"); 2239 } 2240 if (SU->getHeight() > BotLatency) { 2241 BotLatency = SU->getHeight(); 2242 LLVM_DEBUG(dbgs() << " " << Available.getName() << " BotLatency SU(" 2243 << SU->NodeNum << ") " << BotLatency << "c\n"); 2244 } 2245 // If we stall for any reason, bump the cycle. 2246 if (NextCycle > CurrCycle) 2247 bumpCycle(NextCycle); 2248 else 2249 // After updating ZoneCritResIdx and ExpectedLatency, check if we're 2250 // resource limited. If a stall occurred, bumpCycle does this. 2251 IsResourceLimited = 2252 checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(), 2253 getScheduledLatency()); 2254 2255 // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle 2256 // resets CurrMOps. Loop to handle instructions with more MOps than issue in 2257 // one cycle. Since we commonly reach the max MOps here, opportunistically 2258 // bump the cycle to avoid uselessly checking everything in the readyQ. 2259 CurrMOps += IncMOps; 2260 2261 // Bump the cycle count for issue group constraints. 2262 // This must be done after NextCycle has been adjust for all other stalls. 2263 // Calling bumpCycle(X) will reduce CurrMOps by one issue group and set 2264 // currCycle to X. 2265 if ((isTop() && SchedModel->mustEndGroup(SU->getInstr())) || 2266 (!isTop() && SchedModel->mustBeginGroup(SU->getInstr()))) { 2267 LLVM_DEBUG(dbgs() << " Bump cycle to " << (isTop() ? "end" : "begin") 2268 << " group\n"); 2269 bumpCycle(++NextCycle); 2270 } 2271 2272 while (CurrMOps >= SchedModel->getIssueWidth()) { 2273 LLVM_DEBUG(dbgs() << " *** Max MOps " << CurrMOps << " at cycle " 2274 << CurrCycle << '\n'); 2275 bumpCycle(++NextCycle); 2276 } 2277 LLVM_DEBUG(dumpScheduledState()); 2278 } 2279 2280 /// Release pending ready nodes in to the available queue. This makes them 2281 /// visible to heuristics. 2282 void SchedBoundary::releasePending() { 2283 // If the available queue is empty, it is safe to reset MinReadyCycle. 2284 if (Available.empty()) 2285 MinReadyCycle = std::numeric_limits<unsigned>::max(); 2286 2287 // Check to see if any of the pending instructions are ready to issue. If 2288 // so, add them to the available queue. 2289 bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0; 2290 for (unsigned i = 0, e = Pending.size(); i != e; ++i) { 2291 SUnit *SU = *(Pending.begin()+i); 2292 unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle; 2293 2294 if (ReadyCycle < MinReadyCycle) 2295 MinReadyCycle = ReadyCycle; 2296 2297 if (!IsBuffered && ReadyCycle > CurrCycle) 2298 continue; 2299 2300 if (checkHazard(SU)) 2301 continue; 2302 2303 if (Available.size() >= ReadyListLimit) 2304 break; 2305 2306 Available.push(SU); 2307 Pending.remove(Pending.begin()+i); 2308 --i; --e; 2309 } 2310 CheckPending = false; 2311 } 2312 2313 /// Remove SU from the ready set for this boundary. 2314 void SchedBoundary::removeReady(SUnit *SU) { 2315 if (Available.isInQueue(SU)) 2316 Available.remove(Available.find(SU)); 2317 else { 2318 assert(Pending.isInQueue(SU) && "bad ready count"); 2319 Pending.remove(Pending.find(SU)); 2320 } 2321 } 2322 2323 /// If this queue only has one ready candidate, return it. As a side effect, 2324 /// defer any nodes that now hit a hazard, and advance the cycle until at least 2325 /// one node is ready. If multiple instructions are ready, return NULL. 2326 SUnit *SchedBoundary::pickOnlyChoice() { 2327 if (CheckPending) 2328 releasePending(); 2329 2330 if (CurrMOps > 0) { 2331 // Defer any ready instrs that now have a hazard. 2332 for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) { 2333 if (checkHazard(*I)) { 2334 Pending.push(*I); 2335 I = Available.remove(I); 2336 continue; 2337 } 2338 ++I; 2339 } 2340 } 2341 for (unsigned i = 0; Available.empty(); ++i) { 2342 // FIXME: Re-enable assert once PR20057 is resolved. 2343 // assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) && 2344 // "permanent hazard"); 2345 (void)i; 2346 bumpCycle(CurrCycle + 1); 2347 releasePending(); 2348 } 2349 2350 LLVM_DEBUG(Pending.dump()); 2351 LLVM_DEBUG(Available.dump()); 2352 2353 if (Available.size() == 1) 2354 return *Available.begin(); 2355 return nullptr; 2356 } 2357 2358 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 2359 // This is useful information to dump after bumpNode. 2360 // Note that the Queue contents are more useful before pickNodeFromQueue. 2361 LLVM_DUMP_METHOD void SchedBoundary::dumpScheduledState() const { 2362 unsigned ResFactor; 2363 unsigned ResCount; 2364 if (ZoneCritResIdx) { 2365 ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx); 2366 ResCount = getResourceCount(ZoneCritResIdx); 2367 } else { 2368 ResFactor = SchedModel->getMicroOpFactor(); 2369 ResCount = RetiredMOps * ResFactor; 2370 } 2371 unsigned LFactor = SchedModel->getLatencyFactor(); 2372 dbgs() << Available.getName() << " @" << CurrCycle << "c\n" 2373 << " Retired: " << RetiredMOps; 2374 dbgs() << "\n Executed: " << getExecutedCount() / LFactor << "c"; 2375 dbgs() << "\n Critical: " << ResCount / LFactor << "c, " 2376 << ResCount / ResFactor << " " 2377 << SchedModel->getResourceName(ZoneCritResIdx) 2378 << "\n ExpectedLatency: " << ExpectedLatency << "c\n" 2379 << (IsResourceLimited ? " - Resource" : " - Latency") 2380 << " limited.\n"; 2381 } 2382 #endif 2383 2384 //===----------------------------------------------------------------------===// 2385 // GenericScheduler - Generic implementation of MachineSchedStrategy. 2386 //===----------------------------------------------------------------------===// 2387 2388 void GenericSchedulerBase::SchedCandidate:: 2389 initResourceDelta(const ScheduleDAGMI *DAG, 2390 const TargetSchedModel *SchedModel) { 2391 if (!Policy.ReduceResIdx && !Policy.DemandResIdx) 2392 return; 2393 2394 const MCSchedClassDesc *SC = DAG->getSchedClass(SU); 2395 for (TargetSchedModel::ProcResIter 2396 PI = SchedModel->getWriteProcResBegin(SC), 2397 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) { 2398 if (PI->ProcResourceIdx == Policy.ReduceResIdx) 2399 ResDelta.CritResources += PI->Cycles; 2400 if (PI->ProcResourceIdx == Policy.DemandResIdx) 2401 ResDelta.DemandedResources += PI->Cycles; 2402 } 2403 } 2404 2405 /// Compute remaining latency. We need this both to determine whether the 2406 /// overall schedule has become latency-limited and whether the instructions 2407 /// outside this zone are resource or latency limited. 2408 /// 2409 /// The "dependent" latency is updated incrementally during scheduling as the 2410 /// max height/depth of scheduled nodes minus the cycles since it was 2411 /// scheduled: 2412 /// DLat = max (N.depth - (CurrCycle - N.ReadyCycle) for N in Zone 2413 /// 2414 /// The "independent" latency is the max ready queue depth: 2415 /// ILat = max N.depth for N in Available|Pending 2416 /// 2417 /// RemainingLatency is the greater of independent and dependent latency. 2418 /// 2419 /// These computations are expensive, especially in DAGs with many edges, so 2420 /// only do them if necessary. 2421 static unsigned computeRemLatency(SchedBoundary &CurrZone) { 2422 unsigned RemLatency = CurrZone.getDependentLatency(); 2423 RemLatency = std::max(RemLatency, 2424 CurrZone.findMaxLatency(CurrZone.Available.elements())); 2425 RemLatency = std::max(RemLatency, 2426 CurrZone.findMaxLatency(CurrZone.Pending.elements())); 2427 return RemLatency; 2428 } 2429 2430 /// Returns true if the current cycle plus remaning latency is greater than 2431 /// the cirtical path in the scheduling region. 2432 bool GenericSchedulerBase::shouldReduceLatency(const CandPolicy &Policy, 2433 SchedBoundary &CurrZone, 2434 bool ComputeRemLatency, 2435 unsigned &RemLatency) const { 2436 // The current cycle is already greater than the critical path, so we are 2437 // already latnecy limited and don't need to compute the remaining latency. 2438 if (CurrZone.getCurrCycle() > Rem.CriticalPath) 2439 return true; 2440 2441 // If we haven't scheduled anything yet, then we aren't latency limited. 2442 if (CurrZone.getCurrCycle() == 0) 2443 return false; 2444 2445 if (ComputeRemLatency) 2446 RemLatency = computeRemLatency(CurrZone); 2447 2448 return RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath; 2449 } 2450 2451 /// Set the CandPolicy given a scheduling zone given the current resources and 2452 /// latencies inside and outside the zone. 2453 void GenericSchedulerBase::setPolicy(CandPolicy &Policy, bool IsPostRA, 2454 SchedBoundary &CurrZone, 2455 SchedBoundary *OtherZone) { 2456 // Apply preemptive heuristics based on the total latency and resources 2457 // inside and outside this zone. Potential stalls should be considered before 2458 // following this policy. 2459 2460 // Compute the critical resource outside the zone. 2461 unsigned OtherCritIdx = 0; 2462 unsigned OtherCount = 2463 OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0; 2464 2465 bool OtherResLimited = false; 2466 unsigned RemLatency = 0; 2467 bool RemLatencyComputed = false; 2468 if (SchedModel->hasInstrSchedModel() && OtherCount != 0) { 2469 RemLatency = computeRemLatency(CurrZone); 2470 RemLatencyComputed = true; 2471 OtherResLimited = checkResourceLimit(SchedModel->getLatencyFactor(), 2472 OtherCount, RemLatency); 2473 } 2474 2475 // Schedule aggressively for latency in PostRA mode. We don't check for 2476 // acyclic latency during PostRA, and highly out-of-order processors will 2477 // skip PostRA scheduling. 2478 if (!OtherResLimited && 2479 (IsPostRA || shouldReduceLatency(Policy, CurrZone, !RemLatencyComputed, 2480 RemLatency))) { 2481 Policy.ReduceLatency |= true; 2482 LLVM_DEBUG(dbgs() << " " << CurrZone.Available.getName() 2483 << " RemainingLatency " << RemLatency << " + " 2484 << CurrZone.getCurrCycle() << "c > CritPath " 2485 << Rem.CriticalPath << "\n"); 2486 } 2487 // If the same resource is limiting inside and outside the zone, do nothing. 2488 if (CurrZone.getZoneCritResIdx() == OtherCritIdx) 2489 return; 2490 2491 LLVM_DEBUG(if (CurrZone.isResourceLimited()) { 2492 dbgs() << " " << CurrZone.Available.getName() << " ResourceLimited: " 2493 << SchedModel->getResourceName(CurrZone.getZoneCritResIdx()) << "\n"; 2494 } if (OtherResLimited) dbgs() 2495 << " RemainingLimit: " 2496 << SchedModel->getResourceName(OtherCritIdx) << "\n"; 2497 if (!CurrZone.isResourceLimited() && !OtherResLimited) dbgs() 2498 << " Latency limited both directions.\n"); 2499 2500 if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx) 2501 Policy.ReduceResIdx = CurrZone.getZoneCritResIdx(); 2502 2503 if (OtherResLimited) 2504 Policy.DemandResIdx = OtherCritIdx; 2505 } 2506 2507 #ifndef NDEBUG 2508 const char *GenericSchedulerBase::getReasonStr( 2509 GenericSchedulerBase::CandReason Reason) { 2510 switch (Reason) { 2511 case NoCand: return "NOCAND "; 2512 case Only1: return "ONLY1 "; 2513 case PhysRegCopy: return "PREG-COPY "; 2514 case RegExcess: return "REG-EXCESS"; 2515 case RegCritical: return "REG-CRIT "; 2516 case Stall: return "STALL "; 2517 case Cluster: return "CLUSTER "; 2518 case Weak: return "WEAK "; 2519 case RegMax: return "REG-MAX "; 2520 case ResourceReduce: return "RES-REDUCE"; 2521 case ResourceDemand: return "RES-DEMAND"; 2522 case TopDepthReduce: return "TOP-DEPTH "; 2523 case TopPathReduce: return "TOP-PATH "; 2524 case BotHeightReduce:return "BOT-HEIGHT"; 2525 case BotPathReduce: return "BOT-PATH "; 2526 case NextDefUse: return "DEF-USE "; 2527 case NodeOrder: return "ORDER "; 2528 }; 2529 llvm_unreachable("Unknown reason!"); 2530 } 2531 2532 void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) { 2533 PressureChange P; 2534 unsigned ResIdx = 0; 2535 unsigned Latency = 0; 2536 switch (Cand.Reason) { 2537 default: 2538 break; 2539 case RegExcess: 2540 P = Cand.RPDelta.Excess; 2541 break; 2542 case RegCritical: 2543 P = Cand.RPDelta.CriticalMax; 2544 break; 2545 case RegMax: 2546 P = Cand.RPDelta.CurrentMax; 2547 break; 2548 case ResourceReduce: 2549 ResIdx = Cand.Policy.ReduceResIdx; 2550 break; 2551 case ResourceDemand: 2552 ResIdx = Cand.Policy.DemandResIdx; 2553 break; 2554 case TopDepthReduce: 2555 Latency = Cand.SU->getDepth(); 2556 break; 2557 case TopPathReduce: 2558 Latency = Cand.SU->getHeight(); 2559 break; 2560 case BotHeightReduce: 2561 Latency = Cand.SU->getHeight(); 2562 break; 2563 case BotPathReduce: 2564 Latency = Cand.SU->getDepth(); 2565 break; 2566 } 2567 dbgs() << " Cand SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason); 2568 if (P.isValid()) 2569 dbgs() << " " << TRI->getRegPressureSetName(P.getPSet()) 2570 << ":" << P.getUnitInc() << " "; 2571 else 2572 dbgs() << " "; 2573 if (ResIdx) 2574 dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " "; 2575 else 2576 dbgs() << " "; 2577 if (Latency) 2578 dbgs() << " " << Latency << " cycles "; 2579 else 2580 dbgs() << " "; 2581 dbgs() << '\n'; 2582 } 2583 #endif 2584 2585 namespace llvm { 2586 /// Return true if this heuristic determines order. 2587 bool tryLess(int TryVal, int CandVal, 2588 GenericSchedulerBase::SchedCandidate &TryCand, 2589 GenericSchedulerBase::SchedCandidate &Cand, 2590 GenericSchedulerBase::CandReason Reason) { 2591 if (TryVal < CandVal) { 2592 TryCand.Reason = Reason; 2593 return true; 2594 } 2595 if (TryVal > CandVal) { 2596 if (Cand.Reason > Reason) 2597 Cand.Reason = Reason; 2598 return true; 2599 } 2600 return false; 2601 } 2602 2603 bool tryGreater(int TryVal, int CandVal, 2604 GenericSchedulerBase::SchedCandidate &TryCand, 2605 GenericSchedulerBase::SchedCandidate &Cand, 2606 GenericSchedulerBase::CandReason Reason) { 2607 if (TryVal > CandVal) { 2608 TryCand.Reason = Reason; 2609 return true; 2610 } 2611 if (TryVal < CandVal) { 2612 if (Cand.Reason > Reason) 2613 Cand.Reason = Reason; 2614 return true; 2615 } 2616 return false; 2617 } 2618 2619 bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand, 2620 GenericSchedulerBase::SchedCandidate &Cand, 2621 SchedBoundary &Zone) { 2622 if (Zone.isTop()) { 2623 if (Cand.SU->getDepth() > Zone.getScheduledLatency()) { 2624 if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(), 2625 TryCand, Cand, GenericSchedulerBase::TopDepthReduce)) 2626 return true; 2627 } 2628 if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(), 2629 TryCand, Cand, GenericSchedulerBase::TopPathReduce)) 2630 return true; 2631 } else { 2632 if (Cand.SU->getHeight() > Zone.getScheduledLatency()) { 2633 if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(), 2634 TryCand, Cand, GenericSchedulerBase::BotHeightReduce)) 2635 return true; 2636 } 2637 if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(), 2638 TryCand, Cand, GenericSchedulerBase::BotPathReduce)) 2639 return true; 2640 } 2641 return false; 2642 } 2643 } // end namespace llvm 2644 2645 static void tracePick(GenericSchedulerBase::CandReason Reason, bool IsTop) { 2646 LLVM_DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ") 2647 << GenericSchedulerBase::getReasonStr(Reason) << '\n'); 2648 } 2649 2650 static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand) { 2651 tracePick(Cand.Reason, Cand.AtTop); 2652 } 2653 2654 void GenericScheduler::initialize(ScheduleDAGMI *dag) { 2655 assert(dag->hasVRegLiveness() && 2656 "(PreRA)GenericScheduler needs vreg liveness"); 2657 DAG = static_cast<ScheduleDAGMILive*>(dag); 2658 SchedModel = DAG->getSchedModel(); 2659 TRI = DAG->TRI; 2660 2661 Rem.init(DAG, SchedModel); 2662 Top.init(DAG, SchedModel, &Rem); 2663 Bot.init(DAG, SchedModel, &Rem); 2664 2665 // Initialize resource counts. 2666 2667 // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or 2668 // are disabled, then these HazardRecs will be disabled. 2669 const InstrItineraryData *Itin = SchedModel->getInstrItineraries(); 2670 if (!Top.HazardRec) { 2671 Top.HazardRec = 2672 DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer( 2673 Itin, DAG); 2674 } 2675 if (!Bot.HazardRec) { 2676 Bot.HazardRec = 2677 DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer( 2678 Itin, DAG); 2679 } 2680 TopCand.SU = nullptr; 2681 BotCand.SU = nullptr; 2682 } 2683 2684 /// Initialize the per-region scheduling policy. 2685 void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin, 2686 MachineBasicBlock::iterator End, 2687 unsigned NumRegionInstrs) { 2688 const MachineFunction &MF = *Begin->getMF(); 2689 const TargetLowering *TLI = MF.getSubtarget().getTargetLowering(); 2690 2691 // Avoid setting up the register pressure tracker for small regions to save 2692 // compile time. As a rough heuristic, only track pressure when the number of 2693 // schedulable instructions exceeds half the integer register file. 2694 RegionPolicy.ShouldTrackPressure = true; 2695 for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) { 2696 MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT; 2697 if (TLI->isTypeLegal(LegalIntVT)) { 2698 unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs( 2699 TLI->getRegClassFor(LegalIntVT)); 2700 RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2); 2701 } 2702 } 2703 2704 // For generic targets, we default to bottom-up, because it's simpler and more 2705 // compile-time optimizations have been implemented in that direction. 2706 RegionPolicy.OnlyBottomUp = true; 2707 2708 // Allow the subtarget to override default policy. 2709 MF.getSubtarget().overrideSchedPolicy(RegionPolicy, NumRegionInstrs); 2710 2711 // After subtarget overrides, apply command line options. 2712 if (!EnableRegPressure) 2713 RegionPolicy.ShouldTrackPressure = false; 2714 2715 // Check -misched-topdown/bottomup can force or unforce scheduling direction. 2716 // e.g. -misched-bottomup=false allows scheduling in both directions. 2717 assert((!ForceTopDown || !ForceBottomUp) && 2718 "-misched-topdown incompatible with -misched-bottomup"); 2719 if (ForceBottomUp.getNumOccurrences() > 0) { 2720 RegionPolicy.OnlyBottomUp = ForceBottomUp; 2721 if (RegionPolicy.OnlyBottomUp) 2722 RegionPolicy.OnlyTopDown = false; 2723 } 2724 if (ForceTopDown.getNumOccurrences() > 0) { 2725 RegionPolicy.OnlyTopDown = ForceTopDown; 2726 if (RegionPolicy.OnlyTopDown) 2727 RegionPolicy.OnlyBottomUp = false; 2728 } 2729 } 2730 2731 void GenericScheduler::dumpPolicy() const { 2732 // Cannot completely remove virtual function even in release mode. 2733 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 2734 dbgs() << "GenericScheduler RegionPolicy: " 2735 << " ShouldTrackPressure=" << RegionPolicy.ShouldTrackPressure 2736 << " OnlyTopDown=" << RegionPolicy.OnlyTopDown 2737 << " OnlyBottomUp=" << RegionPolicy.OnlyBottomUp 2738 << "\n"; 2739 #endif 2740 } 2741 2742 /// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic 2743 /// critical path by more cycles than it takes to drain the instruction buffer. 2744 /// We estimate an upper bounds on in-flight instructions as: 2745 /// 2746 /// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height ) 2747 /// InFlightIterations = AcyclicPath / CyclesPerIteration 2748 /// InFlightResources = InFlightIterations * LoopResources 2749 /// 2750 /// TODO: Check execution resources in addition to IssueCount. 2751 void GenericScheduler::checkAcyclicLatency() { 2752 if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath) 2753 return; 2754 2755 // Scaled number of cycles per loop iteration. 2756 unsigned IterCount = 2757 std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(), 2758 Rem.RemIssueCount); 2759 // Scaled acyclic critical path. 2760 unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor(); 2761 // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop 2762 unsigned InFlightCount = 2763 (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount; 2764 unsigned BufferLimit = 2765 SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor(); 2766 2767 Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit; 2768 2769 LLVM_DEBUG( 2770 dbgs() << "IssueCycles=" 2771 << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c " 2772 << "IterCycles=" << IterCount / SchedModel->getLatencyFactor() 2773 << "c NumIters=" << (AcyclicCount + IterCount - 1) / IterCount 2774 << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor() 2775 << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n"; 2776 if (Rem.IsAcyclicLatencyLimited) dbgs() << " ACYCLIC LATENCY LIMIT\n"); 2777 } 2778 2779 void GenericScheduler::registerRoots() { 2780 Rem.CriticalPath = DAG->ExitSU.getDepth(); 2781 2782 // Some roots may not feed into ExitSU. Check all of them in case. 2783 for (const SUnit *SU : Bot.Available) { 2784 if (SU->getDepth() > Rem.CriticalPath) 2785 Rem.CriticalPath = SU->getDepth(); 2786 } 2787 LLVM_DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n'); 2788 if (DumpCriticalPathLength) { 2789 errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n"; 2790 } 2791 2792 if (EnableCyclicPath && SchedModel->getMicroOpBufferSize() > 0) { 2793 Rem.CyclicCritPath = DAG->computeCyclicCriticalPath(); 2794 checkAcyclicLatency(); 2795 } 2796 } 2797 2798 namespace llvm { 2799 bool tryPressure(const PressureChange &TryP, 2800 const PressureChange &CandP, 2801 GenericSchedulerBase::SchedCandidate &TryCand, 2802 GenericSchedulerBase::SchedCandidate &Cand, 2803 GenericSchedulerBase::CandReason Reason, 2804 const TargetRegisterInfo *TRI, 2805 const MachineFunction &MF) { 2806 // If one candidate decreases and the other increases, go with it. 2807 // Invalid candidates have UnitInc==0. 2808 if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand, 2809 Reason)) { 2810 return true; 2811 } 2812 // Do not compare the magnitude of pressure changes between top and bottom 2813 // boundary. 2814 if (Cand.AtTop != TryCand.AtTop) 2815 return false; 2816 2817 // If both candidates affect the same set in the same boundary, go with the 2818 // smallest increase. 2819 unsigned TryPSet = TryP.getPSetOrMax(); 2820 unsigned CandPSet = CandP.getPSetOrMax(); 2821 if (TryPSet == CandPSet) { 2822 return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand, 2823 Reason); 2824 } 2825 2826 int TryRank = TryP.isValid() ? TRI->getRegPressureSetScore(MF, TryPSet) : 2827 std::numeric_limits<int>::max(); 2828 2829 int CandRank = CandP.isValid() ? TRI->getRegPressureSetScore(MF, CandPSet) : 2830 std::numeric_limits<int>::max(); 2831 2832 // If the candidates are decreasing pressure, reverse priority. 2833 if (TryP.getUnitInc() < 0) 2834 std::swap(TryRank, CandRank); 2835 return tryGreater(TryRank, CandRank, TryCand, Cand, Reason); 2836 } 2837 2838 unsigned getWeakLeft(const SUnit *SU, bool isTop) { 2839 return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft; 2840 } 2841 2842 /// Minimize physical register live ranges. Regalloc wants them adjacent to 2843 /// their physreg def/use. 2844 /// 2845 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf 2846 /// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled 2847 /// with the operation that produces or consumes the physreg. We'll do this when 2848 /// regalloc has support for parallel copies. 2849 int biasPhysRegCopy(const SUnit *SU, bool isTop) { 2850 const MachineInstr *MI = SU->getInstr(); 2851 if (!MI->isCopy()) 2852 return 0; 2853 2854 unsigned ScheduledOper = isTop ? 1 : 0; 2855 unsigned UnscheduledOper = isTop ? 0 : 1; 2856 // If we have already scheduled the physreg produce/consumer, immediately 2857 // schedule the copy. 2858 if (TargetRegisterInfo::isPhysicalRegister( 2859 MI->getOperand(ScheduledOper).getReg())) 2860 return 1; 2861 // If the physreg is at the boundary, defer it. Otherwise schedule it 2862 // immediately to free the dependent. We can hoist the copy later. 2863 bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft; 2864 if (TargetRegisterInfo::isPhysicalRegister( 2865 MI->getOperand(UnscheduledOper).getReg())) 2866 return AtBoundary ? -1 : 1; 2867 return 0; 2868 } 2869 } // end namespace llvm 2870 2871 void GenericScheduler::initCandidate(SchedCandidate &Cand, SUnit *SU, 2872 bool AtTop, 2873 const RegPressureTracker &RPTracker, 2874 RegPressureTracker &TempTracker) { 2875 Cand.SU = SU; 2876 Cand.AtTop = AtTop; 2877 if (DAG->isTrackingPressure()) { 2878 if (AtTop) { 2879 TempTracker.getMaxDownwardPressureDelta( 2880 Cand.SU->getInstr(), 2881 Cand.RPDelta, 2882 DAG->getRegionCriticalPSets(), 2883 DAG->getRegPressure().MaxSetPressure); 2884 } else { 2885 if (VerifyScheduling) { 2886 TempTracker.getMaxUpwardPressureDelta( 2887 Cand.SU->getInstr(), 2888 &DAG->getPressureDiff(Cand.SU), 2889 Cand.RPDelta, 2890 DAG->getRegionCriticalPSets(), 2891 DAG->getRegPressure().MaxSetPressure); 2892 } else { 2893 RPTracker.getUpwardPressureDelta( 2894 Cand.SU->getInstr(), 2895 DAG->getPressureDiff(Cand.SU), 2896 Cand.RPDelta, 2897 DAG->getRegionCriticalPSets(), 2898 DAG->getRegPressure().MaxSetPressure); 2899 } 2900 } 2901 } 2902 LLVM_DEBUG(if (Cand.RPDelta.Excess.isValid()) dbgs() 2903 << " Try SU(" << Cand.SU->NodeNum << ") " 2904 << TRI->getRegPressureSetName(Cand.RPDelta.Excess.getPSet()) << ":" 2905 << Cand.RPDelta.Excess.getUnitInc() << "\n"); 2906 } 2907 2908 /// Apply a set of heuristics to a new candidate. Heuristics are currently 2909 /// hierarchical. This may be more efficient than a graduated cost model because 2910 /// we don't need to evaluate all aspects of the model for each node in the 2911 /// queue. But it's really done to make the heuristics easier to debug and 2912 /// statistically analyze. 2913 /// 2914 /// \param Cand provides the policy and current best candidate. 2915 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized. 2916 /// \param Zone describes the scheduled zone that we are extending, or nullptr 2917 // if Cand is from a different zone than TryCand. 2918 void GenericScheduler::tryCandidate(SchedCandidate &Cand, 2919 SchedCandidate &TryCand, 2920 SchedBoundary *Zone) const { 2921 // Initialize the candidate if needed. 2922 if (!Cand.isValid()) { 2923 TryCand.Reason = NodeOrder; 2924 return; 2925 } 2926 2927 if (tryGreater(biasPhysRegCopy(TryCand.SU, TryCand.AtTop), 2928 biasPhysRegCopy(Cand.SU, Cand.AtTop), 2929 TryCand, Cand, PhysRegCopy)) 2930 return; 2931 2932 // Avoid exceeding the target's limit. 2933 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess, 2934 Cand.RPDelta.Excess, 2935 TryCand, Cand, RegExcess, TRI, 2936 DAG->MF)) 2937 return; 2938 2939 // Avoid increasing the max critical pressure in the scheduled region. 2940 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax, 2941 Cand.RPDelta.CriticalMax, 2942 TryCand, Cand, RegCritical, TRI, 2943 DAG->MF)) 2944 return; 2945 2946 // We only compare a subset of features when comparing nodes between 2947 // Top and Bottom boundary. Some properties are simply incomparable, in many 2948 // other instances we should only override the other boundary if something 2949 // is a clear good pick on one boundary. Skip heuristics that are more 2950 // "tie-breaking" in nature. 2951 bool SameBoundary = Zone != nullptr; 2952 if (SameBoundary) { 2953 // For loops that are acyclic path limited, aggressively schedule for 2954 // latency. Within an single cycle, whenever CurrMOps > 0, allow normal 2955 // heuristics to take precedence. 2956 if (Rem.IsAcyclicLatencyLimited && !Zone->getCurrMOps() && 2957 tryLatency(TryCand, Cand, *Zone)) 2958 return; 2959 2960 // Prioritize instructions that read unbuffered resources by stall cycles. 2961 if (tryLess(Zone->getLatencyStallCycles(TryCand.SU), 2962 Zone->getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall)) 2963 return; 2964 } 2965 2966 // Keep clustered nodes together to encourage downstream peephole 2967 // optimizations which may reduce resource requirements. 2968 // 2969 // This is a best effort to set things up for a post-RA pass. Optimizations 2970 // like generating loads of multiple registers should ideally be done within 2971 // the scheduler pass by combining the loads during DAG postprocessing. 2972 const SUnit *CandNextClusterSU = 2973 Cand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred(); 2974 const SUnit *TryCandNextClusterSU = 2975 TryCand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred(); 2976 if (tryGreater(TryCand.SU == TryCandNextClusterSU, 2977 Cand.SU == CandNextClusterSU, 2978 TryCand, Cand, Cluster)) 2979 return; 2980 2981 if (SameBoundary) { 2982 // Weak edges are for clustering and other constraints. 2983 if (tryLess(getWeakLeft(TryCand.SU, TryCand.AtTop), 2984 getWeakLeft(Cand.SU, Cand.AtTop), 2985 TryCand, Cand, Weak)) 2986 return; 2987 } 2988 2989 // Avoid increasing the max pressure of the entire region. 2990 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax, 2991 Cand.RPDelta.CurrentMax, 2992 TryCand, Cand, RegMax, TRI, 2993 DAG->MF)) 2994 return; 2995 2996 if (SameBoundary) { 2997 // Avoid critical resource consumption and balance the schedule. 2998 TryCand.initResourceDelta(DAG, SchedModel); 2999 if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources, 3000 TryCand, Cand, ResourceReduce)) 3001 return; 3002 if (tryGreater(TryCand.ResDelta.DemandedResources, 3003 Cand.ResDelta.DemandedResources, 3004 TryCand, Cand, ResourceDemand)) 3005 return; 3006 3007 // Avoid serializing long latency dependence chains. 3008 // For acyclic path limited loops, latency was already checked above. 3009 if (!RegionPolicy.DisableLatencyHeuristic && TryCand.Policy.ReduceLatency && 3010 !Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, *Zone)) 3011 return; 3012 3013 // Fall through to original instruction order. 3014 if ((Zone->isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum) 3015 || (!Zone->isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) { 3016 TryCand.Reason = NodeOrder; 3017 } 3018 } 3019 } 3020 3021 /// Pick the best candidate from the queue. 3022 /// 3023 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during 3024 /// DAG building. To adjust for the current scheduling location we need to 3025 /// maintain the number of vreg uses remaining to be top-scheduled. 3026 void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone, 3027 const CandPolicy &ZonePolicy, 3028 const RegPressureTracker &RPTracker, 3029 SchedCandidate &Cand) { 3030 // getMaxPressureDelta temporarily modifies the tracker. 3031 RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker); 3032 3033 ReadyQueue &Q = Zone.Available; 3034 for (SUnit *SU : Q) { 3035 3036 SchedCandidate TryCand(ZonePolicy); 3037 initCandidate(TryCand, SU, Zone.isTop(), RPTracker, TempTracker); 3038 // Pass SchedBoundary only when comparing nodes from the same boundary. 3039 SchedBoundary *ZoneArg = Cand.AtTop == TryCand.AtTop ? &Zone : nullptr; 3040 tryCandidate(Cand, TryCand, ZoneArg); 3041 if (TryCand.Reason != NoCand) { 3042 // Initialize resource delta if needed in case future heuristics query it. 3043 if (TryCand.ResDelta == SchedResourceDelta()) 3044 TryCand.initResourceDelta(DAG, SchedModel); 3045 Cand.setBest(TryCand); 3046 LLVM_DEBUG(traceCandidate(Cand)); 3047 } 3048 } 3049 } 3050 3051 /// Pick the best candidate node from either the top or bottom queue. 3052 SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) { 3053 // Schedule as far as possible in the direction of no choice. This is most 3054 // efficient, but also provides the best heuristics for CriticalPSets. 3055 if (SUnit *SU = Bot.pickOnlyChoice()) { 3056 IsTopNode = false; 3057 tracePick(Only1, false); 3058 return SU; 3059 } 3060 if (SUnit *SU = Top.pickOnlyChoice()) { 3061 IsTopNode = true; 3062 tracePick(Only1, true); 3063 return SU; 3064 } 3065 // Set the bottom-up policy based on the state of the current bottom zone and 3066 // the instructions outside the zone, including the top zone. 3067 CandPolicy BotPolicy; 3068 setPolicy(BotPolicy, /*IsPostRA=*/false, Bot, &Top); 3069 // Set the top-down policy based on the state of the current top zone and 3070 // the instructions outside the zone, including the bottom zone. 3071 CandPolicy TopPolicy; 3072 setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot); 3073 3074 // See if BotCand is still valid (because we previously scheduled from Top). 3075 LLVM_DEBUG(dbgs() << "Picking from Bot:\n"); 3076 if (!BotCand.isValid() || BotCand.SU->isScheduled || 3077 BotCand.Policy != BotPolicy) { 3078 BotCand.reset(CandPolicy()); 3079 pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand); 3080 assert(BotCand.Reason != NoCand && "failed to find the first candidate"); 3081 } else { 3082 LLVM_DEBUG(traceCandidate(BotCand)); 3083 #ifndef NDEBUG 3084 if (VerifyScheduling) { 3085 SchedCandidate TCand; 3086 TCand.reset(CandPolicy()); 3087 pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), TCand); 3088 assert(TCand.SU == BotCand.SU && 3089 "Last pick result should correspond to re-picking right now"); 3090 } 3091 #endif 3092 } 3093 3094 // Check if the top Q has a better candidate. 3095 LLVM_DEBUG(dbgs() << "Picking from Top:\n"); 3096 if (!TopCand.isValid() || TopCand.SU->isScheduled || 3097 TopCand.Policy != TopPolicy) { 3098 TopCand.reset(CandPolicy()); 3099 pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand); 3100 assert(TopCand.Reason != NoCand && "failed to find the first candidate"); 3101 } else { 3102 LLVM_DEBUG(traceCandidate(TopCand)); 3103 #ifndef NDEBUG 3104 if (VerifyScheduling) { 3105 SchedCandidate TCand; 3106 TCand.reset(CandPolicy()); 3107 pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TCand); 3108 assert(TCand.SU == TopCand.SU && 3109 "Last pick result should correspond to re-picking right now"); 3110 } 3111 #endif 3112 } 3113 3114 // Pick best from BotCand and TopCand. 3115 assert(BotCand.isValid()); 3116 assert(TopCand.isValid()); 3117 SchedCandidate Cand = BotCand; 3118 TopCand.Reason = NoCand; 3119 tryCandidate(Cand, TopCand, nullptr); 3120 if (TopCand.Reason != NoCand) { 3121 Cand.setBest(TopCand); 3122 LLVM_DEBUG(traceCandidate(Cand)); 3123 } 3124 3125 IsTopNode = Cand.AtTop; 3126 tracePick(Cand); 3127 return Cand.SU; 3128 } 3129 3130 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy. 3131 SUnit *GenericScheduler::pickNode(bool &IsTopNode) { 3132 if (DAG->top() == DAG->bottom()) { 3133 assert(Top.Available.empty() && Top.Pending.empty() && 3134 Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage"); 3135 return nullptr; 3136 } 3137 SUnit *SU; 3138 do { 3139 if (RegionPolicy.OnlyTopDown) { 3140 SU = Top.pickOnlyChoice(); 3141 if (!SU) { 3142 CandPolicy NoPolicy; 3143 TopCand.reset(NoPolicy); 3144 pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand); 3145 assert(TopCand.Reason != NoCand && "failed to find a candidate"); 3146 tracePick(TopCand); 3147 SU = TopCand.SU; 3148 } 3149 IsTopNode = true; 3150 } else if (RegionPolicy.OnlyBottomUp) { 3151 SU = Bot.pickOnlyChoice(); 3152 if (!SU) { 3153 CandPolicy NoPolicy; 3154 BotCand.reset(NoPolicy); 3155 pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand); 3156 assert(BotCand.Reason != NoCand && "failed to find a candidate"); 3157 tracePick(BotCand); 3158 SU = BotCand.SU; 3159 } 3160 IsTopNode = false; 3161 } else { 3162 SU = pickNodeBidirectional(IsTopNode); 3163 } 3164 } while (SU->isScheduled); 3165 3166 if (SU->isTopReady()) 3167 Top.removeReady(SU); 3168 if (SU->isBottomReady()) 3169 Bot.removeReady(SU); 3170 3171 LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " 3172 << *SU->getInstr()); 3173 return SU; 3174 } 3175 3176 void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) { 3177 MachineBasicBlock::iterator InsertPos = SU->getInstr(); 3178 if (!isTop) 3179 ++InsertPos; 3180 SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs; 3181 3182 // Find already scheduled copies with a single physreg dependence and move 3183 // them just above the scheduled instruction. 3184 for (SDep &Dep : Deps) { 3185 if (Dep.getKind() != SDep::Data || !TRI->isPhysicalRegister(Dep.getReg())) 3186 continue; 3187 SUnit *DepSU = Dep.getSUnit(); 3188 if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1) 3189 continue; 3190 MachineInstr *Copy = DepSU->getInstr(); 3191 if (!Copy->isCopy()) 3192 continue; 3193 LLVM_DEBUG(dbgs() << " Rescheduling physreg copy "; 3194 DAG->dumpNode(*Dep.getSUnit())); 3195 DAG->moveInstruction(Copy, InsertPos); 3196 } 3197 } 3198 3199 /// Update the scheduler's state after scheduling a node. This is the same node 3200 /// that was just returned by pickNode(). However, ScheduleDAGMILive needs to 3201 /// update it's state based on the current cycle before MachineSchedStrategy 3202 /// does. 3203 /// 3204 /// FIXME: Eventually, we may bundle physreg copies rather than rescheduling 3205 /// them here. See comments in biasPhysRegCopy. 3206 void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) { 3207 if (IsTopNode) { 3208 SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle()); 3209 Top.bumpNode(SU); 3210 if (SU->hasPhysRegUses) 3211 reschedulePhysRegCopies(SU, true); 3212 } else { 3213 SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle()); 3214 Bot.bumpNode(SU); 3215 if (SU->hasPhysRegDefs) 3216 reschedulePhysRegCopies(SU, false); 3217 } 3218 } 3219 3220 /// Create the standard converging machine scheduler. This will be used as the 3221 /// default scheduler if the target does not set a default. 3222 ScheduleDAGMILive *llvm::createGenericSchedLive(MachineSchedContext *C) { 3223 ScheduleDAGMILive *DAG = 3224 new ScheduleDAGMILive(C, llvm::make_unique<GenericScheduler>(C)); 3225 // Register DAG post-processors. 3226 // 3227 // FIXME: extend the mutation API to allow earlier mutations to instantiate 3228 // data and pass it to later mutations. Have a single mutation that gathers 3229 // the interesting nodes in one pass. 3230 DAG->addMutation(createCopyConstrainDAGMutation(DAG->TII, DAG->TRI)); 3231 return DAG; 3232 } 3233 3234 static ScheduleDAGInstrs *createConveringSched(MachineSchedContext *C) { 3235 return createGenericSchedLive(C); 3236 } 3237 3238 static MachineSchedRegistry 3239 GenericSchedRegistry("converge", "Standard converging scheduler.", 3240 createConveringSched); 3241 3242 //===----------------------------------------------------------------------===// 3243 // PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy. 3244 //===----------------------------------------------------------------------===// 3245 3246 void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) { 3247 DAG = Dag; 3248 SchedModel = DAG->getSchedModel(); 3249 TRI = DAG->TRI; 3250 3251 Rem.init(DAG, SchedModel); 3252 Top.init(DAG, SchedModel, &Rem); 3253 BotRoots.clear(); 3254 3255 // Initialize the HazardRecognizers. If itineraries don't exist, are empty, 3256 // or are disabled, then these HazardRecs will be disabled. 3257 const InstrItineraryData *Itin = SchedModel->getInstrItineraries(); 3258 if (!Top.HazardRec) { 3259 Top.HazardRec = 3260 DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer( 3261 Itin, DAG); 3262 } 3263 } 3264 3265 void PostGenericScheduler::registerRoots() { 3266 Rem.CriticalPath = DAG->ExitSU.getDepth(); 3267 3268 // Some roots may not feed into ExitSU. Check all of them in case. 3269 for (const SUnit *SU : BotRoots) { 3270 if (SU->getDepth() > Rem.CriticalPath) 3271 Rem.CriticalPath = SU->getDepth(); 3272 } 3273 LLVM_DEBUG(dbgs() << "Critical Path: (PGS-RR) " << Rem.CriticalPath << '\n'); 3274 if (DumpCriticalPathLength) { 3275 errs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << " \n"; 3276 } 3277 } 3278 3279 /// Apply a set of heuristics to a new candidate for PostRA scheduling. 3280 /// 3281 /// \param Cand provides the policy and current best candidate. 3282 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized. 3283 void PostGenericScheduler::tryCandidate(SchedCandidate &Cand, 3284 SchedCandidate &TryCand) { 3285 // Initialize the candidate if needed. 3286 if (!Cand.isValid()) { 3287 TryCand.Reason = NodeOrder; 3288 return; 3289 } 3290 3291 // Prioritize instructions that read unbuffered resources by stall cycles. 3292 if (tryLess(Top.getLatencyStallCycles(TryCand.SU), 3293 Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall)) 3294 return; 3295 3296 // Keep clustered nodes together. 3297 if (tryGreater(TryCand.SU == DAG->getNextClusterSucc(), 3298 Cand.SU == DAG->getNextClusterSucc(), 3299 TryCand, Cand, Cluster)) 3300 return; 3301 3302 // Avoid critical resource consumption and balance the schedule. 3303 if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources, 3304 TryCand, Cand, ResourceReduce)) 3305 return; 3306 if (tryGreater(TryCand.ResDelta.DemandedResources, 3307 Cand.ResDelta.DemandedResources, 3308 TryCand, Cand, ResourceDemand)) 3309 return; 3310 3311 // Avoid serializing long latency dependence chains. 3312 if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) { 3313 return; 3314 } 3315 3316 // Fall through to original instruction order. 3317 if (TryCand.SU->NodeNum < Cand.SU->NodeNum) 3318 TryCand.Reason = NodeOrder; 3319 } 3320 3321 void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) { 3322 ReadyQueue &Q = Top.Available; 3323 for (SUnit *SU : Q) { 3324 SchedCandidate TryCand(Cand.Policy); 3325 TryCand.SU = SU; 3326 TryCand.AtTop = true; 3327 TryCand.initResourceDelta(DAG, SchedModel); 3328 tryCandidate(Cand, TryCand); 3329 if (TryCand.Reason != NoCand) { 3330 Cand.setBest(TryCand); 3331 LLVM_DEBUG(traceCandidate(Cand)); 3332 } 3333 } 3334 } 3335 3336 /// Pick the next node to schedule. 3337 SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) { 3338 if (DAG->top() == DAG->bottom()) { 3339 assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage"); 3340 return nullptr; 3341 } 3342 SUnit *SU; 3343 do { 3344 SU = Top.pickOnlyChoice(); 3345 if (SU) { 3346 tracePick(Only1, true); 3347 } else { 3348 CandPolicy NoPolicy; 3349 SchedCandidate TopCand(NoPolicy); 3350 // Set the top-down policy based on the state of the current top zone and 3351 // the instructions outside the zone, including the bottom zone. 3352 setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr); 3353 pickNodeFromQueue(TopCand); 3354 assert(TopCand.Reason != NoCand && "failed to find a candidate"); 3355 tracePick(TopCand); 3356 SU = TopCand.SU; 3357 } 3358 } while (SU->isScheduled); 3359 3360 IsTopNode = true; 3361 Top.removeReady(SU); 3362 3363 LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " 3364 << *SU->getInstr()); 3365 return SU; 3366 } 3367 3368 /// Called after ScheduleDAGMI has scheduled an instruction and updated 3369 /// scheduled/remaining flags in the DAG nodes. 3370 void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) { 3371 SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle()); 3372 Top.bumpNode(SU); 3373 } 3374 3375 ScheduleDAGMI *llvm::createGenericSchedPostRA(MachineSchedContext *C) { 3376 return new ScheduleDAGMI(C, llvm::make_unique<PostGenericScheduler>(C), 3377 /*RemoveKillFlags=*/true); 3378 } 3379 3380 //===----------------------------------------------------------------------===// 3381 // ILP Scheduler. Currently for experimental analysis of heuristics. 3382 //===----------------------------------------------------------------------===// 3383 3384 namespace { 3385 3386 /// Order nodes by the ILP metric. 3387 struct ILPOrder { 3388 const SchedDFSResult *DFSResult = nullptr; 3389 const BitVector *ScheduledTrees = nullptr; 3390 bool MaximizeILP; 3391 3392 ILPOrder(bool MaxILP) : MaximizeILP(MaxILP) {} 3393 3394 /// Apply a less-than relation on node priority. 3395 /// 3396 /// (Return true if A comes after B in the Q.) 3397 bool operator()(const SUnit *A, const SUnit *B) const { 3398 unsigned SchedTreeA = DFSResult->getSubtreeID(A); 3399 unsigned SchedTreeB = DFSResult->getSubtreeID(B); 3400 if (SchedTreeA != SchedTreeB) { 3401 // Unscheduled trees have lower priority. 3402 if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB)) 3403 return ScheduledTrees->test(SchedTreeB); 3404 3405 // Trees with shallower connections have have lower priority. 3406 if (DFSResult->getSubtreeLevel(SchedTreeA) 3407 != DFSResult->getSubtreeLevel(SchedTreeB)) { 3408 return DFSResult->getSubtreeLevel(SchedTreeA) 3409 < DFSResult->getSubtreeLevel(SchedTreeB); 3410 } 3411 } 3412 if (MaximizeILP) 3413 return DFSResult->getILP(A) < DFSResult->getILP(B); 3414 else 3415 return DFSResult->getILP(A) > DFSResult->getILP(B); 3416 } 3417 }; 3418 3419 /// Schedule based on the ILP metric. 3420 class ILPScheduler : public MachineSchedStrategy { 3421 ScheduleDAGMILive *DAG = nullptr; 3422 ILPOrder Cmp; 3423 3424 std::vector<SUnit*> ReadyQ; 3425 3426 public: 3427 ILPScheduler(bool MaximizeILP) : Cmp(MaximizeILP) {} 3428 3429 void initialize(ScheduleDAGMI *dag) override { 3430 assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness"); 3431 DAG = static_cast<ScheduleDAGMILive*>(dag); 3432 DAG->computeDFSResult(); 3433 Cmp.DFSResult = DAG->getDFSResult(); 3434 Cmp.ScheduledTrees = &DAG->getScheduledTrees(); 3435 ReadyQ.clear(); 3436 } 3437 3438 void registerRoots() override { 3439 // Restore the heap in ReadyQ with the updated DFS results. 3440 std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 3441 } 3442 3443 /// Implement MachineSchedStrategy interface. 3444 /// ----------------------------------------- 3445 3446 /// Callback to select the highest priority node from the ready Q. 3447 SUnit *pickNode(bool &IsTopNode) override { 3448 if (ReadyQ.empty()) return nullptr; 3449 std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 3450 SUnit *SU = ReadyQ.back(); 3451 ReadyQ.pop_back(); 3452 IsTopNode = false; 3453 LLVM_DEBUG(dbgs() << "Pick node " 3454 << "SU(" << SU->NodeNum << ") " 3455 << " ILP: " << DAG->getDFSResult()->getILP(SU) 3456 << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) 3457 << " @" 3458 << DAG->getDFSResult()->getSubtreeLevel( 3459 DAG->getDFSResult()->getSubtreeID(SU)) 3460 << '\n' 3461 << "Scheduling " << *SU->getInstr()); 3462 return SU; 3463 } 3464 3465 /// Scheduler callback to notify that a new subtree is scheduled. 3466 void scheduleTree(unsigned SubtreeID) override { 3467 std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 3468 } 3469 3470 /// Callback after a node is scheduled. Mark a newly scheduled tree, notify 3471 /// DFSResults, and resort the priority Q. 3472 void schedNode(SUnit *SU, bool IsTopNode) override { 3473 assert(!IsTopNode && "SchedDFSResult needs bottom-up"); 3474 } 3475 3476 void releaseTopNode(SUnit *) override { /*only called for top roots*/ } 3477 3478 void releaseBottomNode(SUnit *SU) override { 3479 ReadyQ.push_back(SU); 3480 std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 3481 } 3482 }; 3483 3484 } // end anonymous namespace 3485 3486 static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) { 3487 return new ScheduleDAGMILive(C, llvm::make_unique<ILPScheduler>(true)); 3488 } 3489 static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) { 3490 return new ScheduleDAGMILive(C, llvm::make_unique<ILPScheduler>(false)); 3491 } 3492 3493 static MachineSchedRegistry ILPMaxRegistry( 3494 "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler); 3495 static MachineSchedRegistry ILPMinRegistry( 3496 "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler); 3497 3498 //===----------------------------------------------------------------------===// 3499 // Machine Instruction Shuffler for Correctness Testing 3500 //===----------------------------------------------------------------------===// 3501 3502 #ifndef NDEBUG 3503 namespace { 3504 3505 /// Apply a less-than relation on the node order, which corresponds to the 3506 /// instruction order prior to scheduling. IsReverse implements greater-than. 3507 template<bool IsReverse> 3508 struct SUnitOrder { 3509 bool operator()(SUnit *A, SUnit *B) const { 3510 if (IsReverse) 3511 return A->NodeNum > B->NodeNum; 3512 else 3513 return A->NodeNum < B->NodeNum; 3514 } 3515 }; 3516 3517 /// Reorder instructions as much as possible. 3518 class InstructionShuffler : public MachineSchedStrategy { 3519 bool IsAlternating; 3520 bool IsTopDown; 3521 3522 // Using a less-than relation (SUnitOrder<false>) for the TopQ priority 3523 // gives nodes with a higher number higher priority causing the latest 3524 // instructions to be scheduled first. 3525 PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false>> 3526 TopQ; 3527 3528 // When scheduling bottom-up, use greater-than as the queue priority. 3529 PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true>> 3530 BottomQ; 3531 3532 public: 3533 InstructionShuffler(bool alternate, bool topdown) 3534 : IsAlternating(alternate), IsTopDown(topdown) {} 3535 3536 void initialize(ScheduleDAGMI*) override { 3537 TopQ.clear(); 3538 BottomQ.clear(); 3539 } 3540 3541 /// Implement MachineSchedStrategy interface. 3542 /// ----------------------------------------- 3543 3544 SUnit *pickNode(bool &IsTopNode) override { 3545 SUnit *SU; 3546 if (IsTopDown) { 3547 do { 3548 if (TopQ.empty()) return nullptr; 3549 SU = TopQ.top(); 3550 TopQ.pop(); 3551 } while (SU->isScheduled); 3552 IsTopNode = true; 3553 } else { 3554 do { 3555 if (BottomQ.empty()) return nullptr; 3556 SU = BottomQ.top(); 3557 BottomQ.pop(); 3558 } while (SU->isScheduled); 3559 IsTopNode = false; 3560 } 3561 if (IsAlternating) 3562 IsTopDown = !IsTopDown; 3563 return SU; 3564 } 3565 3566 void schedNode(SUnit *SU, bool IsTopNode) override {} 3567 3568 void releaseTopNode(SUnit *SU) override { 3569 TopQ.push(SU); 3570 } 3571 void releaseBottomNode(SUnit *SU) override { 3572 BottomQ.push(SU); 3573 } 3574 }; 3575 3576 } // end anonymous namespace 3577 3578 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) { 3579 bool Alternate = !ForceTopDown && !ForceBottomUp; 3580 bool TopDown = !ForceBottomUp; 3581 assert((TopDown || !ForceTopDown) && 3582 "-misched-topdown incompatible with -misched-bottomup"); 3583 return new ScheduleDAGMILive( 3584 C, llvm::make_unique<InstructionShuffler>(Alternate, TopDown)); 3585 } 3586 3587 static MachineSchedRegistry ShufflerRegistry( 3588 "shuffle", "Shuffle machine instructions alternating directions", 3589 createInstructionShuffler); 3590 #endif // !NDEBUG 3591 3592 //===----------------------------------------------------------------------===// 3593 // GraphWriter support for ScheduleDAGMILive. 3594 //===----------------------------------------------------------------------===// 3595 3596 #ifndef NDEBUG 3597 namespace llvm { 3598 3599 template<> struct GraphTraits< 3600 ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {}; 3601 3602 template<> 3603 struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits { 3604 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {} 3605 3606 static std::string getGraphName(const ScheduleDAG *G) { 3607 return G->MF.getName(); 3608 } 3609 3610 static bool renderGraphFromBottomUp() { 3611 return true; 3612 } 3613 3614 static bool isNodeHidden(const SUnit *Node) { 3615 if (ViewMISchedCutoff == 0) 3616 return false; 3617 return (Node->Preds.size() > ViewMISchedCutoff 3618 || Node->Succs.size() > ViewMISchedCutoff); 3619 } 3620 3621 /// If you want to override the dot attributes printed for a particular 3622 /// edge, override this method. 3623 static std::string getEdgeAttributes(const SUnit *Node, 3624 SUnitIterator EI, 3625 const ScheduleDAG *Graph) { 3626 if (EI.isArtificialDep()) 3627 return "color=cyan,style=dashed"; 3628 if (EI.isCtrlDep()) 3629 return "color=blue,style=dashed"; 3630 return ""; 3631 } 3632 3633 static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) { 3634 std::string Str; 3635 raw_string_ostream SS(Str); 3636 const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G); 3637 const SchedDFSResult *DFS = DAG->hasVRegLiveness() ? 3638 static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr; 3639 SS << "SU:" << SU->NodeNum; 3640 if (DFS) 3641 SS << " I:" << DFS->getNumInstrs(SU); 3642 return SS.str(); 3643 } 3644 3645 static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) { 3646 return G->getGraphNodeLabel(SU); 3647 } 3648 3649 static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) { 3650 std::string Str("shape=Mrecord"); 3651 const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G); 3652 const SchedDFSResult *DFS = DAG->hasVRegLiveness() ? 3653 static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr; 3654 if (DFS) { 3655 Str += ",style=filled,fillcolor=\"#"; 3656 Str += DOT::getColorString(DFS->getSubtreeID(N)); 3657 Str += '"'; 3658 } 3659 return Str; 3660 } 3661 }; 3662 3663 } // end namespace llvm 3664 #endif // NDEBUG 3665 3666 /// viewGraph - Pop up a ghostview window with the reachable parts of the DAG 3667 /// rendered using 'dot'. 3668 void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) { 3669 #ifndef NDEBUG 3670 ViewGraph(this, Name, false, Title); 3671 #else 3672 errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on " 3673 << "systems with Graphviz or gv!\n"; 3674 #endif // NDEBUG 3675 } 3676 3677 /// Out-of-line implementation with no arguments is handy for gdb. 3678 void ScheduleDAGMI::viewGraph() { 3679 viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName()); 3680 } 3681