1 //===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // MachineScheduler schedules machine instructions after phi elimination. It 11 // preserves LiveIntervals so it can be invoked before register allocation. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #define DEBUG_TYPE "misched" 16 17 #include "llvm/CodeGen/MachineScheduler.h" 18 #include "llvm/ADT/OwningPtr.h" 19 #include "llvm/ADT/PriorityQueue.h" 20 #include "llvm/Analysis/AliasAnalysis.h" 21 #include "llvm/CodeGen/LiveIntervalAnalysis.h" 22 #include "llvm/CodeGen/MachineDominators.h" 23 #include "llvm/CodeGen/MachineLoopInfo.h" 24 #include "llvm/CodeGen/MachineRegisterInfo.h" 25 #include "llvm/CodeGen/Passes.h" 26 #include "llvm/CodeGen/RegisterClassInfo.h" 27 #include "llvm/CodeGen/ScheduleDFS.h" 28 #include "llvm/CodeGen/ScheduleHazardRecognizer.h" 29 #include "llvm/Support/CommandLine.h" 30 #include "llvm/Support/Debug.h" 31 #include "llvm/Support/ErrorHandling.h" 32 #include "llvm/Support/GraphWriter.h" 33 #include "llvm/Support/raw_ostream.h" 34 #include "llvm/Target/TargetInstrInfo.h" 35 #include <queue> 36 37 using namespace llvm; 38 39 namespace llvm { 40 cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden, 41 cl::desc("Force top-down list scheduling")); 42 cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden, 43 cl::desc("Force bottom-up list scheduling")); 44 } 45 46 #ifndef NDEBUG 47 static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden, 48 cl::desc("Pop up a window to show MISched dags after they are processed")); 49 50 static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden, 51 cl::desc("Stop scheduling after N instructions"), cl::init(~0U)); 52 53 static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden, 54 cl::desc("Only schedule this function")); 55 static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden, 56 cl::desc("Only schedule this MBB#")); 57 #else 58 static bool ViewMISchedDAGs = false; 59 #endif // NDEBUG 60 61 static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden, 62 cl::desc("Enable register pressure scheduling."), cl::init(true)); 63 64 static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden, 65 cl::desc("Enable cyclic critical path analysis."), cl::init(true)); 66 67 static cl::opt<bool> EnableLoadCluster("misched-cluster", cl::Hidden, 68 cl::desc("Enable load clustering."), cl::init(true)); 69 70 // Experimental heuristics 71 static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden, 72 cl::desc("Enable scheduling for macro fusion."), cl::init(true)); 73 74 static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden, 75 cl::desc("Verify machine instrs before and after machine scheduling")); 76 77 // DAG subtrees must have at least this many nodes. 78 static const unsigned MinSubtreeSize = 8; 79 80 // Pin the vtables to this file. 
81 void MachineSchedStrategy::anchor() {} 82 void ScheduleDAGMutation::anchor() {} 83 84 //===----------------------------------------------------------------------===// 85 // Machine Instruction Scheduling Pass and Registry 86 //===----------------------------------------------------------------------===// 87 88 MachineSchedContext::MachineSchedContext(): 89 MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) { 90 RegClassInfo = new RegisterClassInfo(); 91 } 92 93 MachineSchedContext::~MachineSchedContext() { 94 delete RegClassInfo; 95 } 96 97 namespace { 98 /// Base class for a machine scheduler class that can run at any point. 99 class MachineSchedulerBase : public MachineSchedContext, 100 public MachineFunctionPass { 101 public: 102 MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {} 103 104 virtual void print(raw_ostream &O, const Module* = 0) const; 105 106 protected: 107 void scheduleRegions(ScheduleDAGInstrs &Scheduler); 108 }; 109 110 /// MachineScheduler runs after coalescing and before register allocation. 111 class MachineScheduler : public MachineSchedulerBase { 112 public: 113 MachineScheduler(); 114 115 virtual void getAnalysisUsage(AnalysisUsage &AU) const; 116 117 virtual bool runOnMachineFunction(MachineFunction&); 118 119 static char ID; // Class identification, replacement for typeinfo 120 121 protected: 122 ScheduleDAGInstrs *createMachineScheduler(); 123 }; 124 125 /// PostMachineScheduler runs after shortly before code emission. 126 class PostMachineScheduler : public MachineSchedulerBase { 127 public: 128 PostMachineScheduler(); 129 130 virtual void getAnalysisUsage(AnalysisUsage &AU) const; 131 132 virtual bool runOnMachineFunction(MachineFunction&); 133 134 static char ID; // Class identification, replacement for typeinfo 135 136 protected: 137 ScheduleDAGInstrs *createPostMachineScheduler(); 138 }; 139 } // namespace 140 141 char MachineScheduler::ID = 0; 142 143 char &llvm::MachineSchedulerID = MachineScheduler::ID; 144 145 INITIALIZE_PASS_BEGIN(MachineScheduler, "misched", 146 "Machine Instruction Scheduler", false, false) 147 INITIALIZE_AG_DEPENDENCY(AliasAnalysis) 148 INITIALIZE_PASS_DEPENDENCY(SlotIndexes) 149 INITIALIZE_PASS_DEPENDENCY(LiveIntervals) 150 INITIALIZE_PASS_END(MachineScheduler, "misched", 151 "Machine Instruction Scheduler", false, false) 152 153 MachineScheduler::MachineScheduler() 154 : MachineSchedulerBase(ID) { 155 initializeMachineSchedulerPass(*PassRegistry::getPassRegistry()); 156 } 157 158 void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const { 159 AU.setPreservesCFG(); 160 AU.addRequiredID(MachineDominatorsID); 161 AU.addRequired<MachineLoopInfo>(); 162 AU.addRequired<AliasAnalysis>(); 163 AU.addRequired<TargetPassConfig>(); 164 AU.addRequired<SlotIndexes>(); 165 AU.addPreserved<SlotIndexes>(); 166 AU.addRequired<LiveIntervals>(); 167 AU.addPreserved<LiveIntervals>(); 168 MachineFunctionPass::getAnalysisUsage(AU); 169 } 170 171 char PostMachineScheduler::ID = 0; 172 173 char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID; 174 175 INITIALIZE_PASS(PostMachineScheduler, "postmisched", 176 "PostRA Machine Instruction Scheduler", false, false) 177 178 PostMachineScheduler::PostMachineScheduler() 179 : MachineSchedulerBase(ID) { 180 initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry()); 181 } 182 183 void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const { 184 AU.setPreservesCFG(); 185 AU.addRequiredID(MachineDominatorsID); 186 AU.addRequired<MachineLoopInfo>(); 187 
AU.addRequired<TargetPassConfig>(); 188 MachineFunctionPass::getAnalysisUsage(AU); 189 } 190 191 MachinePassRegistry MachineSchedRegistry::Registry; 192 193 /// A dummy default scheduler factory indicates whether the scheduler 194 /// is overridden on the command line. 195 static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) { 196 return 0; 197 } 198 199 /// MachineSchedOpt allows command line selection of the scheduler. 200 static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false, 201 RegisterPassParser<MachineSchedRegistry> > 202 MachineSchedOpt("misched", 203 cl::init(&useDefaultMachineSched), cl::Hidden, 204 cl::desc("Machine instruction scheduler to use")); 205 206 static MachineSchedRegistry 207 DefaultSchedRegistry("default", "Use the target's default scheduler choice.", 208 useDefaultMachineSched); 209 210 /// Forward declare the standard machine scheduler. This will be used as the 211 /// default scheduler if the target does not set a default. 212 static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C); 213 static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C); 214 215 /// Decrement this iterator until reaching the top or a non-debug instr. 216 static MachineBasicBlock::const_iterator 217 priorNonDebug(MachineBasicBlock::const_iterator I, 218 MachineBasicBlock::const_iterator Beg) { 219 assert(I != Beg && "reached the top of the region, cannot decrement"); 220 while (--I != Beg) { 221 if (!I->isDebugValue()) 222 break; 223 } 224 return I; 225 } 226 227 /// Non-const version. 228 static MachineBasicBlock::iterator 229 priorNonDebug(MachineBasicBlock::iterator I, 230 MachineBasicBlock::const_iterator Beg) { 231 return const_cast<MachineInstr*>( 232 &*priorNonDebug(MachineBasicBlock::const_iterator(I), Beg)); 233 } 234 235 /// If this iterator is a debug value, increment until reaching the End or a 236 /// non-debug instruction. 237 static MachineBasicBlock::const_iterator 238 nextIfDebug(MachineBasicBlock::const_iterator I, 239 MachineBasicBlock::const_iterator End) { 240 for(; I != End; ++I) { 241 if (!I->isDebugValue()) 242 break; 243 } 244 return I; 245 } 246 247 /// Non-const version. 248 static MachineBasicBlock::iterator 249 nextIfDebug(MachineBasicBlock::iterator I, 250 MachineBasicBlock::const_iterator End) { 251 // Cast the return value to nonconst MachineInstr, then cast to an 252 // instr_iterator, which does not check for null, finally return a 253 // bundle_iterator. 254 return MachineBasicBlock::instr_iterator( 255 const_cast<MachineInstr*>( 256 &*nextIfDebug(MachineBasicBlock::const_iterator(I), End))); 257 } 258 259 /// Instantiate a ScheduleDAGInstrs that will be owned by the caller. 260 ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() { 261 // Select the scheduler, or set the default. 262 MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt; 263 if (Ctor != useDefaultMachineSched) 264 return Ctor(this); 265 266 // Get the default scheduler set by the target for this function. 267 ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this); 268 if (Scheduler) 269 return Scheduler; 270 271 // Default to GenericScheduler. 272 return createGenericSchedLive(this); 273 } 274 275 /// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by 276 /// the caller. We don't have a command line option to override the postRA 277 /// scheduler. The Target must configure it. 
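// Illustrative sketch (not code in this file): there are two ways a scheduler
// gets picked. Pre-RA, a factory registered with MachineSchedRegistry becomes
// selectable via -misched=<name>, mirroring DefaultSchedRegistry above.
// Post-RA, the target's TargetPassConfig must hand one back from its
// createPostMachineScheduler() hook, which is queried by the function below.
// MyStrategy, MyPostRAStrategy, and MyTargetPassConfig are hypothetical names,
// and the exact hook and constructor signatures are assumptions:
//
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, new MyStrategy(C));
//   }
//   static MachineSchedRegistry
//   MySchedRegistry("my-sched", "Run my custom scheduler.", createMySched);
//
//   ScheduleDAGInstrs *
//   MyTargetPassConfig::createPostMachineScheduler(MachineSchedContext *C) {
//     return new ScheduleDAGMI(C, new MyPostRAStrategy(C), /*IsPostRA=*/true);
//   }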
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AliasAnalysis>();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  OwningPtr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler);

  DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  PassConfig = &getAnalysis<TargetPassConfig>();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  OwningPtr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  scheduleRegions(*Scheduler);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}

/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls.
In PostRA scheduling, we need the isCall to enforce 361 /// the boundary, but there would be no benefit to postRA scheduling across 362 /// calls this late anyway. 363 static bool isSchedBoundary(MachineBasicBlock::iterator MI, 364 MachineBasicBlock *MBB, 365 MachineFunction *MF, 366 const TargetInstrInfo *TII, 367 bool IsPostRA) { 368 return MI->isCall() || TII->isSchedulingBoundary(MI, MBB, *MF); 369 } 370 371 /// Main driver for both MachineScheduler and PostMachineScheduler. 372 void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler) { 373 const TargetInstrInfo *TII = MF->getTarget().getInstrInfo(); 374 bool IsPostRA = Scheduler.isPostRA(); 375 376 // Visit all machine basic blocks. 377 // 378 // TODO: Visit blocks in global postorder or postorder within the bottom-up 379 // loop tree. Then we can optionally compute global RegPressure. 380 for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end(); 381 MBB != MBBEnd; ++MBB) { 382 383 Scheduler.startBlock(MBB); 384 385 #ifndef NDEBUG 386 if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName()) 387 continue; 388 if (SchedOnlyBlock.getNumOccurrences() 389 && (int)SchedOnlyBlock != MBB->getNumber()) 390 continue; 391 #endif 392 393 // Break the block into scheduling regions [I, RegionEnd), and schedule each 394 // region as soon as it is discovered. RegionEnd points the scheduling 395 // boundary at the bottom of the region. The DAG does not include RegionEnd, 396 // but the region does (i.e. the next RegionEnd is above the previous 397 // RegionBegin). If the current block has no terminator then RegionEnd == 398 // MBB->end() for the bottom region. 399 // 400 // The Scheduler may insert instructions during either schedule() or 401 // exitRegion(), even for empty regions. So the local iterators 'I' and 402 // 'RegionEnd' are invalid across these calls. 403 // 404 // MBB::size() uses instr_iterator to count. Here we need a bundle to count 405 // as a single instruction. 406 unsigned RemainingInstrs = std::distance(MBB->begin(), MBB->end()); 407 for(MachineBasicBlock::iterator RegionEnd = MBB->end(); 408 RegionEnd != MBB->begin(); RegionEnd = Scheduler.begin()) { 409 410 // Avoid decrementing RegionEnd for blocks with no terminator. 411 if (RegionEnd != MBB->end() 412 || isSchedBoundary(llvm::prior(RegionEnd), MBB, MF, TII, IsPostRA)) { 413 --RegionEnd; 414 // Count the boundary instruction. 415 --RemainingInstrs; 416 } 417 418 // The next region starts above the previous region. Look backward in the 419 // instruction stream until we find the nearest boundary. 420 unsigned NumRegionInstrs = 0; 421 MachineBasicBlock::iterator I = RegionEnd; 422 for(;I != MBB->begin(); --I, --RemainingInstrs, ++NumRegionInstrs) { 423 if (isSchedBoundary(llvm::prior(I), MBB, MF, TII, IsPostRA)) 424 break; 425 } 426 // Notify the scheduler of the region, even if we may skip scheduling 427 // it. Perhaps it still needs to be bundled. 428 Scheduler.enterRegion(MBB, I, RegionEnd, NumRegionInstrs); 429 430 // Skip empty scheduling regions (0 or 1 schedulable instructions). 431 if (I == RegionEnd || I == llvm::prior(RegionEnd)) { 432 // Close the current region. Bundle the terminator if needed. 433 // This invalidates 'RegionEnd' and 'I'. 434 Scheduler.exitRegion(); 435 continue; 436 } 437 DEBUG(dbgs() << "********** " << ((Scheduler.isPostRA()) ? 
"PostRA " : "") 438 << "MI Scheduling **********\n"); 439 DEBUG(dbgs() << MF->getName() 440 << ":BB#" << MBB->getNumber() << " " << MBB->getName() 441 << "\n From: " << *I << " To: "; 442 if (RegionEnd != MBB->end()) dbgs() << *RegionEnd; 443 else dbgs() << "End"; 444 dbgs() << " RegionInstrs: " << NumRegionInstrs 445 << " Remaining: " << RemainingInstrs << "\n"); 446 447 // Schedule a region: possibly reorder instructions. 448 // This invalidates 'RegionEnd' and 'I'. 449 Scheduler.schedule(); 450 451 // Close the current region. 452 Scheduler.exitRegion(); 453 454 // Scheduling has invalidated the current iterator 'I'. Ask the 455 // scheduler for the top of it's scheduled region. 456 RegionEnd = Scheduler.begin(); 457 } 458 assert(RemainingInstrs == 0 && "Instruction count mismatch!"); 459 Scheduler.finishBlock(); 460 if (Scheduler.isPostRA()) { 461 // FIXME: Ideally, no further passes should rely on kill flags. However, 462 // thumb2 size reduction is currently an exception. 463 Scheduler.fixupKills(MBB); 464 } 465 } 466 Scheduler.finalizeSchedule(); 467 } 468 469 void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const { 470 // unimplemented 471 } 472 473 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 474 void ReadyQueue::dump() { 475 dbgs() << Name << ": "; 476 for (unsigned i = 0, e = Queue.size(); i < e; ++i) 477 dbgs() << Queue[i]->NodeNum << " "; 478 dbgs() << "\n"; 479 } 480 #endif 481 482 //===----------------------------------------------------------------------===// 483 // ScheduleDAGMI - Basic machine instruction scheduling. This is 484 // independent of PreRA/PostRA scheduling and involves no extra book-keeping for 485 // virtual registers. 486 // ===----------------------------------------------------------------------===/ 487 488 ScheduleDAGMI::~ScheduleDAGMI() { 489 DeleteContainerPointers(Mutations); 490 delete SchedImpl; 491 } 492 493 bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) { 494 return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU); 495 } 496 497 bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) { 498 if (SuccSU != &ExitSU) { 499 // Do not use WillCreateCycle, it assumes SD scheduling. 500 // If Pred is reachable from Succ, then the edge creates a cycle. 501 if (Topo.IsReachable(PredDep.getSUnit(), SuccSU)) 502 return false; 503 Topo.AddPred(SuccSU, PredDep.getSUnit()); 504 } 505 SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial()); 506 // Return true regardless of whether a new edge needed to be inserted. 507 return true; 508 } 509 510 /// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When 511 /// NumPredsLeft reaches zero, release the successor node. 512 /// 513 /// FIXME: Adjust SuccSU height based on MinLatency. 514 void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) { 515 SUnit *SuccSU = SuccEdge->getSUnit(); 516 517 if (SuccEdge->isWeak()) { 518 --SuccSU->WeakPredsLeft; 519 if (SuccEdge->isCluster()) 520 NextClusterSucc = SuccSU; 521 return; 522 } 523 #ifndef NDEBUG 524 if (SuccSU->NumPredsLeft == 0) { 525 dbgs() << "*** Scheduling failed! ***\n"; 526 SuccSU->dump(this); 527 dbgs() << " has been released too many times!\n"; 528 llvm_unreachable(0); 529 } 530 #endif 531 --SuccSU->NumPredsLeft; 532 if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU) 533 SchedImpl->releaseTopNode(SuccSU); 534 } 535 536 /// releaseSuccessors - Call releaseSucc on each of SU's successors. 
537 void ScheduleDAGMI::releaseSuccessors(SUnit *SU) { 538 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); 539 I != E; ++I) { 540 releaseSucc(SU, &*I); 541 } 542 } 543 544 /// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When 545 /// NumSuccsLeft reaches zero, release the predecessor node. 546 /// 547 /// FIXME: Adjust PredSU height based on MinLatency. 548 void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) { 549 SUnit *PredSU = PredEdge->getSUnit(); 550 551 if (PredEdge->isWeak()) { 552 --PredSU->WeakSuccsLeft; 553 if (PredEdge->isCluster()) 554 NextClusterPred = PredSU; 555 return; 556 } 557 #ifndef NDEBUG 558 if (PredSU->NumSuccsLeft == 0) { 559 dbgs() << "*** Scheduling failed! ***\n"; 560 PredSU->dump(this); 561 dbgs() << " has been released too many times!\n"; 562 llvm_unreachable(0); 563 } 564 #endif 565 --PredSU->NumSuccsLeft; 566 if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) 567 SchedImpl->releaseBottomNode(PredSU); 568 } 569 570 /// releasePredecessors - Call releasePred on each of SU's predecessors. 571 void ScheduleDAGMI::releasePredecessors(SUnit *SU) { 572 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); 573 I != E; ++I) { 574 releasePred(SU, &*I); 575 } 576 } 577 578 /// enterRegion - Called back from MachineScheduler::runOnMachineFunction after 579 /// crossing a scheduling boundary. [begin, end) includes all instructions in 580 /// the region, including the boundary itself and single-instruction regions 581 /// that don't get scheduled. 582 void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb, 583 MachineBasicBlock::iterator begin, 584 MachineBasicBlock::iterator end, 585 unsigned regioninstrs) 586 { 587 ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs); 588 589 SchedImpl->initPolicy(begin, end, regioninstrs); 590 } 591 592 /// This is normally called from the main scheduler loop but may also be invoked 593 /// by the scheduling strategy to perform additional code motion. 594 void ScheduleDAGMI::moveInstruction( 595 MachineInstr *MI, MachineBasicBlock::iterator InsertPos) { 596 // Advance RegionBegin if the first instruction moves down. 597 if (&*RegionBegin == MI) 598 ++RegionBegin; 599 600 // Update the instruction stream. 601 BB->splice(InsertPos, BB, MI); 602 603 // Update LiveIntervals 604 if (LIS) 605 LIS->handleMove(MI, /*UpdateFlags=*/true); 606 607 // Recede RegionBegin if an instruction moves above the first. 608 if (RegionBegin == InsertPos) 609 RegionBegin = MI; 610 } 611 612 bool ScheduleDAGMI::checkSchedLimit() { 613 #ifndef NDEBUG 614 if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) { 615 CurrentTop = CurrentBottom; 616 return false; 617 } 618 ++NumInstrsScheduled; 619 #endif 620 return true; 621 } 622 623 /// Per-region scheduling driver, called back from 624 /// MachineScheduler::runOnMachineFunction. This is a simplified driver that 625 /// does not consider liveness or register pressure. It is useful for PostRA 626 /// scheduling and potentially other custom schedulers. 627 void ScheduleDAGMI::schedule() { 628 // Build the DAG. 629 buildSchedGraph(AA); 630 631 Topo.InitDAGTopologicalSorting(); 632 633 postprocessDAG(); 634 635 SmallVector<SUnit*, 8> TopRoots, BotRoots; 636 findRootsAndBiasEdges(TopRoots, BotRoots); 637 638 // Initialize the strategy before modifying the DAG. 639 // This may initialize a DFSResult to be used for queue priority. 
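  // The driver below only talks to the strategy through a handful of
  // callbacks: initPolicy() (from enterRegion), initialize(), releaseTopNode()
  // / releaseBottomNode(), registerRoots(), and then pickNode()/schedNode()
  // once per instruction. A rough sketch of a minimal custom strategy
  // (hypothetical class, not defined in this file; any remaining pure-virtual
  // hooks of MachineSchedStrategy would also need stubs):
  //
  //   struct SimpleTopDownStrategy : public MachineSchedStrategy {
  //     std::vector<SUnit*> Ready;
  //     virtual void initialize(ScheduleDAGMI *DAG) { Ready.clear(); }
  //     virtual void releaseTopNode(SUnit *SU) { Ready.push_back(SU); }
  //     virtual void releaseBottomNode(SUnit *SU) {}
  //     virtual SUnit *pickNode(bool &IsTopNode) {
  //       IsTopNode = true;
  //       if (Ready.empty()) return NULL;
  //       SUnit *SU = Ready.back();
  //       Ready.pop_back();
  //       return SU;
  //     }
  //     virtual void schedNode(SUnit *SU, bool IsTopNode) {}
  //   };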
640 SchedImpl->initialize(this); 641 642 DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su) 643 SUnits[su].dumpAll(this)); 644 if (ViewMISchedDAGs) viewGraph(); 645 646 // Initialize ready queues now that the DAG and priority data are finalized. 647 initQueues(TopRoots, BotRoots); 648 649 bool IsTopNode = false; 650 while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) { 651 assert(!SU->isScheduled && "Node already scheduled"); 652 if (!checkSchedLimit()) 653 break; 654 655 MachineInstr *MI = SU->getInstr(); 656 if (IsTopNode) { 657 assert(SU->isTopReady() && "node still has unscheduled dependencies"); 658 if (&*CurrentTop == MI) 659 CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom); 660 else 661 moveInstruction(MI, CurrentTop); 662 } 663 else { 664 assert(SU->isBottomReady() && "node still has unscheduled dependencies"); 665 MachineBasicBlock::iterator priorII = 666 priorNonDebug(CurrentBottom, CurrentTop); 667 if (&*priorII == MI) 668 CurrentBottom = priorII; 669 else { 670 if (&*CurrentTop == MI) 671 CurrentTop = nextIfDebug(++CurrentTop, priorII); 672 moveInstruction(MI, CurrentBottom); 673 CurrentBottom = MI; 674 } 675 } 676 updateQueues(SU, IsTopNode); 677 678 // Notify the scheduling strategy after updating the DAG. 679 SchedImpl->schedNode(SU, IsTopNode); 680 } 681 assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone."); 682 683 placeDebugValues(); 684 685 DEBUG({ 686 unsigned BBNum = begin()->getParent()->getNumber(); 687 dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n"; 688 dumpSchedule(); 689 dbgs() << '\n'; 690 }); 691 } 692 693 /// Apply each ScheduleDAGMutation step in order. 694 void ScheduleDAGMI::postprocessDAG() { 695 for (unsigned i = 0, e = Mutations.size(); i < e; ++i) { 696 Mutations[i]->apply(this); 697 } 698 } 699 700 void ScheduleDAGMI:: 701 findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots, 702 SmallVectorImpl<SUnit*> &BotRoots) { 703 for (std::vector<SUnit>::iterator 704 I = SUnits.begin(), E = SUnits.end(); I != E; ++I) { 705 SUnit *SU = &(*I); 706 assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits"); 707 708 // Order predecessors so DFSResult follows the critical path. 709 SU->biasCriticalPath(); 710 711 // A SUnit is ready to top schedule if it has no predecessors. 712 if (!I->NumPredsLeft) 713 TopRoots.push_back(SU); 714 // A SUnit is ready to bottom schedule if it has no successors. 715 if (!I->NumSuccsLeft) 716 BotRoots.push_back(SU); 717 } 718 ExitSU.biasCriticalPath(); 719 } 720 721 /// Identify DAG roots and setup scheduler queues. 722 void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots, 723 ArrayRef<SUnit*> BotRoots) { 724 NextClusterSucc = NULL; 725 NextClusterPred = NULL; 726 727 // Release all DAG roots for scheduling, not including EntrySU/ExitSU. 728 // 729 // Nodes with unreleased weak edges can still be roots. 730 // Release top roots in forward order. 731 for (SmallVectorImpl<SUnit*>::const_iterator 732 I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) { 733 SchedImpl->releaseTopNode(*I); 734 } 735 // Release bottom roots in reverse order so the higher priority nodes appear 736 // first. This is more natural and slightly more efficient. 737 for (SmallVectorImpl<SUnit*>::const_reverse_iterator 738 I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) { 739 SchedImpl->releaseBottomNode(*I); 740 } 741 742 releaseSuccessors(&EntrySU); 743 releasePredecessors(&ExitSU); 744 745 SchedImpl->registerRoots(); 746 747 // Advance past initial DebugValues. 
748 CurrentTop = nextIfDebug(RegionBegin, RegionEnd); 749 CurrentBottom = RegionEnd; 750 } 751 752 /// Update scheduler queues after scheduling an instruction. 753 void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) { 754 // Release dependent instructions for scheduling. 755 if (IsTopNode) 756 releaseSuccessors(SU); 757 else 758 releasePredecessors(SU); 759 760 SU->isScheduled = true; 761 } 762 763 /// Reinsert any remaining debug_values, just like the PostRA scheduler. 764 void ScheduleDAGMI::placeDebugValues() { 765 // If first instruction was a DBG_VALUE then put it back. 766 if (FirstDbgValue) { 767 BB->splice(RegionBegin, BB, FirstDbgValue); 768 RegionBegin = FirstDbgValue; 769 } 770 771 for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator 772 DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) { 773 std::pair<MachineInstr *, MachineInstr *> P = *prior(DI); 774 MachineInstr *DbgValue = P.first; 775 MachineBasicBlock::iterator OrigPrevMI = P.second; 776 if (&*RegionBegin == DbgValue) 777 ++RegionBegin; 778 BB->splice(++OrigPrevMI, BB, DbgValue); 779 if (OrigPrevMI == llvm::prior(RegionEnd)) 780 RegionEnd = DbgValue; 781 } 782 DbgValues.clear(); 783 FirstDbgValue = NULL; 784 } 785 786 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 787 void ScheduleDAGMI::dumpSchedule() const { 788 for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) { 789 if (SUnit *SU = getSUnit(&(*MI))) 790 SU->dump(this); 791 else 792 dbgs() << "Missing SUnit\n"; 793 } 794 } 795 #endif 796 797 //===----------------------------------------------------------------------===// 798 // ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals 799 // preservation. 800 //===----------------------------------------------------------------------===// 801 802 ScheduleDAGMILive::~ScheduleDAGMILive() { 803 delete DFSResult; 804 } 805 806 /// enterRegion - Called back from MachineScheduler::runOnMachineFunction after 807 /// crossing a scheduling boundary. [begin, end) includes all instructions in 808 /// the region, including the boundary itself and single-instruction regions 809 /// that don't get scheduled. 810 void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb, 811 MachineBasicBlock::iterator begin, 812 MachineBasicBlock::iterator end, 813 unsigned regioninstrs) 814 { 815 // ScheduleDAGMI initializes SchedImpl's per-region policy. 816 ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs); 817 818 // For convenience remember the end of the liveness region. 819 LiveRegionEnd = 820 (RegionEnd == bb->end()) ? RegionEnd : llvm::next(RegionEnd); 821 822 SUPressureDiffs.clear(); 823 824 ShouldTrackPressure = SchedImpl->shouldTrackPressure(); 825 } 826 827 // Setup the register pressure trackers for the top scheduled top and bottom 828 // scheduled regions. 829 void ScheduleDAGMILive::initRegPressure() { 830 TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin); 831 BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd); 832 833 // Close the RPTracker to finalize live ins. 834 RPTracker.closeRegion(); 835 836 DEBUG(RPTracker.dump()); 837 838 // Initialize the live ins and live outs. 839 TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs); 840 BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs); 841 842 // Close one end of the tracker so we can call 843 // getMaxUpward/DownwardPressureDelta before advancing across any 844 // instructions. This converts currently live regs into live ins/outs. 
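  // At this point three trackers are in play: RPTracker already walked the
  // whole region bottom-up while the DAG was built, and TopRPTracker /
  // BotRPTracker will advance() down from RegionBegin and recede() up from
  // LiveRegionEnd, respectively, as instructions are scheduled (see
  // scheduleMI() below).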
845 TopRPTracker.closeTop(); 846 BotRPTracker.closeBottom(); 847 848 BotRPTracker.initLiveThru(RPTracker); 849 if (!BotRPTracker.getLiveThru().empty()) { 850 TopRPTracker.initLiveThru(BotRPTracker.getLiveThru()); 851 DEBUG(dbgs() << "Live Thru: "; 852 dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI)); 853 }; 854 855 // For each live out vreg reduce the pressure change associated with other 856 // uses of the same vreg below the live-out reaching def. 857 updatePressureDiffs(RPTracker.getPressure().LiveOutRegs); 858 859 // Account for liveness generated by the region boundary. 860 if (LiveRegionEnd != RegionEnd) { 861 SmallVector<unsigned, 8> LiveUses; 862 BotRPTracker.recede(&LiveUses); 863 updatePressureDiffs(LiveUses); 864 } 865 866 assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom"); 867 868 // Cache the list of excess pressure sets in this region. This will also track 869 // the max pressure in the scheduled code for these sets. 870 RegionCriticalPSets.clear(); 871 const std::vector<unsigned> &RegionPressure = 872 RPTracker.getPressure().MaxSetPressure; 873 for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) { 874 unsigned Limit = RegClassInfo->getRegPressureSetLimit(i); 875 if (RegionPressure[i] > Limit) { 876 DEBUG(dbgs() << TRI->getRegPressureSetName(i) 877 << " Limit " << Limit 878 << " Actual " << RegionPressure[i] << "\n"); 879 RegionCriticalPSets.push_back(PressureChange(i)); 880 } 881 } 882 DEBUG(dbgs() << "Excess PSets: "; 883 for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i) 884 dbgs() << TRI->getRegPressureSetName( 885 RegionCriticalPSets[i].getPSet()) << " "; 886 dbgs() << "\n"); 887 } 888 889 void ScheduleDAGMILive:: 890 updateScheduledPressure(const SUnit *SU, 891 const std::vector<unsigned> &NewMaxPressure) { 892 const PressureDiff &PDiff = getPressureDiff(SU); 893 unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size(); 894 for (PressureDiff::const_iterator I = PDiff.begin(), E = PDiff.end(); 895 I != E; ++I) { 896 if (!I->isValid()) 897 break; 898 unsigned ID = I->getPSet(); 899 while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID) 900 ++CritIdx; 901 if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) { 902 if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc() 903 && NewMaxPressure[ID] <= INT16_MAX) 904 RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]); 905 } 906 unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID); 907 if (NewMaxPressure[ID] >= Limit - 2) { 908 DEBUG(dbgs() << " " << TRI->getRegPressureSetName(ID) << ": " 909 << NewMaxPressure[ID] << " > " << Limit << "(+ " 910 << BotRPTracker.getLiveThru()[ID] << " livethru)\n"); 911 } 912 } 913 } 914 915 /// Update the PressureDiff array for liveness after scheduling this 916 /// instruction. 917 void ScheduleDAGMILive::updatePressureDiffs(ArrayRef<unsigned> LiveUses) { 918 for (unsigned LUIdx = 0, LUEnd = LiveUses.size(); LUIdx != LUEnd; ++LUIdx) { 919 /// FIXME: Currently assuming single-use physregs. 920 unsigned Reg = LiveUses[LUIdx]; 921 DEBUG(dbgs() << " LiveReg: " << PrintVRegOrUnit(Reg, TRI) << "\n"); 922 if (!TRI->isVirtualRegister(Reg)) 923 continue; 924 925 // This may be called before CurrentBottom has been initialized. However, 926 // BotRPTracker must have a valid position. We want the value live into the 927 // instruction or live out of the block, so ask for the previous 928 // instruction's live-out. 
929 const LiveInterval &LI = LIS->getInterval(Reg); 930 VNInfo *VNI; 931 MachineBasicBlock::const_iterator I = 932 nextIfDebug(BotRPTracker.getPos(), BB->end()); 933 if (I == BB->end()) 934 VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB)); 935 else { 936 LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(I)); 937 VNI = LRQ.valueIn(); 938 } 939 // RegisterPressureTracker guarantees that readsReg is true for LiveUses. 940 assert(VNI && "No live value at use."); 941 for (VReg2UseMap::iterator 942 UI = VRegUses.find(Reg); UI != VRegUses.end(); ++UI) { 943 SUnit *SU = UI->SU; 944 DEBUG(dbgs() << " UpdateRegP: SU(" << SU->NodeNum << ") " 945 << *SU->getInstr()); 946 // If this use comes before the reaching def, it cannot be a last use, so 947 // descrease its pressure change. 948 if (!SU->isScheduled && SU != &ExitSU) { 949 LiveQueryResult LRQ 950 = LI.Query(LIS->getInstructionIndex(SU->getInstr())); 951 if (LRQ.valueIn() == VNI) 952 getPressureDiff(SU).addPressureChange(Reg, true, &MRI); 953 } 954 } 955 } 956 } 957 958 /// schedule - Called back from MachineScheduler::runOnMachineFunction 959 /// after setting up the current scheduling region. [RegionBegin, RegionEnd) 960 /// only includes instructions that have DAG nodes, not scheduling boundaries. 961 /// 962 /// This is a skeletal driver, with all the functionality pushed into helpers, 963 /// so that it can be easilly extended by experimental schedulers. Generally, 964 /// implementing MachineSchedStrategy should be sufficient to implement a new 965 /// scheduling algorithm. However, if a scheduler further subclasses 966 /// ScheduleDAGMILive then it will want to override this virtual method in order 967 /// to update any specialized state. 968 void ScheduleDAGMILive::schedule() { 969 buildDAGWithRegPressure(); 970 971 Topo.InitDAGTopologicalSorting(); 972 973 postprocessDAG(); 974 975 SmallVector<SUnit*, 8> TopRoots, BotRoots; 976 findRootsAndBiasEdges(TopRoots, BotRoots); 977 978 // Initialize the strategy before modifying the DAG. 979 // This may initialize a DFSResult to be used for queue priority. 980 SchedImpl->initialize(this); 981 982 DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su) 983 SUnits[su].dumpAll(this)); 984 if (ViewMISchedDAGs) viewGraph(); 985 986 // Initialize ready queues now that the DAG and priority data are finalized. 987 initQueues(TopRoots, BotRoots); 988 989 if (ShouldTrackPressure) { 990 assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker"); 991 TopRPTracker.setPos(CurrentTop); 992 } 993 994 bool IsTopNode = false; 995 while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) { 996 assert(!SU->isScheduled && "Node already scheduled"); 997 if (!checkSchedLimit()) 998 break; 999 1000 scheduleMI(SU, IsTopNode); 1001 1002 updateQueues(SU, IsTopNode); 1003 1004 if (DFSResult) { 1005 unsigned SubtreeID = DFSResult->getSubtreeID(SU); 1006 if (!ScheduledTrees.test(SubtreeID)) { 1007 ScheduledTrees.set(SubtreeID); 1008 DFSResult->scheduleTree(SubtreeID); 1009 SchedImpl->scheduleTree(SubtreeID); 1010 } 1011 } 1012 1013 // Notify the scheduling strategy after updating the DAG. 1014 SchedImpl->schedNode(SU, IsTopNode); 1015 } 1016 assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone."); 1017 1018 placeDebugValues(); 1019 1020 DEBUG({ 1021 unsigned BBNum = begin()->getParent()->getNumber(); 1022 dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n"; 1023 dumpSchedule(); 1024 dbgs() << '\n'; 1025 }); 1026 } 1027 1028 /// Build the DAG and setup three register pressure trackers. 
void ScheduleDAGMILive::buildDAGWithRegPressure() {
  if (!ShouldTrackPressure) {
    RPTracker.reset();
    RegionCriticalPSets.clear();
    buildSchedGraph(AA);
    return;
  }

  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker, &SUPressureDiffs);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

void ScheduleDAGMILive::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single block loops. To handle loops that
/// span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes. For example, consider
/// the following instruction sequence where each instruction has unit latency
/// and defines an eponymous virtual register:
///
/// a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
///
/// This could be relevant to PostRA scheduling, but is currently implemented
/// assuming LiveIntervals.
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
  // This only applies to single block loops.
  if (!BB->isSuccessor(BB))
    return 0;

  unsigned MaxCyclicLatency = 0;
  // Visit each live out vreg def to find def/use pairs that cross iterations.
  ArrayRef<unsigned> LiveOuts = RPTracker.getPressure().LiveOutRegs;
  for (ArrayRef<unsigned>::iterator RI = LiveOuts.begin(), RE = LiveOuts.end();
       RI != RE; ++RI) {
    unsigned Reg = *RI;
    if (!TRI->isVirtualRegister(Reg))
      continue;
    const LiveInterval &LI = LIS->getInterval(Reg);
    const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    if (!DefVNI)
      continue;

    MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
    const SUnit *DefSU = getSUnit(DefMI);
    if (!DefSU)
      continue;

    unsigned LiveOutHeight = DefSU->getHeight();
    unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
    // Visit all local users of the vreg def.
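    // In terms of the example in the comment above, DefSU plays the role of c
    // (the live-out def reaching the back edge) and each phi use visited below
    // plays the role of b, so the loop effectively computes
    // min(LiveOutDepth - depth(use), LiveInHeight - LiveOutHeight) per pair.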
1114 for (VReg2UseMap::iterator 1115 UI = VRegUses.find(Reg); UI != VRegUses.end(); ++UI) { 1116 if (UI->SU == &ExitSU) 1117 continue; 1118 1119 // Only consider uses of the phi. 1120 LiveQueryResult LRQ = 1121 LI.Query(LIS->getInstructionIndex(UI->SU->getInstr())); 1122 if (!LRQ.valueIn()->isPHIDef()) 1123 continue; 1124 1125 // Assume that a path spanning two iterations is a cycle, which could 1126 // overestimate in strange cases. This allows cyclic latency to be 1127 // estimated as the minimum slack of the vreg's depth or height. 1128 unsigned CyclicLatency = 0; 1129 if (LiveOutDepth > UI->SU->getDepth()) 1130 CyclicLatency = LiveOutDepth - UI->SU->getDepth(); 1131 1132 unsigned LiveInHeight = UI->SU->getHeight() + DefSU->Latency; 1133 if (LiveInHeight > LiveOutHeight) { 1134 if (LiveInHeight - LiveOutHeight < CyclicLatency) 1135 CyclicLatency = LiveInHeight - LiveOutHeight; 1136 } 1137 else 1138 CyclicLatency = 0; 1139 1140 DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU(" 1141 << UI->SU->NodeNum << ") = " << CyclicLatency << "c\n"); 1142 if (CyclicLatency > MaxCyclicLatency) 1143 MaxCyclicLatency = CyclicLatency; 1144 } 1145 } 1146 DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n"); 1147 return MaxCyclicLatency; 1148 } 1149 1150 /// Move an instruction and update register pressure. 1151 void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) { 1152 // Move the instruction to its new location in the instruction stream. 1153 MachineInstr *MI = SU->getInstr(); 1154 1155 if (IsTopNode) { 1156 assert(SU->isTopReady() && "node still has unscheduled dependencies"); 1157 if (&*CurrentTop == MI) 1158 CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom); 1159 else { 1160 moveInstruction(MI, CurrentTop); 1161 TopRPTracker.setPos(MI); 1162 } 1163 1164 if (ShouldTrackPressure) { 1165 // Update top scheduled pressure. 1166 TopRPTracker.advance(); 1167 assert(TopRPTracker.getPos() == CurrentTop && "out of sync"); 1168 updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure); 1169 } 1170 } 1171 else { 1172 assert(SU->isBottomReady() && "node still has unscheduled dependencies"); 1173 MachineBasicBlock::iterator priorII = 1174 priorNonDebug(CurrentBottom, CurrentTop); 1175 if (&*priorII == MI) 1176 CurrentBottom = priorII; 1177 else { 1178 if (&*CurrentTop == MI) { 1179 CurrentTop = nextIfDebug(++CurrentTop, priorII); 1180 TopRPTracker.setPos(CurrentTop); 1181 } 1182 moveInstruction(MI, CurrentBottom); 1183 CurrentBottom = MI; 1184 } 1185 if (ShouldTrackPressure) { 1186 // Update bottom scheduled pressure. 1187 SmallVector<unsigned, 8> LiveUses; 1188 BotRPTracker.recede(&LiveUses); 1189 assert(BotRPTracker.getPos() == CurrentBottom && "out of sync"); 1190 updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure); 1191 updatePressureDiffs(LiveUses); 1192 } 1193 } 1194 } 1195 1196 //===----------------------------------------------------------------------===// 1197 // LoadClusterMutation - DAG post-processing to cluster loads. 1198 //===----------------------------------------------------------------------===// 1199 1200 namespace { 1201 /// \brief Post-process the DAG to create cluster edges between neighboring 1202 /// loads. 
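///
/// Mutations such as this run from ScheduleDAGMI::postprocessDAG() after the
/// DAG is built but before any node is scheduled, and are owned by the DAG
/// (see the Mutations vector freed in ~ScheduleDAGMI above). A scheduler
/// factory typically attaches them when it constructs the DAG, roughly like
/// the following sketch (the addMutation() name and the constructor shapes are
/// assumptions; only the Mutations member is visible in this file):
///
///   ScheduleDAGMILive *DAG = new ScheduleDAGMILive(C, new GenericScheduler(C));
///   if (EnableLoadCluster)
///     DAG->addMutation(new LoadClusterMutation(DAG->TII, DAG->TRI));
///   if (EnableMacroFusion)
///     DAG->addMutation(new MacroFusion(DAG->TII));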
1203 class LoadClusterMutation : public ScheduleDAGMutation { 1204 struct LoadInfo { 1205 SUnit *SU; 1206 unsigned BaseReg; 1207 unsigned Offset; 1208 LoadInfo(SUnit *su, unsigned reg, unsigned ofs) 1209 : SU(su), BaseReg(reg), Offset(ofs) {} 1210 }; 1211 static bool LoadInfoLess(const LoadClusterMutation::LoadInfo &LHS, 1212 const LoadClusterMutation::LoadInfo &RHS); 1213 1214 const TargetInstrInfo *TII; 1215 const TargetRegisterInfo *TRI; 1216 public: 1217 LoadClusterMutation(const TargetInstrInfo *tii, 1218 const TargetRegisterInfo *tri) 1219 : TII(tii), TRI(tri) {} 1220 1221 virtual void apply(ScheduleDAGMI *DAG); 1222 protected: 1223 void clusterNeighboringLoads(ArrayRef<SUnit*> Loads, ScheduleDAGMI *DAG); 1224 }; 1225 } // anonymous 1226 1227 bool LoadClusterMutation::LoadInfoLess( 1228 const LoadClusterMutation::LoadInfo &LHS, 1229 const LoadClusterMutation::LoadInfo &RHS) { 1230 if (LHS.BaseReg != RHS.BaseReg) 1231 return LHS.BaseReg < RHS.BaseReg; 1232 return LHS.Offset < RHS.Offset; 1233 } 1234 1235 void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads, 1236 ScheduleDAGMI *DAG) { 1237 SmallVector<LoadClusterMutation::LoadInfo,32> LoadRecords; 1238 for (unsigned Idx = 0, End = Loads.size(); Idx != End; ++Idx) { 1239 SUnit *SU = Loads[Idx]; 1240 unsigned BaseReg; 1241 unsigned Offset; 1242 if (TII->getLdStBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI)) 1243 LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset)); 1244 } 1245 if (LoadRecords.size() < 2) 1246 return; 1247 std::sort(LoadRecords.begin(), LoadRecords.end(), LoadInfoLess); 1248 unsigned ClusterLength = 1; 1249 for (unsigned Idx = 0, End = LoadRecords.size(); Idx < (End - 1); ++Idx) { 1250 if (LoadRecords[Idx].BaseReg != LoadRecords[Idx+1].BaseReg) { 1251 ClusterLength = 1; 1252 continue; 1253 } 1254 1255 SUnit *SUa = LoadRecords[Idx].SU; 1256 SUnit *SUb = LoadRecords[Idx+1].SU; 1257 if (TII->shouldClusterLoads(SUa->getInstr(), SUb->getInstr(), ClusterLength) 1258 && DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) { 1259 1260 DEBUG(dbgs() << "Cluster loads SU(" << SUa->NodeNum << ") - SU(" 1261 << SUb->NodeNum << ")\n"); 1262 // Copy successor edges from SUa to SUb. Interleaving computation 1263 // dependent on SUa can prevent load combining due to register reuse. 1264 // Predecessor edges do not need to be copied from SUb to SUa since nearby 1265 // loads should have effectively the same inputs. 1266 for (SUnit::const_succ_iterator 1267 SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) { 1268 if (SI->getSUnit() == SUb) 1269 continue; 1270 DEBUG(dbgs() << " Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n"); 1271 DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial)); 1272 } 1273 ++ClusterLength; 1274 } 1275 else 1276 ClusterLength = 1; 1277 } 1278 } 1279 1280 /// \brief Callback from DAG postProcessing to create cluster edges for loads. 1281 void LoadClusterMutation::apply(ScheduleDAGMI *DAG) { 1282 // Map DAG NodeNum to store chain ID. 1283 DenseMap<unsigned, unsigned> StoreChainIDs; 1284 // Map each store chain to a set of dependent loads. 
1285 SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents; 1286 for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) { 1287 SUnit *SU = &DAG->SUnits[Idx]; 1288 if (!SU->getInstr()->mayLoad()) 1289 continue; 1290 unsigned ChainPredID = DAG->SUnits.size(); 1291 for (SUnit::const_pred_iterator 1292 PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) { 1293 if (PI->isCtrl()) { 1294 ChainPredID = PI->getSUnit()->NodeNum; 1295 break; 1296 } 1297 } 1298 // Check if this chain-like pred has been seen 1299 // before. ChainPredID==MaxNodeID for loads at the top of the schedule. 1300 unsigned NumChains = StoreChainDependents.size(); 1301 std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result = 1302 StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains)); 1303 if (Result.second) 1304 StoreChainDependents.resize(NumChains + 1); 1305 StoreChainDependents[Result.first->second].push_back(SU); 1306 } 1307 // Iterate over the store chains. 1308 for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx) 1309 clusterNeighboringLoads(StoreChainDependents[Idx], DAG); 1310 } 1311 1312 //===----------------------------------------------------------------------===// 1313 // MacroFusion - DAG post-processing to encourage fusion of macro ops. 1314 //===----------------------------------------------------------------------===// 1315 1316 namespace { 1317 /// \brief Post-process the DAG to create cluster edges between instructions 1318 /// that may be fused by the processor into a single operation. 1319 class MacroFusion : public ScheduleDAGMutation { 1320 const TargetInstrInfo *TII; 1321 public: 1322 MacroFusion(const TargetInstrInfo *tii): TII(tii) {} 1323 1324 virtual void apply(ScheduleDAGMI *DAG); 1325 }; 1326 } // anonymous 1327 1328 /// \brief Callback from DAG postProcessing to create cluster edges to encourage 1329 /// fused operations. 1330 void MacroFusion::apply(ScheduleDAGMI *DAG) { 1331 // For now, assume targets can only fuse with the branch. 1332 MachineInstr *Branch = DAG->ExitSU.getInstr(); 1333 if (!Branch) 1334 return; 1335 1336 for (unsigned Idx = DAG->SUnits.size(); Idx > 0;) { 1337 SUnit *SU = &DAG->SUnits[--Idx]; 1338 if (!TII->shouldScheduleAdjacent(SU->getInstr(), Branch)) 1339 continue; 1340 1341 // Create a single weak edge from SU to ExitSU. The only effect is to cause 1342 // bottom-up scheduling to heavily prioritize the clustered SU. There is no 1343 // need to copy predecessor edges from ExitSU to SU, since top-down 1344 // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling 1345 // of SU, we could create an artificial edge from the deepest root, but it 1346 // hasn't been needed yet. 1347 bool Success = DAG->addEdge(&DAG->ExitSU, SDep(SU, SDep::Cluster)); 1348 (void)Success; 1349 assert(Success && "No DAG nodes should be reachable from ExitSU"); 1350 1351 DEBUG(dbgs() << "Macro Fuse SU(" << SU->NodeNum << ")\n"); 1352 break; 1353 } 1354 } 1355 1356 //===----------------------------------------------------------------------===// 1357 // CopyConstrain - DAG post-processing to encourage copy elimination. 1358 //===----------------------------------------------------------------------===// 1359 1360 namespace { 1361 /// \brief Post-process the DAG to create weak edges from all uses of a copy to 1362 /// the one use that defines the copy's source vreg, most likely an induction 1363 /// variable increment. 1364 class CopyConstrain : public ScheduleDAGMutation { 1365 // Transient state. 
  SlotIndex RegionBeginIdx;
  // RegionEndIdx is the slot index of the last non-debug instruction in the
  // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
  SlotIndex RegionEndIdx;
public:
  CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}

  virtual void apply(ScheduleDAGMI *DAG);

protected:
  void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
};
} // anonymous

/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
///  I0: = dst
///  I1: src = ...
///  I2: = dst
///  I3: dst = src (copy)
/// (create pred->succ edges I0->I1, I2->I1)
///
/// 2) Local copy:
///  I0: dst = src (copy)
///  I1: = dst
///  I2: src = ...
///  I3: = dst
/// (create pred->succ edges I1->I2, I3->I2)
///
/// Although the MachineScheduler is currently constrained to single blocks,
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
  LiveIntervals *LIS = DAG->getLIS();
  MachineInstr *Copy = CopySU->getInstr();

  // Check for pure vreg copies.
  unsigned SrcReg = Copy->getOperand(1).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
    return;

  unsigned DstReg = Copy->getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(DstReg))
    return;

  // Check if either the dest or source is local. If it's live across a back
  // edge, it's not local. Note that if both vregs are live across the back
  // edge, we cannot successfully constrain the copy without cyclic scheduling.
  unsigned LocalReg = DstReg;
  unsigned GlobalReg = SrcReg;
  LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
  if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
    LocalReg = SrcReg;
    GlobalReg = DstReg;
    LocalLI = &LIS->getInterval(LocalReg);
    if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
      return;
  }
  LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);

  // Find the global segment after the start of the local LI.
  LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
  // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
  // local live range. We could create edges from other global uses to the local
  // start, but the coalescer should have already eliminated these cases, so
  // don't bother dealing with it.
  if (GlobalSegment == GlobalLI->end())
    return;

  // If GlobalSegment is killed at the LocalLI->start, the call to find()
  // returned the next global segment. But if GlobalSegment overlaps with
  // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
  // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
  if (GlobalSegment->contains(LocalLI->beginIndex()))
    ++GlobalSegment;

  if (GlobalSegment == GlobalLI->end())
    return;

  // Check if GlobalLI contains a hole in the vicinity of LocalLI.
  if (GlobalSegment != GlobalLI->begin()) {
    // Two address defs have no hole.
1449 if (SlotIndex::isSameInstr(llvm::prior(GlobalSegment)->end, 1450 GlobalSegment->start)) { 1451 return; 1452 } 1453 // If the prior global segment may be defined by the same two-address 1454 // instruction that also defines LocalLI, then can't make a hole here. 1455 if (SlotIndex::isSameInstr(llvm::prior(GlobalSegment)->start, 1456 LocalLI->beginIndex())) { 1457 return; 1458 } 1459 // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise 1460 // it would be a disconnected component in the live range. 1461 assert(llvm::prior(GlobalSegment)->start < LocalLI->beginIndex() && 1462 "Disconnected LRG within the scheduling region."); 1463 } 1464 MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start); 1465 if (!GlobalDef) 1466 return; 1467 1468 SUnit *GlobalSU = DAG->getSUnit(GlobalDef); 1469 if (!GlobalSU) 1470 return; 1471 1472 // GlobalDef is the bottom of the GlobalLI hole. Open the hole by 1473 // constraining the uses of the last local def to precede GlobalDef. 1474 SmallVector<SUnit*,8> LocalUses; 1475 const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex()); 1476 MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def); 1477 SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef); 1478 for (SUnit::const_succ_iterator 1479 I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end(); 1480 I != E; ++I) { 1481 if (I->getKind() != SDep::Data || I->getReg() != LocalReg) 1482 continue; 1483 if (I->getSUnit() == GlobalSU) 1484 continue; 1485 if (!DAG->canAddEdge(GlobalSU, I->getSUnit())) 1486 return; 1487 LocalUses.push_back(I->getSUnit()); 1488 } 1489 // Open the top of the GlobalLI hole by constraining any earlier global uses 1490 // to precede the start of LocalLI. 1491 SmallVector<SUnit*,8> GlobalUses; 1492 MachineInstr *FirstLocalDef = 1493 LIS->getInstructionFromIndex(LocalLI->beginIndex()); 1494 SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef); 1495 for (SUnit::const_pred_iterator 1496 I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) { 1497 if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg) 1498 continue; 1499 if (I->getSUnit() == FirstLocalSU) 1500 continue; 1501 if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit())) 1502 return; 1503 GlobalUses.push_back(I->getSUnit()); 1504 } 1505 DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n"); 1506 // Add the weak edges. 1507 for (SmallVectorImpl<SUnit*>::const_iterator 1508 I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) { 1509 DEBUG(dbgs() << " Local use SU(" << (*I)->NodeNum << ") -> SU(" 1510 << GlobalSU->NodeNum << ")\n"); 1511 DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak)); 1512 } 1513 for (SmallVectorImpl<SUnit*>::const_iterator 1514 I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) { 1515 DEBUG(dbgs() << " Global use SU(" << (*I)->NodeNum << ") -> SU(" 1516 << FirstLocalSU->NodeNum << ")\n"); 1517 DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak)); 1518 } 1519 } 1520 1521 /// \brief Callback from DAG postProcessing to create weak edges to encourage 1522 /// copy elimination. 
1523 void CopyConstrain::apply(ScheduleDAGMI *DAG) { 1524 assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals"); 1525 1526 MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end()); 1527 if (FirstPos == DAG->end()) 1528 return; 1529 RegionBeginIdx = DAG->getLIS()->getInstructionIndex(&*FirstPos); 1530 RegionEndIdx = DAG->getLIS()->getInstructionIndex( 1531 &*priorNonDebug(DAG->end(), DAG->begin())); 1532 1533 for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) { 1534 SUnit *SU = &DAG->SUnits[Idx]; 1535 if (!SU->getInstr()->isCopy()) 1536 continue; 1537 1538 constrainLocalCopy(SU, static_cast<ScheduleDAGMILive*>(DAG)); 1539 } 1540 } 1541 1542 //===----------------------------------------------------------------------===// 1543 // MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler 1544 // and possibly other custom schedulers. 1545 //===----------------------------------------------------------------------===// 1546 1547 static const unsigned InvalidCycle = ~0U; 1548 1549 SchedBoundary::~SchedBoundary() { delete HazardRec; } 1550 1551 void SchedBoundary::reset() { 1552 // A new HazardRec is created for each DAG and owned by SchedBoundary. 1553 // Destroying and reconstructing it is very expensive though. So keep 1554 // invalid, placeholder HazardRecs. 1555 if (HazardRec && HazardRec->isEnabled()) { 1556 delete HazardRec; 1557 HazardRec = 0; 1558 } 1559 Available.clear(); 1560 Pending.clear(); 1561 CheckPending = false; 1562 NextSUs.clear(); 1563 CurrCycle = 0; 1564 CurrMOps = 0; 1565 MinReadyCycle = UINT_MAX; 1566 ExpectedLatency = 0; 1567 DependentLatency = 0; 1568 RetiredMOps = 0; 1569 MaxExecutedResCount = 0; 1570 ZoneCritResIdx = 0; 1571 IsResourceLimited = false; 1572 ReservedCycles.clear(); 1573 #ifndef NDEBUG 1574 // Track the maximum number of stall cycles that could arise either from the 1575 // latency of a DAG edge or the number of cycles that a processor resource is 1576 // reserved (SchedBoundary::ReservedCycles). 1577 MaxObservedLatency = 0; 1578 #endif 1579 // Reserve a zero-count for invalid CritResIdx. 1580 ExecutedResCounts.resize(1); 1581 assert(!ExecutedResCounts[0] && "nonzero count for bad resource"); 1582 } 1583 1584 void SchedRemainder:: 1585 init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) { 1586 reset(); 1587 if (!SchedModel->hasInstrSchedModel()) 1588 return; 1589 RemainingCounts.resize(SchedModel->getNumProcResourceKinds()); 1590 for (std::vector<SUnit>::iterator 1591 I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) { 1592 const MCSchedClassDesc *SC = DAG->getSchedClass(&*I); 1593 RemIssueCount += SchedModel->getNumMicroOps(I->getInstr(), SC) 1594 * SchedModel->getMicroOpFactor(); 1595 for (TargetSchedModel::ProcResIter 1596 PI = SchedModel->getWriteProcResBegin(SC), 1597 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) { 1598 unsigned PIdx = PI->ProcResourceIdx; 1599 unsigned Factor = SchedModel->getResourceFactor(PIdx); 1600 RemainingCounts[PIdx] += (Factor * PI->Cycles); 1601 } 1602 } 1603 } 1604 1605 void SchedBoundary:: 1606 init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) { 1607 reset(); 1608 DAG = dag; 1609 SchedModel = smodel; 1610 Rem = rem; 1611 if (SchedModel->hasInstrSchedModel()) { 1612 ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds()); 1613 ReservedCycles.resize(SchedModel->getNumProcResourceKinds(), InvalidCycle); 1614 } 1615 } 1616 1617 /// Compute the stall cycles based on this SUnit's ready time. 
Heuristics treat
1618 /// these "soft stalls" differently than the hard stall cycles based on CPU
1619 /// resources and computed by checkHazard(). A fully in-order model
1620 /// (MicroOpBufferSize==0) will not make use of this since instructions are not
1621 /// available for scheduling until they are ready. However, a weaker in-order
1622 /// model may use this for heuristics. For example, if a processor has in-order
1623 /// behavior when reading certain resources, this may come into play.
1624 unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
1625 if (!SU->isUnbuffered)
1626 return 0;
1627
1628 unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
1629 if (ReadyCycle > CurrCycle)
1630 return ReadyCycle - CurrCycle;
1631 return 0;
1632 }
1633
1634 /// Compute the next cycle at which the given processor resource can be
1635 /// scheduled.
1636 unsigned SchedBoundary::
1637 getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
1638 unsigned NextUnreserved = ReservedCycles[PIdx];
1639 // If this resource has never been used, always return cycle zero.
1640 if (NextUnreserved == InvalidCycle)
1641 return 0;
1642 // For bottom-up scheduling add the cycles needed for the current operation.
1643 if (!isTop())
1644 NextUnreserved += Cycles;
1645 return NextUnreserved;
1646 }
1647
1648 /// Does this SU have a hazard within the current instruction group.
1649 ///
1650 /// The scheduler supports two modes of hazard recognition. The first is the
1651 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
1652 /// supports highly complicated in-order reservation tables
1653 /// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
1654 ///
1655 /// The second is a streamlined mechanism that checks for hazards based on
1656 /// simple counters that the scheduler itself maintains. It explicitly checks
1657 /// for instruction dispatch limitations, including the number of micro-ops that
1658 /// can dispatch per cycle.
1659 ///
1660 /// TODO: Also check whether the SU must start a new group.
1661 bool SchedBoundary::checkHazard(SUnit *SU) {
1662 if (HazardRec->isEnabled()
1663 && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
1664 return true;
1665 }
1666 unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
1667 if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
1668 DEBUG(dbgs() << " SU(" << SU->NodeNum << ") uops="
1669 << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
1670 return true;
1671 }
1672 if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
1673 const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1674 for (TargetSchedModel::ProcResIter
1675 PI = SchedModel->getWriteProcResBegin(SC),
1676 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1677 if (getNextResourceCycle(PI->ProcResourceIdx, PI->Cycles) > CurrCycle)
1678 return true;
1679 }
1680 }
1681 return false;
1682 }
1683
1684 // Find the unscheduled node in ReadySUs with the highest latency.
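// This is the "independent" latency component described in setPolicy() below,
// roughly ILat = max N.depth for N in Available|Pending, and it feeds the
// RemainingLatency estimate for the zone.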
1685 unsigned SchedBoundary:: 1686 findMaxLatency(ArrayRef<SUnit*> ReadySUs) { 1687 SUnit *LateSU = 0; 1688 unsigned RemLatency = 0; 1689 for (ArrayRef<SUnit*>::iterator I = ReadySUs.begin(), E = ReadySUs.end(); 1690 I != E; ++I) { 1691 unsigned L = getUnscheduledLatency(*I); 1692 if (L > RemLatency) { 1693 RemLatency = L; 1694 LateSU = *I; 1695 } 1696 } 1697 if (LateSU) { 1698 DEBUG(dbgs() << Available.getName() << " RemLatency SU(" 1699 << LateSU->NodeNum << ") " << RemLatency << "c\n"); 1700 } 1701 return RemLatency; 1702 } 1703 1704 // Count resources in this zone and the remaining unscheduled 1705 // instruction. Return the max count, scaled. Set OtherCritIdx to the critical 1706 // resource index, or zero if the zone is issue limited. 1707 unsigned SchedBoundary:: 1708 getOtherResourceCount(unsigned &OtherCritIdx) { 1709 OtherCritIdx = 0; 1710 if (!SchedModel->hasInstrSchedModel()) 1711 return 0; 1712 1713 unsigned OtherCritCount = Rem->RemIssueCount 1714 + (RetiredMOps * SchedModel->getMicroOpFactor()); 1715 DEBUG(dbgs() << " " << Available.getName() << " + Remain MOps: " 1716 << OtherCritCount / SchedModel->getMicroOpFactor() << '\n'); 1717 for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds(); 1718 PIdx != PEnd; ++PIdx) { 1719 unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx]; 1720 if (OtherCount > OtherCritCount) { 1721 OtherCritCount = OtherCount; 1722 OtherCritIdx = PIdx; 1723 } 1724 } 1725 if (OtherCritIdx) { 1726 DEBUG(dbgs() << " " << Available.getName() << " + Remain CritRes: " 1727 << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx) 1728 << " " << SchedModel->getResourceName(OtherCritIdx) << "\n"); 1729 } 1730 return OtherCritCount; 1731 } 1732 1733 void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) { 1734 if (ReadyCycle < MinReadyCycle) 1735 MinReadyCycle = ReadyCycle; 1736 1737 // Check for interlocks first. For the purpose of other heuristics, an 1738 // instruction that cannot issue appears as if it's not in the ReadyQueue. 1739 bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0; 1740 if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU)) 1741 Pending.push(SU); 1742 else 1743 Available.push(SU); 1744 1745 // Record this node as an immediate dependent of the scheduled node. 
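// NextSUs feeds the NextDefUse heuristic in GenericScheduler::tryCandidate
// (queried via Zone.isNextSU), which prefers immediate users of the last
// scheduled instruction.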
1746 NextSUs.insert(SU); 1747 } 1748 1749 void SchedBoundary::releaseTopNode(SUnit *SU) { 1750 if (SU->isScheduled) 1751 return; 1752 1753 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); 1754 I != E; ++I) { 1755 if (I->isWeak()) 1756 continue; 1757 unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle; 1758 unsigned Latency = I->getLatency(); 1759 #ifndef NDEBUG 1760 MaxObservedLatency = std::max(Latency, MaxObservedLatency); 1761 #endif 1762 if (SU->TopReadyCycle < PredReadyCycle + Latency) 1763 SU->TopReadyCycle = PredReadyCycle + Latency; 1764 } 1765 releaseNode(SU, SU->TopReadyCycle); 1766 } 1767 1768 void SchedBoundary::releaseBottomNode(SUnit *SU) { 1769 if (SU->isScheduled) 1770 return; 1771 1772 assert(SU->getInstr() && "Scheduled SUnit must have instr"); 1773 1774 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); 1775 I != E; ++I) { 1776 if (I->isWeak()) 1777 continue; 1778 unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle; 1779 unsigned Latency = I->getLatency(); 1780 #ifndef NDEBUG 1781 MaxObservedLatency = std::max(Latency, MaxObservedLatency); 1782 #endif 1783 if (SU->BotReadyCycle < SuccReadyCycle + Latency) 1784 SU->BotReadyCycle = SuccReadyCycle + Latency; 1785 } 1786 releaseNode(SU, SU->BotReadyCycle); 1787 } 1788 1789 /// Move the boundary of scheduled code by one cycle. 1790 void SchedBoundary::bumpCycle(unsigned NextCycle) { 1791 if (SchedModel->getMicroOpBufferSize() == 0) { 1792 assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized"); 1793 if (MinReadyCycle > NextCycle) 1794 NextCycle = MinReadyCycle; 1795 } 1796 // Update the current micro-ops, which will issue in the next cycle. 1797 unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle); 1798 CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps; 1799 1800 // Decrement DependentLatency based on the next cycle. 1801 if ((NextCycle - CurrCycle) > DependentLatency) 1802 DependentLatency = 0; 1803 else 1804 DependentLatency -= (NextCycle - CurrCycle); 1805 1806 if (!HazardRec->isEnabled()) { 1807 // Bypass HazardRec virtual calls. 1808 CurrCycle = NextCycle; 1809 } 1810 else { 1811 // Bypass getHazardType calls in case of long latency. 1812 for (; CurrCycle != NextCycle; ++CurrCycle) { 1813 if (isTop()) 1814 HazardRec->AdvanceCycle(); 1815 else 1816 HazardRec->RecedeCycle(); 1817 } 1818 } 1819 CheckPending = true; 1820 unsigned LFactor = SchedModel->getLatencyFactor(); 1821 IsResourceLimited = 1822 (int)(getCriticalCount() - (getScheduledLatency() * LFactor)) 1823 > (int)LFactor; 1824 1825 DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n'); 1826 } 1827 1828 void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) { 1829 ExecutedResCounts[PIdx] += Count; 1830 if (ExecutedResCounts[PIdx] > MaxExecutedResCount) 1831 MaxExecutedResCount = ExecutedResCounts[PIdx]; 1832 } 1833 1834 /// Add the given processor resource to this scheduled zone. 1835 /// 1836 /// \param Cycles indicates the number of consecutive (non-pipelined) cycles 1837 /// during which this resource is consumed. 1838 /// 1839 /// \return the next cycle at which the instruction may execute without 1840 /// oversubscribing resources. 
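/// For illustration (hypothetical numbers): with getResourceFactor(PIdx) == 2
/// and Cycles == 3, the executed count for PIdx below grows by
/// Factor * Cycles == 6 scaled units, and PIdx becomes the zone's critical
/// resource once its scaled count exceeds getCriticalCount().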
1841 unsigned SchedBoundary:: 1842 countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) { 1843 unsigned Factor = SchedModel->getResourceFactor(PIdx); 1844 unsigned Count = Factor * Cycles; 1845 DEBUG(dbgs() << " " << SchedModel->getResourceName(PIdx) 1846 << " +" << Cycles << "x" << Factor << "u\n"); 1847 1848 // Update Executed resources counts. 1849 incExecutedResources(PIdx, Count); 1850 assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted"); 1851 Rem->RemainingCounts[PIdx] -= Count; 1852 1853 // Check if this resource exceeds the current critical resource. If so, it 1854 // becomes the critical resource. 1855 if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) { 1856 ZoneCritResIdx = PIdx; 1857 DEBUG(dbgs() << " *** Critical resource " 1858 << SchedModel->getResourceName(PIdx) << ": " 1859 << getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n"); 1860 } 1861 // For reserved resources, record the highest cycle using the resource. 1862 unsigned NextAvailable = getNextResourceCycle(PIdx, Cycles); 1863 if (NextAvailable > CurrCycle) { 1864 DEBUG(dbgs() << " Resource conflict: " 1865 << SchedModel->getProcResource(PIdx)->Name << " reserved until @" 1866 << NextAvailable << "\n"); 1867 } 1868 return NextAvailable; 1869 } 1870 1871 /// Move the boundary of scheduled code by one SUnit. 1872 void SchedBoundary::bumpNode(SUnit *SU) { 1873 // Update the reservation table. 1874 if (HazardRec->isEnabled()) { 1875 if (!isTop() && SU->isCall) { 1876 // Calls are scheduled with their preceding instructions. For bottom-up 1877 // scheduling, clear the pipeline state before emitting. 1878 HazardRec->Reset(); 1879 } 1880 HazardRec->EmitInstruction(SU); 1881 } 1882 // checkHazard should prevent scheduling multiple instructions per cycle that 1883 // exceed the issue width. 1884 const MCSchedClassDesc *SC = DAG->getSchedClass(SU); 1885 unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr()); 1886 assert( 1887 (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) && 1888 "Cannot schedule this instruction's MicroOps in the current cycle."); 1889 1890 unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle); 1891 DEBUG(dbgs() << " Ready @" << ReadyCycle << "c\n"); 1892 1893 unsigned NextCycle = CurrCycle; 1894 switch (SchedModel->getMicroOpBufferSize()) { 1895 case 0: 1896 assert(ReadyCycle <= CurrCycle && "Broken PendingQueue"); 1897 break; 1898 case 1: 1899 if (ReadyCycle > NextCycle) { 1900 NextCycle = ReadyCycle; 1901 DEBUG(dbgs() << " *** Stall until: " << ReadyCycle << "\n"); 1902 } 1903 break; 1904 default: 1905 // We don't currently model the OOO reorder buffer, so consider all 1906 // scheduled MOps to be "retired". We do loosely model in-order resource 1907 // latency. If this instruction uses an in-order resource, account for any 1908 // likely stall cycles. 1909 if (SU->isUnbuffered && ReadyCycle > NextCycle) 1910 NextCycle = ReadyCycle; 1911 break; 1912 } 1913 RetiredMOps += IncMOps; 1914 1915 // Update resource counts and critical resource. 1916 if (SchedModel->hasInstrSchedModel()) { 1917 unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor(); 1918 assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted"); 1919 Rem->RemIssueCount -= DecRemIssue; 1920 if (ZoneCritResIdx) { 1921 // Scale scheduled micro-ops for comparing with the critical resource. 
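// For example (hypothetical numbers): 10 retired micro-ops with a micro-op
// factor of 2 give ScaledMOps == 20, directly comparable with
// getResourceCount(ZoneCritResIdx) in the check that follows.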
1922 unsigned ScaledMOps =
1923 RetiredMOps * SchedModel->getMicroOpFactor();
1924
1925 // If scaled micro-ops are now more than the previous critical resource by
1926 // a full cycle, then micro-ops issue becomes critical.
1927 if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
1928 >= (int)SchedModel->getLatencyFactor()) {
1929 ZoneCritResIdx = 0;
1930 DEBUG(dbgs() << " *** Critical resource NumMicroOps: "
1931 << ScaledMOps / SchedModel->getLatencyFactor() << "c\n");
1932 }
1933 }
1934 for (TargetSchedModel::ProcResIter
1935 PI = SchedModel->getWriteProcResBegin(SC),
1936 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1937 unsigned RCycle =
1938 countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
1939 if (RCycle > NextCycle)
1940 NextCycle = RCycle;
1941 }
1942 if (SU->hasReservedResource) {
1943 // For reserved resources, record the highest cycle using the resource.
1944 // For top-down scheduling, this is the cycle in which we schedule this
1945 // instruction plus the number of cycles the operation reserves the
1946 // resource. For bottom-up, it is simply the instruction's cycle.
1947 for (TargetSchedModel::ProcResIter
1948 PI = SchedModel->getWriteProcResBegin(SC),
1949 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1950 unsigned PIdx = PI->ProcResourceIdx;
1951 if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
1952 ReservedCycles[PIdx] = isTop() ? NextCycle + PI->Cycles : NextCycle;
1953 #ifndef NDEBUG
1954 MaxObservedLatency = std::max(PI->Cycles, MaxObservedLatency);
1955 #endif
1956 }
1957 }
1958 }
1959 }
1960 // Update ExpectedLatency and DependentLatency.
1961 unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
1962 unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
1963 if (SU->getDepth() > TopLatency) {
1964 TopLatency = SU->getDepth();
1965 DEBUG(dbgs() << " " << Available.getName()
1966 << " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n");
1967 }
1968 if (SU->getHeight() > BotLatency) {
1969 BotLatency = SU->getHeight();
1970 DEBUG(dbgs() << " " << Available.getName()
1971 << " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n");
1972 }
1973 // If we stall for any reason, bump the cycle.
1974 if (NextCycle > CurrCycle) {
1975 bumpCycle(NextCycle);
1976 }
1977 else {
1978 // After updating ZoneCritResIdx and ExpectedLatency, check if we're
1979 // resource limited. If a stall occurred, bumpCycle does this.
1980 unsigned LFactor = SchedModel->getLatencyFactor();
1981 IsResourceLimited =
1982 (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
1983 > (int)LFactor;
1984 }
1985 // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
1986 // resets CurrMOps. Loop to handle instructions with more MOps than can issue
1987 // in one cycle. Since we commonly reach the max MOps here, opportunistically
1988 // bump the cycle to avoid uselessly checking everything in the readyQ.
1989 CurrMOps += IncMOps;
1990 while (CurrMOps >= SchedModel->getIssueWidth()) {
1991 DEBUG(dbgs() << " *** Max MOps " << CurrMOps
1992 << " at cycle " << CurrCycle << '\n');
1993 bumpCycle(++NextCycle);
1994 }
1995 DEBUG(dumpScheduledState());
1996 }
1997
1998 /// Release pending ready nodes into the available queue. This makes them
1999 /// visible to heuristics.
2000 void SchedBoundary::releasePending() {
2001 // If the available queue is empty, it is safe to reset MinReadyCycle.
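// Any instruction still in Pending re-establishes MinReadyCycle in the loop
// below, so resetting it here cannot lose a pending ready time.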
2002 if (Available.empty()) 2003 MinReadyCycle = UINT_MAX; 2004 2005 // Check to see if any of the pending instructions are ready to issue. If 2006 // so, add them to the available queue. 2007 bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0; 2008 for (unsigned i = 0, e = Pending.size(); i != e; ++i) { 2009 SUnit *SU = *(Pending.begin()+i); 2010 unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle; 2011 2012 if (ReadyCycle < MinReadyCycle) 2013 MinReadyCycle = ReadyCycle; 2014 2015 if (!IsBuffered && ReadyCycle > CurrCycle) 2016 continue; 2017 2018 if (checkHazard(SU)) 2019 continue; 2020 2021 Available.push(SU); 2022 Pending.remove(Pending.begin()+i); 2023 --i; --e; 2024 } 2025 DEBUG(if (!Pending.empty()) Pending.dump()); 2026 CheckPending = false; 2027 } 2028 2029 /// Remove SU from the ready set for this boundary. 2030 void SchedBoundary::removeReady(SUnit *SU) { 2031 if (Available.isInQueue(SU)) 2032 Available.remove(Available.find(SU)); 2033 else { 2034 assert(Pending.isInQueue(SU) && "bad ready count"); 2035 Pending.remove(Pending.find(SU)); 2036 } 2037 } 2038 2039 /// If this queue only has one ready candidate, return it. As a side effect, 2040 /// defer any nodes that now hit a hazard, and advance the cycle until at least 2041 /// one node is ready. If multiple instructions are ready, return NULL. 2042 SUnit *SchedBoundary::pickOnlyChoice() { 2043 if (CheckPending) 2044 releasePending(); 2045 2046 if (CurrMOps > 0) { 2047 // Defer any ready instrs that now have a hazard. 2048 for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) { 2049 if (checkHazard(*I)) { 2050 Pending.push(*I); 2051 I = Available.remove(I); 2052 continue; 2053 } 2054 ++I; 2055 } 2056 } 2057 for (unsigned i = 0; Available.empty(); ++i) { 2058 assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedLatency) && 2059 "permanent hazard"); (void)i; 2060 bumpCycle(CurrCycle + 1); 2061 releasePending(); 2062 } 2063 if (Available.size() == 1) 2064 return *Available.begin(); 2065 return NULL; 2066 } 2067 2068 #ifndef NDEBUG 2069 // This is useful information to dump after bumpNode. 2070 // Note that the Queue contents are more useful before pickNodeFromQueue. 2071 void SchedBoundary::dumpScheduledState() { 2072 unsigned ResFactor; 2073 unsigned ResCount; 2074 if (ZoneCritResIdx) { 2075 ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx); 2076 ResCount = getResourceCount(ZoneCritResIdx); 2077 } 2078 else { 2079 ResFactor = SchedModel->getMicroOpFactor(); 2080 ResCount = RetiredMOps * SchedModel->getMicroOpFactor(); 2081 } 2082 unsigned LFactor = SchedModel->getLatencyFactor(); 2083 dbgs() << Available.getName() << " @" << CurrCycle << "c\n" 2084 << " Retired: " << RetiredMOps; 2085 dbgs() << "\n Executed: " << getExecutedCount() / LFactor << "c"; 2086 dbgs() << "\n Critical: " << ResCount / LFactor << "c, " 2087 << ResCount / ResFactor << " " 2088 << SchedModel->getResourceName(ZoneCritResIdx) 2089 << "\n ExpectedLatency: " << ExpectedLatency << "c\n" 2090 << (IsResourceLimited ? " - Resource" : " - Latency") 2091 << " limited.\n"; 2092 } 2093 #endif 2094 2095 //===----------------------------------------------------------------------===// 2096 // GenericScheduler - Generic implementation of MachineSchedStrategy. 2097 //===----------------------------------------------------------------------===// 2098 2099 namespace { 2100 /// Base class for GenericScheduler. 
This class maintains information about 2101 /// scheduling candidates based on TargetSchedModel making it easy to implement 2102 /// heuristics for either preRA or postRA scheduling. 2103 class GenericSchedulerBase : public MachineSchedStrategy { 2104 public: 2105 /// Represent the type of SchedCandidate found within a single queue. 2106 /// pickNodeBidirectional depends on these listed by decreasing priority. 2107 enum CandReason { 2108 NoCand, PhysRegCopy, RegExcess, RegCritical, Stall, Cluster, Weak, RegMax, 2109 ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce, 2110 TopDepthReduce, TopPathReduce, NextDefUse, NodeOrder}; 2111 2112 #ifndef NDEBUG 2113 static const char *getReasonStr(GenericSchedulerBase::CandReason Reason); 2114 #endif 2115 2116 /// Policy for scheduling the next instruction in the candidate's zone. 2117 struct CandPolicy { 2118 bool ReduceLatency; 2119 unsigned ReduceResIdx; 2120 unsigned DemandResIdx; 2121 2122 CandPolicy(): ReduceLatency(false), ReduceResIdx(0), DemandResIdx(0) {} 2123 }; 2124 2125 /// Status of an instruction's critical resource consumption. 2126 struct SchedResourceDelta { 2127 // Count critical resources in the scheduled region required by SU. 2128 unsigned CritResources; 2129 2130 // Count critical resources from another region consumed by SU. 2131 unsigned DemandedResources; 2132 2133 SchedResourceDelta(): CritResources(0), DemandedResources(0) {} 2134 2135 bool operator==(const SchedResourceDelta &RHS) const { 2136 return CritResources == RHS.CritResources 2137 && DemandedResources == RHS.DemandedResources; 2138 } 2139 bool operator!=(const SchedResourceDelta &RHS) const { 2140 return !operator==(RHS); 2141 } 2142 }; 2143 2144 /// Store the state used by GenericScheduler heuristics, required for the 2145 /// lifetime of one invocation of pickNode(). 2146 struct SchedCandidate { 2147 CandPolicy Policy; 2148 2149 // The best SUnit candidate. 2150 SUnit *SU; 2151 2152 // The reason for this candidate. 2153 CandReason Reason; 2154 2155 // Set of reasons that apply to multiple candidates. 2156 uint32_t RepeatReasonSet; 2157 2158 // Register pressure values for the best candidate. 2159 RegPressureDelta RPDelta; 2160 2161 // Critical resource consumption of the best candidate. 2162 SchedResourceDelta ResDelta; 2163 2164 SchedCandidate(const CandPolicy &policy) 2165 : Policy(policy), SU(NULL), Reason(NoCand), RepeatReasonSet(0) {} 2166 2167 bool isValid() const { return SU; } 2168 2169 // Copy the status of another candidate without changing policy. 
2170 void setBest(SchedCandidate &Best) {
2171 assert(Best.Reason != NoCand && "uninitialized Sched candidate");
2172 SU = Best.SU;
2173 Reason = Best.Reason;
2174 RPDelta = Best.RPDelta;
2175 ResDelta = Best.ResDelta;
2176 }
2177
2178 bool isRepeat(CandReason R) { return RepeatReasonSet & (1 << R); }
2179 void setRepeat(CandReason R) { RepeatReasonSet |= (1 << R); }
2180
2181 void initResourceDelta(const ScheduleDAGMI *DAG,
2182 const TargetSchedModel *SchedModel);
2183 };
2184
2185 protected:
2186 const MachineSchedContext *Context;
2187 const TargetSchedModel *SchedModel;
2188 const TargetRegisterInfo *TRI;
2189
2190 SchedRemainder Rem;
2191 protected:
2192 GenericSchedulerBase(const MachineSchedContext *C):
2193 Context(C), SchedModel(0), TRI(0) {}
2194
2195 void setPolicy(CandPolicy &Policy, bool IsPostRA, SchedBoundary &CurrZone,
2196 SchedBoundary *OtherZone);
2197
2198 #ifndef NDEBUG
2199 void traceCandidate(const SchedCandidate &Cand);
2200 #endif
2201 };
2202 } // namespace
2203
2204 void GenericSchedulerBase::SchedCandidate::
2205 initResourceDelta(const ScheduleDAGMI *DAG,
2206 const TargetSchedModel *SchedModel) {
2207 if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
2208 return;
2209
2210 const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2211 for (TargetSchedModel::ProcResIter
2212 PI = SchedModel->getWriteProcResBegin(SC),
2213 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2214 if (PI->ProcResourceIdx == Policy.ReduceResIdx)
2215 ResDelta.CritResources += PI->Cycles;
2216 if (PI->ProcResourceIdx == Policy.DemandResIdx)
2217 ResDelta.DemandedResources += PI->Cycles;
2218 }
2219 }
2220
2221 /// Set the CandPolicy for a scheduling zone given the current resources and
2222 /// latencies inside and outside the zone.
2223 void GenericSchedulerBase::setPolicy(CandPolicy &Policy,
2224 bool IsPostRA,
2225 SchedBoundary &CurrZone,
2226 SchedBoundary *OtherZone) {
2227 // Apply preemptive heuristics based on the total latency and resources
2228 // inside and outside this zone. Potential stalls should be considered before
2229 // following this policy.
2230
2231 // Compute remaining latency. We need this both to determine whether the
2232 // overall schedule has become latency-limited and whether the instructions
2233 // outside this zone are resource or latency limited.
2234 //
2235 // The "dependent" latency is updated incrementally during scheduling as the
2236 // max height/depth of scheduled nodes minus the cycles since it was
2237 // scheduled:
2238 // DLat = max (N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
2239 //
2240 // The "independent" latency is the max ready queue depth:
2241 // ILat = max N.depth for N in Available|Pending
2242 //
2243 // RemainingLatency is the greater of independent and dependent latency.
2244 unsigned RemLatency = CurrZone.getDependentLatency();
2245 RemLatency = std::max(RemLatency,
2246 CurrZone.findMaxLatency(CurrZone.Available.elements()));
2247 RemLatency = std::max(RemLatency,
2248 CurrZone.findMaxLatency(CurrZone.Pending.elements()));
2249
2250 // Compute the critical resource outside the zone.
2251 unsigned OtherCritIdx = 0;
2252 unsigned OtherCount =
2253 OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;
2254
2255 bool OtherResLimited = false;
2256 if (SchedModel->hasInstrSchedModel()) {
2257 unsigned LFactor = SchedModel->getLatencyFactor();
2258 OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
2259 }
2260 // Schedule aggressively for latency in PostRA mode.
We don't check for 2261 // acyclic latency during PostRA, and highly out-of-order processors will 2262 // skip PostRA scheduling. 2263 if (!OtherResLimited) { 2264 if (IsPostRA || (RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath)) { 2265 Policy.ReduceLatency |= true; 2266 DEBUG(dbgs() << " " << CurrZone.Available.getName() 2267 << " RemainingLatency " << RemLatency << " + " 2268 << CurrZone.getCurrCycle() << "c > CritPath " 2269 << Rem.CriticalPath << "\n"); 2270 } 2271 } 2272 // If the same resource is limiting inside and outside the zone, do nothing. 2273 if (CurrZone.getZoneCritResIdx() == OtherCritIdx) 2274 return; 2275 2276 DEBUG( 2277 if (CurrZone.isResourceLimited()) { 2278 dbgs() << " " << CurrZone.Available.getName() << " ResourceLimited: " 2279 << SchedModel->getResourceName(CurrZone.getZoneCritResIdx()) 2280 << "\n"; 2281 } 2282 if (OtherResLimited) 2283 dbgs() << " RemainingLimit: " 2284 << SchedModel->getResourceName(OtherCritIdx) << "\n"; 2285 if (!CurrZone.isResourceLimited() && !OtherResLimited) 2286 dbgs() << " Latency limited both directions.\n"); 2287 2288 if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx) 2289 Policy.ReduceResIdx = CurrZone.getZoneCritResIdx(); 2290 2291 if (OtherResLimited) 2292 Policy.DemandResIdx = OtherCritIdx; 2293 } 2294 2295 #ifndef NDEBUG 2296 const char *GenericSchedulerBase::getReasonStr( 2297 GenericSchedulerBase::CandReason Reason) { 2298 switch (Reason) { 2299 case NoCand: return "NOCAND "; 2300 case PhysRegCopy: return "PREG-COPY"; 2301 case RegExcess: return "REG-EXCESS"; 2302 case RegCritical: return "REG-CRIT "; 2303 case Stall: return "STALL "; 2304 case Cluster: return "CLUSTER "; 2305 case Weak: return "WEAK "; 2306 case RegMax: return "REG-MAX "; 2307 case ResourceReduce: return "RES-REDUCE"; 2308 case ResourceDemand: return "RES-DEMAND"; 2309 case TopDepthReduce: return "TOP-DEPTH "; 2310 case TopPathReduce: return "TOP-PATH "; 2311 case BotHeightReduce:return "BOT-HEIGHT"; 2312 case BotPathReduce: return "BOT-PATH "; 2313 case NextDefUse: return "DEF-USE "; 2314 case NodeOrder: return "ORDER "; 2315 }; 2316 llvm_unreachable("Unknown reason!"); 2317 } 2318 2319 void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) { 2320 PressureChange P; 2321 unsigned ResIdx = 0; 2322 unsigned Latency = 0; 2323 switch (Cand.Reason) { 2324 default: 2325 break; 2326 case RegExcess: 2327 P = Cand.RPDelta.Excess; 2328 break; 2329 case RegCritical: 2330 P = Cand.RPDelta.CriticalMax; 2331 break; 2332 case RegMax: 2333 P = Cand.RPDelta.CurrentMax; 2334 break; 2335 case ResourceReduce: 2336 ResIdx = Cand.Policy.ReduceResIdx; 2337 break; 2338 case ResourceDemand: 2339 ResIdx = Cand.Policy.DemandResIdx; 2340 break; 2341 case TopDepthReduce: 2342 Latency = Cand.SU->getDepth(); 2343 break; 2344 case TopPathReduce: 2345 Latency = Cand.SU->getHeight(); 2346 break; 2347 case BotHeightReduce: 2348 Latency = Cand.SU->getHeight(); 2349 break; 2350 case BotPathReduce: 2351 Latency = Cand.SU->getDepth(); 2352 break; 2353 } 2354 dbgs() << " SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason); 2355 if (P.isValid()) 2356 dbgs() << " " << TRI->getRegPressureSetName(P.getPSet()) 2357 << ":" << P.getUnitInc() << " "; 2358 else 2359 dbgs() << " "; 2360 if (ResIdx) 2361 dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " "; 2362 else 2363 dbgs() << " "; 2364 if (Latency) 2365 dbgs() << " " << Latency << " cycles "; 2366 else 2367 dbgs() << " "; 2368 dbgs() << '\n'; 2369 } 2370 #endif 2371 2372 /// Return true if this 
heuristic determines order. 2373 static bool tryLess(int TryVal, int CandVal, 2374 GenericSchedulerBase::SchedCandidate &TryCand, 2375 GenericSchedulerBase::SchedCandidate &Cand, 2376 GenericSchedulerBase::CandReason Reason) { 2377 if (TryVal < CandVal) { 2378 TryCand.Reason = Reason; 2379 return true; 2380 } 2381 if (TryVal > CandVal) { 2382 if (Cand.Reason > Reason) 2383 Cand.Reason = Reason; 2384 return true; 2385 } 2386 Cand.setRepeat(Reason); 2387 return false; 2388 } 2389 2390 static bool tryGreater(int TryVal, int CandVal, 2391 GenericSchedulerBase::SchedCandidate &TryCand, 2392 GenericSchedulerBase::SchedCandidate &Cand, 2393 GenericSchedulerBase::CandReason Reason) { 2394 if (TryVal > CandVal) { 2395 TryCand.Reason = Reason; 2396 return true; 2397 } 2398 if (TryVal < CandVal) { 2399 if (Cand.Reason > Reason) 2400 Cand.Reason = Reason; 2401 return true; 2402 } 2403 Cand.setRepeat(Reason); 2404 return false; 2405 } 2406 2407 static bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand, 2408 GenericSchedulerBase::SchedCandidate &Cand, 2409 SchedBoundary &Zone) { 2410 if (Zone.isTop()) { 2411 if (Cand.SU->getDepth() > Zone.getScheduledLatency()) { 2412 if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(), 2413 TryCand, Cand, GenericSchedulerBase::TopDepthReduce)) 2414 return true; 2415 } 2416 if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(), 2417 TryCand, Cand, GenericSchedulerBase::TopPathReduce)) 2418 return true; 2419 } 2420 else { 2421 if (Cand.SU->getHeight() > Zone.getScheduledLatency()) { 2422 if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(), 2423 TryCand, Cand, GenericSchedulerBase::BotHeightReduce)) 2424 return true; 2425 } 2426 if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(), 2427 TryCand, Cand, GenericSchedulerBase::BotPathReduce)) 2428 return true; 2429 } 2430 return false; 2431 } 2432 2433 static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand, 2434 bool IsTop) { 2435 DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ") 2436 << GenericSchedulerBase::getReasonStr(Cand.Reason) << '\n'); 2437 } 2438 2439 namespace { 2440 /// GenericScheduler shrinks the unscheduled zone using heuristics to balance 2441 /// the schedule. 2442 class GenericScheduler : public GenericSchedulerBase { 2443 ScheduleDAGMILive *DAG; 2444 2445 // State of the top and bottom scheduled instruction boundaries. 
2446 SchedBoundary Top; 2447 SchedBoundary Bot; 2448 2449 MachineSchedPolicy RegionPolicy; 2450 public: 2451 GenericScheduler(const MachineSchedContext *C): 2452 GenericSchedulerBase(C), DAG(0), Top(SchedBoundary::TopQID, "TopQ"), 2453 Bot(SchedBoundary::BotQID, "BotQ") {} 2454 2455 virtual void initPolicy(MachineBasicBlock::iterator Begin, 2456 MachineBasicBlock::iterator End, 2457 unsigned NumRegionInstrs) LLVM_OVERRIDE; 2458 2459 virtual bool shouldTrackPressure() const LLVM_OVERRIDE { 2460 return RegionPolicy.ShouldTrackPressure; 2461 } 2462 2463 virtual void initialize(ScheduleDAGMI *dag) LLVM_OVERRIDE; 2464 2465 virtual SUnit *pickNode(bool &IsTopNode) LLVM_OVERRIDE; 2466 2467 virtual void schedNode(SUnit *SU, bool IsTopNode) LLVM_OVERRIDE; 2468 2469 virtual void releaseTopNode(SUnit *SU) LLVM_OVERRIDE { 2470 Top.releaseTopNode(SU); 2471 } 2472 2473 virtual void releaseBottomNode(SUnit *SU) LLVM_OVERRIDE { 2474 Bot.releaseBottomNode(SU); 2475 } 2476 2477 virtual void registerRoots() LLVM_OVERRIDE; 2478 2479 protected: 2480 void checkAcyclicLatency(); 2481 2482 void tryCandidate(SchedCandidate &Cand, 2483 SchedCandidate &TryCand, 2484 SchedBoundary &Zone, 2485 const RegPressureTracker &RPTracker, 2486 RegPressureTracker &TempTracker); 2487 2488 SUnit *pickNodeBidirectional(bool &IsTopNode); 2489 2490 void pickNodeFromQueue(SchedBoundary &Zone, 2491 const RegPressureTracker &RPTracker, 2492 SchedCandidate &Candidate); 2493 2494 void reschedulePhysRegCopies(SUnit *SU, bool isTop); 2495 }; 2496 } // namespace 2497 2498 void GenericScheduler::initialize(ScheduleDAGMI *dag) { 2499 assert(dag->hasVRegLiveness() && 2500 "(PreRA)GenericScheduler needs vreg liveness"); 2501 DAG = static_cast<ScheduleDAGMILive*>(dag); 2502 SchedModel = DAG->getSchedModel(); 2503 TRI = DAG->TRI; 2504 2505 Rem.init(DAG, SchedModel); 2506 Top.init(DAG, SchedModel, &Rem); 2507 Bot.init(DAG, SchedModel, &Rem); 2508 2509 // Initialize resource counts. 2510 2511 // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or 2512 // are disabled, then these HazardRecs will be disabled. 2513 const InstrItineraryData *Itin = SchedModel->getInstrItineraries(); 2514 const TargetMachine &TM = DAG->MF.getTarget(); 2515 if (!Top.HazardRec) { 2516 Top.HazardRec = 2517 TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG); 2518 } 2519 if (!Bot.HazardRec) { 2520 Bot.HazardRec = 2521 TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG); 2522 } 2523 } 2524 2525 /// Initialize the per-region scheduling policy. 2526 void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin, 2527 MachineBasicBlock::iterator End, 2528 unsigned NumRegionInstrs) { 2529 const TargetMachine &TM = Context->MF->getTarget(); 2530 2531 // Avoid setting up the register pressure tracker for small regions to save 2532 // compile time. As a rough heuristic, only track pressure when the number of 2533 // schedulable instructions exceeds half the integer register file. 2534 unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs( 2535 TM.getTargetLowering()->getRegClassFor(MVT::i32)); 2536 2537 RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2); 2538 2539 // For generic targets, we default to bottom-up, because it's simpler and more 2540 // compile-time optimizations have been implemented in that direction. 2541 RegionPolicy.OnlyBottomUp = true; 2542 2543 // Allow the subtarget to override default policy. 
2544 const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>(); 2545 ST.overrideSchedPolicy(RegionPolicy, Begin, End, NumRegionInstrs); 2546 2547 // After subtarget overrides, apply command line options. 2548 if (!EnableRegPressure) 2549 RegionPolicy.ShouldTrackPressure = false; 2550 2551 // Check -misched-topdown/bottomup can force or unforce scheduling direction. 2552 // e.g. -misched-bottomup=false allows scheduling in both directions. 2553 assert((!ForceTopDown || !ForceBottomUp) && 2554 "-misched-topdown incompatible with -misched-bottomup"); 2555 if (ForceBottomUp.getNumOccurrences() > 0) { 2556 RegionPolicy.OnlyBottomUp = ForceBottomUp; 2557 if (RegionPolicy.OnlyBottomUp) 2558 RegionPolicy.OnlyTopDown = false; 2559 } 2560 if (ForceTopDown.getNumOccurrences() > 0) { 2561 RegionPolicy.OnlyTopDown = ForceTopDown; 2562 if (RegionPolicy.OnlyTopDown) 2563 RegionPolicy.OnlyBottomUp = false; 2564 } 2565 } 2566 2567 /// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic 2568 /// critical path by more cycles than it takes to drain the instruction buffer. 2569 /// We estimate an upper bounds on in-flight instructions as: 2570 /// 2571 /// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height ) 2572 /// InFlightIterations = AcyclicPath / CyclesPerIteration 2573 /// InFlightResources = InFlightIterations * LoopResources 2574 /// 2575 /// TODO: Check execution resources in addition to IssueCount. 2576 void GenericScheduler::checkAcyclicLatency() { 2577 if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath) 2578 return; 2579 2580 // Scaled number of cycles per loop iteration. 2581 unsigned IterCount = 2582 std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(), 2583 Rem.RemIssueCount); 2584 // Scaled acyclic critical path. 2585 unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor(); 2586 // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop 2587 unsigned InFlightCount = 2588 (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount; 2589 unsigned BufferLimit = 2590 SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor(); 2591 2592 Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit; 2593 2594 DEBUG(dbgs() << "IssueCycles=" 2595 << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c " 2596 << "IterCycles=" << IterCount / SchedModel->getLatencyFactor() 2597 << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount 2598 << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor() 2599 << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n"; 2600 if (Rem.IsAcyclicLatencyLimited) 2601 dbgs() << " ACYCLIC LATENCY LIMIT\n"); 2602 } 2603 2604 void GenericScheduler::registerRoots() { 2605 Rem.CriticalPath = DAG->ExitSU.getDepth(); 2606 2607 // Some roots may not feed into ExitSU. Check all of them in case. 
2608 for (std::vector<SUnit*>::const_iterator
2609 I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
2610 if ((*I)->getDepth() > Rem.CriticalPath)
2611 Rem.CriticalPath = (*I)->getDepth();
2612 }
2613 DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n');
2614
2615 if (EnableCyclicPath) {
2616 Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
2617 checkAcyclicLatency();
2618 }
2619 }
2620
2621 static bool tryPressure(const PressureChange &TryP,
2622 const PressureChange &CandP,
2623 GenericSchedulerBase::SchedCandidate &TryCand,
2624 GenericSchedulerBase::SchedCandidate &Cand,
2625 GenericSchedulerBase::CandReason Reason) {
2626 int TryRank = TryP.getPSetOrMax();
2627 int CandRank = CandP.getPSetOrMax();
2628 // If both candidates affect the same set, go with the smallest increase.
2629 if (TryRank == CandRank) {
2630 return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
2631 Reason);
2632 }
2633 // If one candidate decreases and the other increases, go with it.
2634 // Invalid candidates have UnitInc==0.
2635 if (tryLess(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
2636 Reason)) {
2637 return true;
2638 }
2639 // If the candidates are decreasing pressure, reverse priority.
2640 if (TryP.getUnitInc() < 0)
2641 std::swap(TryRank, CandRank);
2642 return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
2643 }
2644
2645 static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
2646 return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
2647 }
2648
2649 /// Minimize physical register live ranges. Regalloc wants them adjacent to
2650 /// their physreg def/use.
2651 ///
2652 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
2653 /// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
2654 /// with the operation that produces or consumes the physreg. We'll do this when
2655 /// regalloc has support for parallel copies.
2656 static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
2657 const MachineInstr *MI = SU->getInstr();
2658 if (!MI->isCopy())
2659 return 0;
2660
2661 unsigned ScheduledOper = isTop ? 1 : 0;
2662 unsigned UnscheduledOper = isTop ? 0 : 1;
2663 // If we have already scheduled the physreg producer/consumer, immediately
2664 // schedule the copy.
2665 if (TargetRegisterInfo::isPhysicalRegister(
2666 MI->getOperand(ScheduledOper).getReg()))
2667 return 1;
2668 // If the physreg is at the boundary, defer it. Otherwise schedule it
2669 // immediately to free the dependent. We can hoist the copy later.
2670 bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
2671 if (TargetRegisterInfo::isPhysicalRegister(
2672 MI->getOperand(UnscheduledOper).getReg()))
2673 return AtBoundary ? -1 : 1;
2674 return 0;
2675 }
2676
2677 /// Apply a set of heuristics to a new candidate. Heuristics are currently
2678 /// hierarchical. This may be more efficient than a graduated cost model because
2679 /// we don't need to evaluate all aspects of the model for each node in the
2680 /// queue. But it's really done to make the heuristics easier to debug and
2681 /// statistically analyze.
2682 ///
2683 /// \param Cand provides the policy and current best candidate.
2684 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
2685 /// \param Zone describes the scheduled zone that we are extending.
2686 /// \param RPTracker describes reg pressure within the scheduled zone.
2687 /// \param TempTracker is a scratch pressure tracker to reuse in queries.
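///
/// The checks below roughly follow the CandReason priority order, from
/// PhysRegCopy down to the NodeOrder tie-breaker; the acyclic-latency check is
/// applied early, right after the register-pressure checks, when
/// Rem.IsAcyclicLatencyLimited is set.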
2688 void GenericScheduler::tryCandidate(SchedCandidate &Cand, 2689 SchedCandidate &TryCand, 2690 SchedBoundary &Zone, 2691 const RegPressureTracker &RPTracker, 2692 RegPressureTracker &TempTracker) { 2693 2694 if (DAG->isTrackingPressure()) { 2695 // Always initialize TryCand's RPDelta. 2696 if (Zone.isTop()) { 2697 TempTracker.getMaxDownwardPressureDelta( 2698 TryCand.SU->getInstr(), 2699 TryCand.RPDelta, 2700 DAG->getRegionCriticalPSets(), 2701 DAG->getRegPressure().MaxSetPressure); 2702 } 2703 else { 2704 if (VerifyScheduling) { 2705 TempTracker.getMaxUpwardPressureDelta( 2706 TryCand.SU->getInstr(), 2707 &DAG->getPressureDiff(TryCand.SU), 2708 TryCand.RPDelta, 2709 DAG->getRegionCriticalPSets(), 2710 DAG->getRegPressure().MaxSetPressure); 2711 } 2712 else { 2713 RPTracker.getUpwardPressureDelta( 2714 TryCand.SU->getInstr(), 2715 DAG->getPressureDiff(TryCand.SU), 2716 TryCand.RPDelta, 2717 DAG->getRegionCriticalPSets(), 2718 DAG->getRegPressure().MaxSetPressure); 2719 } 2720 } 2721 } 2722 DEBUG(if (TryCand.RPDelta.Excess.isValid()) 2723 dbgs() << " SU(" << TryCand.SU->NodeNum << ") " 2724 << TRI->getRegPressureSetName(TryCand.RPDelta.Excess.getPSet()) 2725 << ":" << TryCand.RPDelta.Excess.getUnitInc() << "\n"); 2726 2727 // Initialize the candidate if needed. 2728 if (!Cand.isValid()) { 2729 TryCand.Reason = NodeOrder; 2730 return; 2731 } 2732 2733 if (tryGreater(biasPhysRegCopy(TryCand.SU, Zone.isTop()), 2734 biasPhysRegCopy(Cand.SU, Zone.isTop()), 2735 TryCand, Cand, PhysRegCopy)) 2736 return; 2737 2738 // Avoid exceeding the target's limit. If signed PSetID is negative, it is 2739 // invalid; convert it to INT_MAX to give it lowest priority. 2740 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess, 2741 Cand.RPDelta.Excess, 2742 TryCand, Cand, RegExcess)) 2743 return; 2744 2745 // Avoid increasing the max critical pressure in the scheduled region. 2746 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax, 2747 Cand.RPDelta.CriticalMax, 2748 TryCand, Cand, RegCritical)) 2749 return; 2750 2751 // For loops that are acyclic path limited, aggressively schedule for latency. 2752 // This can result in very long dependence chains scheduled in sequence, so 2753 // once every cycle (when CurrMOps == 0), switch to normal heuristics. 2754 if (Rem.IsAcyclicLatencyLimited && !Zone.getCurrMOps() 2755 && tryLatency(TryCand, Cand, Zone)) 2756 return; 2757 2758 // Prioritize instructions that read unbuffered resources by stall cycles. 2759 if (tryLess(Zone.getLatencyStallCycles(TryCand.SU), 2760 Zone.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall)) 2761 return; 2762 2763 // Keep clustered nodes together to encourage downstream peephole 2764 // optimizations which may reduce resource requirements. 2765 // 2766 // This is a best effort to set things up for a post-RA pass. Optimizations 2767 // like generating loads of multiple registers should ideally be done within 2768 // the scheduler pass by combining the loads during DAG postprocessing. 2769 const SUnit *NextClusterSU = 2770 Zone.isTop() ? DAG->getNextClusterSucc() : DAG->getNextClusterPred(); 2771 if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU, 2772 TryCand, Cand, Cluster)) 2773 return; 2774 2775 // Weak edges are for clustering and other constraints. 2776 if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()), 2777 getWeakLeft(Cand.SU, Zone.isTop()), 2778 TryCand, Cand, Weak)) { 2779 return; 2780 } 2781 // Avoid increasing the max pressure of the entire region. 
2782 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax, 2783 Cand.RPDelta.CurrentMax, 2784 TryCand, Cand, RegMax)) 2785 return; 2786 2787 // Avoid critical resource consumption and balance the schedule. 2788 TryCand.initResourceDelta(DAG, SchedModel); 2789 if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources, 2790 TryCand, Cand, ResourceReduce)) 2791 return; 2792 if (tryGreater(TryCand.ResDelta.DemandedResources, 2793 Cand.ResDelta.DemandedResources, 2794 TryCand, Cand, ResourceDemand)) 2795 return; 2796 2797 // Avoid serializing long latency dependence chains. 2798 // For acyclic path limited loops, latency was already checked above. 2799 if (Cand.Policy.ReduceLatency && !Rem.IsAcyclicLatencyLimited 2800 && tryLatency(TryCand, Cand, Zone)) { 2801 return; 2802 } 2803 2804 // Prefer immediate defs/users of the last scheduled instruction. This is a 2805 // local pressure avoidance strategy that also makes the machine code 2806 // readable. 2807 if (tryGreater(Zone.isNextSU(TryCand.SU), Zone.isNextSU(Cand.SU), 2808 TryCand, Cand, NextDefUse)) 2809 return; 2810 2811 // Fall through to original instruction order. 2812 if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum) 2813 || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) { 2814 TryCand.Reason = NodeOrder; 2815 } 2816 } 2817 2818 /// Pick the best candidate from the queue. 2819 /// 2820 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during 2821 /// DAG building. To adjust for the current scheduling location we need to 2822 /// maintain the number of vreg uses remaining to be top-scheduled. 2823 void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone, 2824 const RegPressureTracker &RPTracker, 2825 SchedCandidate &Cand) { 2826 ReadyQueue &Q = Zone.Available; 2827 2828 DEBUG(Q.dump()); 2829 2830 // getMaxPressureDelta temporarily modifies the tracker. 2831 RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker); 2832 2833 for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) { 2834 2835 SchedCandidate TryCand(Cand.Policy); 2836 TryCand.SU = *I; 2837 tryCandidate(Cand, TryCand, Zone, RPTracker, TempTracker); 2838 if (TryCand.Reason != NoCand) { 2839 // Initialize resource delta if needed in case future heuristics query it. 2840 if (TryCand.ResDelta == SchedResourceDelta()) 2841 TryCand.initResourceDelta(DAG, SchedModel); 2842 Cand.setBest(TryCand); 2843 DEBUG(traceCandidate(Cand)); 2844 } 2845 } 2846 } 2847 2848 /// Pick the best candidate node from either the top or bottom queue. 2849 SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) { 2850 // Schedule as far as possible in the direction of no choice. This is most 2851 // efficient, but also provides the best heuristics for CriticalPSets. 2852 if (SUnit *SU = Bot.pickOnlyChoice()) { 2853 IsTopNode = false; 2854 DEBUG(dbgs() << "Pick Bot NOCAND\n"); 2855 return SU; 2856 } 2857 if (SUnit *SU = Top.pickOnlyChoice()) { 2858 IsTopNode = true; 2859 DEBUG(dbgs() << "Pick Top NOCAND\n"); 2860 return SU; 2861 } 2862 CandPolicy NoPolicy; 2863 SchedCandidate BotCand(NoPolicy); 2864 SchedCandidate TopCand(NoPolicy); 2865 // Set the bottom-up policy based on the state of the current bottom zone and 2866 // the instructions outside the zone, including the top zone. 
2867 setPolicy(BotCand.Policy, /*IsPostRA=*/false, Bot, &Top); 2868 // Set the top-down policy based on the state of the current top zone and 2869 // the instructions outside the zone, including the bottom zone. 2870 setPolicy(TopCand.Policy, /*IsPostRA=*/false, Top, &Bot); 2871 2872 // Prefer bottom scheduling when heuristics are silent. 2873 pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand); 2874 assert(BotCand.Reason != NoCand && "failed to find the first candidate"); 2875 2876 // If either Q has a single candidate that provides the least increase in 2877 // Excess pressure, we can immediately schedule from that Q. 2878 // 2879 // RegionCriticalPSets summarizes the pressure within the scheduled region and 2880 // affects picking from either Q. If scheduling in one direction must 2881 // increase pressure for one of the excess PSets, then schedule in that 2882 // direction first to provide more freedom in the other direction. 2883 if ((BotCand.Reason == RegExcess && !BotCand.isRepeat(RegExcess)) 2884 || (BotCand.Reason == RegCritical 2885 && !BotCand.isRepeat(RegCritical))) 2886 { 2887 IsTopNode = false; 2888 tracePick(BotCand, IsTopNode); 2889 return BotCand.SU; 2890 } 2891 // Check if the top Q has a better candidate. 2892 pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand); 2893 assert(TopCand.Reason != NoCand && "failed to find the first candidate"); 2894 2895 // Choose the queue with the most important (lowest enum) reason. 2896 if (TopCand.Reason < BotCand.Reason) { 2897 IsTopNode = true; 2898 tracePick(TopCand, IsTopNode); 2899 return TopCand.SU; 2900 } 2901 // Otherwise prefer the bottom candidate, in node order if all else failed. 2902 IsTopNode = false; 2903 tracePick(BotCand, IsTopNode); 2904 return BotCand.SU; 2905 } 2906 2907 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy. 2908 SUnit *GenericScheduler::pickNode(bool &IsTopNode) { 2909 if (DAG->top() == DAG->bottom()) { 2910 assert(Top.Available.empty() && Top.Pending.empty() && 2911 Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage"); 2912 return NULL; 2913 } 2914 SUnit *SU; 2915 do { 2916 if (RegionPolicy.OnlyTopDown) { 2917 SU = Top.pickOnlyChoice(); 2918 if (!SU) { 2919 CandPolicy NoPolicy; 2920 SchedCandidate TopCand(NoPolicy); 2921 pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand); 2922 assert(TopCand.Reason != NoCand && "failed to find a candidate"); 2923 tracePick(TopCand, true); 2924 SU = TopCand.SU; 2925 } 2926 IsTopNode = true; 2927 } 2928 else if (RegionPolicy.OnlyBottomUp) { 2929 SU = Bot.pickOnlyChoice(); 2930 if (!SU) { 2931 CandPolicy NoPolicy; 2932 SchedCandidate BotCand(NoPolicy); 2933 pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand); 2934 assert(BotCand.Reason != NoCand && "failed to find a candidate"); 2935 tracePick(BotCand, false); 2936 SU = BotCand.SU; 2937 } 2938 IsTopNode = false; 2939 } 2940 else { 2941 SU = pickNodeBidirectional(IsTopNode); 2942 } 2943 } while (SU->isScheduled); 2944 2945 if (SU->isTopReady()) 2946 Top.removeReady(SU); 2947 if (SU->isBottomReady()) 2948 Bot.removeReady(SU); 2949 2950 DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr()); 2951 return SU; 2952 } 2953 2954 void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) { 2955 2956 MachineBasicBlock::iterator InsertPos = SU->getInstr(); 2957 if (!isTop) 2958 ++InsertPos; 2959 SmallVectorImpl<SDep> &Deps = isTop ? 
SU->Preds : SU->Succs; 2960 2961 // Find already scheduled copies with a single physreg dependence and move 2962 // them just above the scheduled instruction. 2963 for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end(); 2964 I != E; ++I) { 2965 if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg())) 2966 continue; 2967 SUnit *DepSU = I->getSUnit(); 2968 if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1) 2969 continue; 2970 MachineInstr *Copy = DepSU->getInstr(); 2971 if (!Copy->isCopy()) 2972 continue; 2973 DEBUG(dbgs() << " Rescheduling physreg copy "; 2974 I->getSUnit()->dump(DAG)); 2975 DAG->moveInstruction(Copy, InsertPos); 2976 } 2977 } 2978 2979 /// Update the scheduler's state after scheduling a node. This is the same node 2980 /// that was just returned by pickNode(). However, ScheduleDAGMILive needs to 2981 /// update it's state based on the current cycle before MachineSchedStrategy 2982 /// does. 2983 /// 2984 /// FIXME: Eventually, we may bundle physreg copies rather than rescheduling 2985 /// them here. See comments in biasPhysRegCopy. 2986 void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) { 2987 if (IsTopNode) { 2988 SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle()); 2989 Top.bumpNode(SU); 2990 if (SU->hasPhysRegUses) 2991 reschedulePhysRegCopies(SU, true); 2992 } 2993 else { 2994 SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle()); 2995 Bot.bumpNode(SU); 2996 if (SU->hasPhysRegDefs) 2997 reschedulePhysRegCopies(SU, false); 2998 } 2999 } 3000 3001 /// Create the standard converging machine scheduler. This will be used as the 3002 /// default scheduler if the target does not set a default. 3003 static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C) { 3004 ScheduleDAGMILive *DAG = new ScheduleDAGMILive(C, new GenericScheduler(C)); 3005 // Register DAG post-processors. 3006 // 3007 // FIXME: extend the mutation API to allow earlier mutations to instantiate 3008 // data and pass it to later mutations. Have a single mutation that gathers 3009 // the interesting nodes in one pass. 3010 DAG->addMutation(new CopyConstrain(DAG->TII, DAG->TRI)); 3011 if (EnableLoadCluster && DAG->TII->enableClusterLoads()) 3012 DAG->addMutation(new LoadClusterMutation(DAG->TII, DAG->TRI)); 3013 if (EnableMacroFusion) 3014 DAG->addMutation(new MacroFusion(DAG->TII)); 3015 return DAG; 3016 } 3017 3018 static MachineSchedRegistry 3019 GenericSchedRegistry("converge", "Standard converging scheduler.", 3020 createGenericSchedLive); 3021 3022 //===----------------------------------------------------------------------===// 3023 // PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy. 3024 //===----------------------------------------------------------------------===// 3025 3026 namespace { 3027 /// PostGenericScheduler - Interface to the scheduling algorithm used by 3028 /// ScheduleDAGMI. 3029 /// 3030 /// Callbacks from ScheduleDAGMI: 3031 /// initPolicy -> initialize(DAG) -> registerRoots -> pickNode ... 
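///
/// Unlike GenericScheduler, this strategy schedules strictly top-down with a
/// single SchedBoundary; bottom roots are only collected by releaseBottomNode()
/// to seed the critical-path estimate in registerRoots().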

//===----------------------------------------------------------------------===//
// PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
//===----------------------------------------------------------------------===//

namespace {
/// PostGenericScheduler - Interface to the scheduling algorithm used by
/// ScheduleDAGMI.
///
/// Callbacks from ScheduleDAGMI:
///   initPolicy -> initialize(DAG) -> registerRoots -> pickNode ...
class PostGenericScheduler : public GenericSchedulerBase {
  ScheduleDAGMI *DAG;
  SchedBoundary Top;
  SmallVector<SUnit*, 8> BotRoots;
public:
  PostGenericScheduler(const MachineSchedContext *C):
    GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ") {}

  virtual ~PostGenericScheduler() {}

  virtual void initPolicy(MachineBasicBlock::iterator Begin,
                          MachineBasicBlock::iterator End,
                          unsigned NumRegionInstrs) LLVM_OVERRIDE {
    /* no configurable policy */
  }

  /// PostRA scheduling does not track pressure.
  virtual bool shouldTrackPressure() const LLVM_OVERRIDE { return false; }

  virtual void initialize(ScheduleDAGMI *Dag) LLVM_OVERRIDE {
    DAG = Dag;
    SchedModel = DAG->getSchedModel();
    TRI = DAG->TRI;

    Rem.init(DAG, SchedModel);
    Top.init(DAG, SchedModel, &Rem);
    BotRoots.clear();

    // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
    // or are disabled, then these HazardRecs will be disabled.
    const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
    const TargetMachine &TM = DAG->MF.getTarget();
    if (!Top.HazardRec) {
      Top.HazardRec =
        TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
    }
  }

  virtual void registerRoots() LLVM_OVERRIDE;

  virtual SUnit *pickNode(bool &IsTopNode) LLVM_OVERRIDE;

  virtual void scheduleTree(unsigned SubtreeID) LLVM_OVERRIDE {
    llvm_unreachable("PostRA scheduler does not support subtree analysis.");
  }

  virtual void schedNode(SUnit *SU, bool IsTopNode) LLVM_OVERRIDE;

  virtual void releaseTopNode(SUnit *SU) LLVM_OVERRIDE {
    Top.releaseTopNode(SU);
  }

  // Only called for roots.
  virtual void releaseBottomNode(SUnit *SU) LLVM_OVERRIDE {
    BotRoots.push_back(SU);
  }

protected:
  void tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand);

  void pickNodeFromQueue(SchedCandidate &Cand);
};
} // namespace

void PostGenericScheduler::registerRoots() {
  Rem.CriticalPath = DAG->ExitSU.getDepth();

  // Some roots may not feed into ExitSU. Check all of them in case.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = BotRoots.begin(), E = BotRoots.end(); I != E; ++I) {
    if ((*I)->getDepth() > Rem.CriticalPath)
      Rem.CriticalPath = (*I)->getDepth();
  }
  DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n');
}

/// Apply a set of heuristics to a new candidate for PostRA scheduling.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
void PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
                                        SchedCandidate &TryCand) {
  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return;
  }

  // Prioritize instructions that read unbuffered resources by stall cycles.
  if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
              Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
    return;

  // Avoid critical resource consumption and balance the schedule.
  if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
              TryCand, Cand, ResourceReduce))
    return;
  if (tryGreater(TryCand.ResDelta.DemandedResources,
                 Cand.ResDelta.DemandedResources,
                 TryCand, Cand, ResourceDemand))
    return;

  // Avoid serializing long latency dependence chains.
  if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
    return;
  }

  // Fall through to original instruction order.
  if (TryCand.SU->NodeNum < Cand.SU->NodeNum)
    TryCand.Reason = NodeOrder;
}
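
// tryCandidate() leaves TryCand.Reason at NoCand unless TryCand beats the
// incumbent on one of the checks above (or the incumbent is not yet valid);
// pickNodeFromQueue() below keeps the current best whenever Reason is still
// NoCand after the comparison.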

void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
  ReadyQueue &Q = Top.Available;

  DEBUG(Q.dump());

  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
    SchedCandidate TryCand(Cand.Policy);
    TryCand.SU = *I;
    TryCand.initResourceDelta(DAG, SchedModel);
    tryCandidate(Cand, TryCand);
    if (TryCand.Reason != NoCand) {
      Cand.setBest(TryCand);
      DEBUG(traceCandidate(Cand));
    }
  }
}

/// Pick the next node to schedule.
SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
    return NULL;
  }
  SUnit *SU;
  do {
    SU = Top.pickOnlyChoice();
    if (!SU) {
      CandPolicy NoPolicy;
      SchedCandidate TopCand(NoPolicy);
      // Set the top-down policy based on the state of the current top zone and
      // the instructions outside the zone, including the bottom zone.
      setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, NULL);
      pickNodeFromQueue(TopCand);
      assert(TopCand.Reason != NoCand && "failed to find a candidate");
      tracePick(TopCand, true);
      SU = TopCand.SU;
    }
  } while (SU->isScheduled);

  IsTopNode = true;
  Top.removeReady(SU);

  DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
  return SU;
}

/// Called after ScheduleDAGMI has scheduled an instruction and updated
/// scheduled/remaining flags in the DAG nodes.
void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
  Top.bumpNode(SU);
}

/// Create a generic scheduler with no vreg liveness or DAG mutation passes.
static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C) {
  return new ScheduleDAGMI(C, new PostGenericScheduler(C), /*IsPostRA=*/true);
}

//===----------------------------------------------------------------------===//
// ILP Scheduler. Currently for experimental analysis of heuristics.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Order nodes by the ILP metric.
struct ILPOrder {
  const SchedDFSResult *DFSResult;
  const BitVector *ScheduledTrees;
  bool MaximizeILP;

  ILPOrder(bool MaxILP): DFSResult(0), ScheduledTrees(0), MaximizeILP(MaxILP) {}

  /// \brief Apply a less-than relation on node priority.
  ///
  /// (Return true if A comes after B in the Q.)
  bool operator()(const SUnit *A, const SUnit *B) const {
    unsigned SchedTreeA = DFSResult->getSubtreeID(A);
    unsigned SchedTreeB = DFSResult->getSubtreeID(B);
    if (SchedTreeA != SchedTreeB) {
      // Unscheduled trees have lower priority.
      if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
        return ScheduledTrees->test(SchedTreeB);

      // Trees with shallower connections have lower priority.
      if (DFSResult->getSubtreeLevel(SchedTreeA)
          != DFSResult->getSubtreeLevel(SchedTreeB)) {
        return DFSResult->getSubtreeLevel(SchedTreeA)
               < DFSResult->getSubtreeLevel(SchedTreeB);
      }
    }
    if (MaximizeILP)
      return DFSResult->getILP(A) < DFSResult->getILP(B);
    else
      return DFSResult->getILP(A) > DFSResult->getILP(B);
  }
};
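
// ILPOrder is the comparison functor for the std::push_heap / std::pop_heap
// calls in ILPScheduler below, which maintain a max-heap: the SUnit that
// compares greatest is popped and scheduled first. So within one subtree,
// MaximizeILP == true makes a node with the larger DFS ILP value compare
// greater and therefore get scheduled earlier, while MaximizeILP == false
// inverts that preference.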

/// \brief Schedule based on the ILP metric.
class ILPScheduler : public MachineSchedStrategy {
  ScheduleDAGMILive *DAG;
  ILPOrder Cmp;

  std::vector<SUnit*> ReadyQ;
public:
  ILPScheduler(bool MaximizeILP): DAG(0), Cmp(MaximizeILP) {}

  virtual void initialize(ScheduleDAGMI *dag) {
    assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
    DAG = static_cast<ScheduleDAGMILive*>(dag);
    DAG->computeDFSResult();
    Cmp.DFSResult = DAG->getDFSResult();
    Cmp.ScheduledTrees = &DAG->getScheduledTrees();
    ReadyQ.clear();
  }

  virtual void registerRoots() {
    // Restore the heap in ReadyQ with the updated DFS results.
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  /// Callback to select the highest priority node from the ready Q.
  virtual SUnit *pickNode(bool &IsTopNode) {
    if (ReadyQ.empty()) return NULL;
    std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
    SUnit *SU = ReadyQ.back();
    ReadyQ.pop_back();
    IsTopNode = false;
    DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") "
          << " ILP: " << DAG->getDFSResult()->getILP(SU)
          << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @"
          << DAG->getDFSResult()->getSubtreeLevel(
            DAG->getDFSResult()->getSubtreeID(SU)) << '\n'
          << "Scheduling " << *SU->getInstr());
    return SU;
  }

  /// \brief Scheduler callback to notify that a new subtree is scheduled.
  virtual void scheduleTree(unsigned SubtreeID) {
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }
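
  // Note on heap maintenance: registerRoots() and scheduleTree() rebuild the
  // entire heap with std::make_heap because updated DFS results or a newly
  // scheduled subtree can change the relative priority of every node already
  // in ReadyQ, whereas releaseBottomNode() below only appends one element and
  // can use the cheaper std::push_heap.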

  /// Callback after a node is scheduled. Mark a newly scheduled tree, notify
  /// DFSResults, and resort the priority Q.
  virtual void schedNode(SUnit *SU, bool IsTopNode) {
    assert(!IsTopNode && "SchedDFSResult needs bottom-up");
  }

  virtual void releaseTopNode(SUnit *) { /*only called for top roots*/ }

  virtual void releaseBottomNode(SUnit *SU) {
    ReadyQ.push_back(SU);
    std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }
};
} // namespace

static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, new ILPScheduler(true));
}
static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, new ILPScheduler(false));
}
static MachineSchedRegistry ILPMaxRegistry(
  "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
static MachineSchedRegistry ILPMinRegistry(
  "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);

//===----------------------------------------------------------------------===//
// Machine Instruction Shuffler for Correctness Testing
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace {
/// Apply a less-than relation on the node order, which corresponds to the
/// instruction order prior to scheduling. IsReverse implements greater-than.
template<bool IsReverse>
struct SUnitOrder {
  bool operator()(SUnit *A, SUnit *B) const {
    if (IsReverse)
      return A->NodeNum > B->NodeNum;
    else
      return A->NodeNum < B->NodeNum;
  }
};

/// Reorder instructions as much as possible.
class InstructionShuffler : public MachineSchedStrategy {
  bool IsAlternating;
  bool IsTopDown;

  // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
  // gives nodes with a higher number higher priority, causing the latest
  // instructions to be scheduled first.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
    TopQ;
  // When scheduling bottom-up, use greater-than as the queue priority.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
    BottomQ;
public:
  InstructionShuffler(bool alternate, bool topdown)
    : IsAlternating(alternate), IsTopDown(topdown) {}

  virtual void initialize(ScheduleDAGMI*) {
    TopQ.clear();
    BottomQ.clear();
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  virtual SUnit *pickNode(bool &IsTopNode) {
    SUnit *SU;
    if (IsTopDown) {
      do {
        if (TopQ.empty()) return NULL;
        SU = TopQ.top();
        TopQ.pop();
      } while (SU->isScheduled);
      IsTopNode = true;
    }
    else {
      do {
        if (BottomQ.empty()) return NULL;
        SU = BottomQ.top();
        BottomQ.pop();
      } while (SU->isScheduled);
      IsTopNode = false;
    }
    if (IsAlternating)
      IsTopDown = !IsTopDown;
    return SU;
  }

  virtual void schedNode(SUnit *SU, bool IsTopNode) {}

  virtual void releaseTopNode(SUnit *SU) {
    TopQ.push(SU);
  }
  virtual void releaseBottomNode(SUnit *SU) {
    BottomQ.push(SU);
  }
};
} // namespace

static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
  bool Alternate = !ForceTopDown && !ForceBottomUp;
  bool TopDown = !ForceBottomUp;
  assert((TopDown || !ForceTopDown) &&
         "-misched-topdown incompatible with -misched-bottomup");
  return new ScheduleDAGMILive(C, new InstructionShuffler(Alternate, TopDown));
}
static MachineSchedRegistry ShufflerRegistry(
  "shuffle", "Shuffle machine instructions alternating directions",
  createInstructionShuffler);
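
// Note: the "shuffle" strategy exists purely for correctness testing in debug
// builds; selecting it (typically via the registry-driven -misched=<name>
// option, optionally combined with -verify-misched) is meant to stress the
// scheduler's bookkeeping under orderings no real heuristic would produce.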
#endif // !NDEBUG

//===----------------------------------------------------------------------===//
// GraphWriter support for ScheduleDAGMILive.
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace llvm {

template<> struct GraphTraits<
  ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};

template<>
struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {

  DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getGraphName(const ScheduleDAG *G) {
    return G->MF.getName();
  }

  static bool renderGraphFromBottomUp() {
    return true;
  }

  static bool isNodeHidden(const SUnit *Node) {
    return (Node->Preds.size() > 10 || Node->Succs.size() > 10);
  }

  static bool hasNodeAddressLabel(const SUnit *Node,
                                  const ScheduleDAG *Graph) {
    return false;
  }

  /// If you want to override the dot attributes printed for a particular
  /// edge, override this method.
  static std::string getEdgeAttributes(const SUnit *Node,
                                       SUnitIterator EI,
                                       const ScheduleDAG *Graph) {
    if (EI.isArtificialDep())
      return "color=cyan,style=dashed";
    if (EI.isCtrlDep())
      return "color=blue,style=dashed";
    return "";
  }

  static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
    std::string Str;
    raw_string_ostream SS(Str);
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : 0;
    SS << "SU:" << SU->NodeNum;
    if (DFS)
      SS << " I:" << DFS->getNumInstrs(SU);
    return SS.str();
  }
  static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
    return G->getGraphNodeLabel(SU);
  }

  static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
    std::string Str("shape=Mrecord");
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : 0;
    if (DFS) {
      Str += ",style=filled,fillcolor=\"#";
      Str += DOT::getColorString(DFS->getSubtreeID(N));
      Str += '"';
    }
    return Str;
  }
};
} // namespace llvm
#endif // NDEBUG

/// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
/// rendered using 'dot'.
///
void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
#ifndef NDEBUG
  ViewGraph(this, Name, false, Title);
#else
  errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}

/// Out-of-line implementation with no arguments is handy for gdb.
void ScheduleDAGMI::viewGraph() {
  viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
}
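
// Debugger usage note (illustrative): the no-argument overload above exists so
// the graph can be dumped interactively, e.g. from gdb with
//
//   (gdb) call DAG->viewGraph()
//
// where `DAG` stands for whatever ScheduleDAGMI pointer happens to be in scope
// at the breakpoint; this requires a debug build with Graphviz/gv available,
// as the error message above indicates.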