//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "misched"

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include <queue>

using namespace llvm;

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
}

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

// FIXME: remove this flag after initial testing. It should always be a good
// thing.
static cl::opt<bool> EnableCopyConstrain("misched-vcopy", cl::Hidden,
  cl::desc("Constrain vreg copies."), cl::init(true));

static cl::opt<bool> EnableLoadCluster("misched-cluster", cl::Hidden,
  cl::desc("Enable load clustering."), cl::init(true));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
  MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedContext,
                         public MachineFunctionPass {
public:
  MachineScheduler();

  virtual void getAnalysisUsage(AnalysisUsage &AU) const;

  virtual void releaseMemory() {}

  virtual bool runOnMachineFunction(MachineFunction&);

  virtual void print(raw_ostream &O, const Module* = 0) const;

  static char ID; // Class identification, replacement for typeinfo
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "misched",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "misched",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineFunctionPass(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return 0;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C);


/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AliasAnalysis>();

  LIS = &getAnalysis<LiveIntervals>();
  const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();

  if (VerifyScheduling) {
    DEBUG(LIS->print(dbgs()));
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor == useDefaultMachineSched) {
    // Get the default scheduler set by the target.
    Ctor = MachineSchedRegistry::getDefault();
    if (!Ctor) {
      Ctor = createConvergingSched;
      MachineSchedRegistry::setDefault(Ctor);
    }
  }
  // Instantiate the selected scheduler.
  OwningPtr<ScheduleDAGInstrs> Scheduler(Ctor(this));

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler->startBlock(MBB);

    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator, then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    unsigned RemainingInstrs = MBB->size();
    for (MachineBasicBlock::iterator RegionEnd = MBB->end();
         RegionEnd != MBB->begin(); RegionEnd = Scheduler->begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end()
          || TII->isSchedulingBoundary(llvm::prior(RegionEnd), MBB, *MF)) {
        --RegionEnd;
        // Count the boundary instruction.
        --RemainingInstrs;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      MachineBasicBlock::iterator I = RegionEnd;
      for (; I != MBB->begin(); --I, --RemainingInstrs) {
        if (TII->isSchedulingBoundary(llvm::prior(I), MBB, *MF))
          break;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler->enterRegion(MBB, I, RegionEnd, RemainingInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == llvm::prior(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler->exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n From: " << *I << " To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " Remaining: " << RemainingInstrs << "\n");

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler->schedule();

      // Close the current region.
      Scheduler->exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler->begin();
    }
    assert(RemainingInstrs == 0 && "Instruction count mismatch!");
    Scheduler->finishBlock();
  }
  Scheduler->finalizeSchedule();
  DEBUG(LIS->print(dbgs()));
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

void MachineScheduler::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ReadyQueue::dump() {
  dbgs() << " " << Name << ": ";
  for (unsigned i = 0, e = Queue.size(); i < e; ++i)
    dbgs() << Queue[i]->NodeNum << " ";
  dbgs() << "\n";
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMI::~ScheduleDAGMI() {
  delete DFSResult;
  DeleteContainerPointers(Mutations);
  delete SchedImpl;
}

bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(MachineInstr *MI,
                                    MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals
  LIS->handleMove(MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned endcount)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, endcount);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd =
    (RegionEnd == bb->end()) ? RegionEnd : llvm::next(RegionEnd);
}

// Set up the register pressure trackers for the top scheduled and bottom
// scheduled regions.
void ScheduleDAGMI::initRegPressure() {
  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.getPressure().dump(TRI));

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    BotRPTracker.recede();

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = TRI->getRegPressureSetLimit(i);
    DEBUG(dbgs() << TRI->getRegPressureSetName(i)
          << " Limit " << Limit
          << " Actual " << RegionPressure[i] << "\n");
    if (RegionPressure[i] > Limit)
      RegionCriticalPSets.push_back(PressureElement(i, 0));
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].PSetID) << " ";
        dbgs() << "\n");
}

// FIXME: When the pressure tracker deals in pressure differences then we won't
// iterate over all RegionCriticalPSets[i].
void ScheduleDAGMI::
updateScheduledPressure(const std::vector<unsigned> &NewMaxPressure) {
  for (unsigned i = 0, e = RegionCriticalPSets.size(); i < e; ++i) {
    unsigned ID = RegionCriticalPSets[i].PSetID;
    int &MaxUnits = RegionCriticalPSets[i].UnitIncrease;
    if ((int)NewMaxPressure[ID] > MaxUnits)
      MaxUnits = NewMaxPressure[ID];
  }
  DEBUG(
    for (unsigned i = 0, e = NewMaxPressure.size(); i < e; ++i) {
      unsigned Limit = TRI->getRegPressureSetLimit(i);
      if (NewMaxPressure[i] > Limit) {
        dbgs() << " " << TRI->getRegPressureSetName(i) << ": "
               << NewMaxPressure[i] << " > " << Limit << "\n";
      }
    });
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMI then it will want to override this virtual method in order to
/// update any specialized state.
void ScheduleDAGMI::schedule() {
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Build the DAG and set up three register pressure trackers.
void ScheduleDAGMI::buildDAGWithRegPressure() {
  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
    Mutations[i]->apply(this);
  }
}

/// Compute the DAG subtree structure (DFS result) that the scheduling strategy
/// may use for queue priority.
void ScheduleDAGMI::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

void ScheduleDAGMI::findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                                          SmallVectorImpl<SUnit*> &BotRoots) {
  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    SUnit *SU = &(*I);
    assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU->biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!I->NumPredsLeft)
      TopRoots.push_back(SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!I->NumSuccsLeft)
      BotRoots.push_back(SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = NULL;
  NextClusterPred = NULL;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
    SchedImpl->releaseTopNode(*I);
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  TopRPTracker.setPos(CurrentTop);

  CurrentBottom = RegionEnd;
}

/// Move an instruction and update register pressure.
void ScheduleDAGMI::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    // Update top scheduled pressure.
    TopRPTracker.advance();
    assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
    updateScheduledPressure(TopRPTracker.getPressure().MaxSetPressure);
  }
  else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
    }
    // Update bottom scheduled pressure.
    BotRPTracker.recede();
    assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
    updateScheduledPressure(BotRPTracker.getPressure().MaxSetPressure);
  }
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;

  if (DFSResult) {
    unsigned SubtreeID = DFSResult->getSubtreeID(SU);
    if (!ScheduledTrees.test(SubtreeID)) {
      ScheduledTrees.set(SubtreeID);
      DFSResult->scheduleTree(SubtreeID);
      SchedImpl->scheduleTree(SubtreeID);
    }
  }

  // Notify the scheduling strategy after updating the DAG.
  SchedImpl->schedNode(SU, IsTopNode);
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == llvm::prior(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = NULL;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// LoadClusterMutation - DAG post-processing to cluster loads.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between neighboring
/// loads.
class LoadClusterMutation : public ScheduleDAGMutation {
  struct LoadInfo {
    SUnit *SU;
    unsigned BaseReg;
    unsigned Offset;
    LoadInfo(SUnit *su, unsigned reg, unsigned ofs)
      : SU(su), BaseReg(reg), Offset(ofs) {}
  };
  static bool LoadInfoLess(const LoadClusterMutation::LoadInfo &LHS,
                           const LoadClusterMutation::LoadInfo &RHS);

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
public:
  LoadClusterMutation(const TargetInstrInfo *tii,
                      const TargetRegisterInfo *tri)
    : TII(tii), TRI(tri) {}

  virtual void apply(ScheduleDAGMI *DAG);
protected:
  void clusterNeighboringLoads(ArrayRef<SUnit*> Loads, ScheduleDAGMI *DAG);
};
} // anonymous

bool LoadClusterMutation::LoadInfoLess(
  const LoadClusterMutation::LoadInfo &LHS,
  const LoadClusterMutation::LoadInfo &RHS) {
  if (LHS.BaseReg != RHS.BaseReg)
    return LHS.BaseReg < RHS.BaseReg;
  return LHS.Offset < RHS.Offset;
}

void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
                                                  ScheduleDAGMI *DAG) {
  SmallVector<LoadClusterMutation::LoadInfo,32> LoadRecords;
  for (unsigned Idx = 0, End = Loads.size(); Idx != End; ++Idx) {
    SUnit *SU = Loads[Idx];
    unsigned BaseReg;
    unsigned Offset;
    if (TII->getLdStBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
      LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset));
  }
  if (LoadRecords.size() < 2)
    return;
  std::sort(LoadRecords.begin(), LoadRecords.end(), LoadInfoLess);
  unsigned ClusterLength = 1;
  for (unsigned Idx = 0, End = LoadRecords.size(); Idx < (End - 1); ++Idx) {
    if (LoadRecords[Idx].BaseReg != LoadRecords[Idx+1].BaseReg) {
      ClusterLength = 1;
      continue;
    }

    SUnit *SUa = LoadRecords[Idx].SU;
    SUnit *SUb = LoadRecords[Idx+1].SU;
    if (TII->shouldClusterLoads(SUa->getInstr(), SUb->getInstr(), ClusterLength)
        && DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {

      DEBUG(dbgs() << "Cluster loads SU(" << SUa->NodeNum << ") - SU("
            << SUb->NodeNum << ")\n");
      // Copy successor edges from SUa to SUb. Interleaving computation
      // dependent on SUa can prevent load combining due to register reuse.
      // Predecessor edges do not need to be copied from SUb to SUa since nearby
      // loads should have effectively the same inputs.
      for (SUnit::const_succ_iterator
             SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
        if (SI->getSUnit() == SUb)
          continue;
        DEBUG(dbgs() << "  Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
        DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
      }
      ++ClusterLength;
    }
    else
      ClusterLength = 1;
  }
}

/// \brief Callback from DAG postProcessing to create cluster edges for loads.
void LoadClusterMutation::apply(ScheduleDAGMI *DAG) {
  // Map DAG NodeNum to store chain ID.
  DenseMap<unsigned, unsigned> StoreChainIDs;
  // Map each store chain to a set of dependent loads.
  SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->mayLoad())
      continue;
    unsigned ChainPredID = DAG->SUnits.size();
    for (SUnit::const_pred_iterator
           PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
      if (PI->isCtrl()) {
        ChainPredID = PI->getSUnit()->NodeNum;
        break;
      }
    }
    // Check if this chain-like pred has been seen
    // before. ChainPredID==MaxNodeID for loads at the top of the schedule.
    unsigned NumChains = StoreChainDependents.size();
    std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
      StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
    if (Result.second)
      StoreChainDependents.resize(NumChains + 1);
    StoreChainDependents[Result.first->second].push_back(SU);
  }
  // Iterate over the store chains.
  for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
    clusterNeighboringLoads(StoreChainDependents[Idx], DAG);
}

//===----------------------------------------------------------------------===//
// MacroFusion - DAG post-processing to encourage fusion of macro ops.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between instructions
/// that may be fused by the processor into a single operation.
class MacroFusion : public ScheduleDAGMutation {
  const TargetInstrInfo *TII;
public:
  MacroFusion(const TargetInstrInfo *tii): TII(tii) {}

  virtual void apply(ScheduleDAGMI *DAG);
};
} // anonymous

/// \brief Callback from DAG postProcessing to create cluster edges to encourage
/// fused operations.
void MacroFusion::apply(ScheduleDAGMI *DAG) {
  // For now, assume targets can only fuse with the branch.
  MachineInstr *Branch = DAG->ExitSU.getInstr();
  if (!Branch)
    return;

  for (unsigned Idx = DAG->SUnits.size(); Idx > 0;) {
    SUnit *SU = &DAG->SUnits[--Idx];
    if (!TII->shouldScheduleAdjacent(SU->getInstr(), Branch))
      continue;

    // Create a single weak edge from SU to ExitSU. The only effect is to cause
    // bottom-up scheduling to heavily prioritize the clustered SU. There is no
    // need to copy predecessor edges from ExitSU to SU, since top-down
    // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
    // of SU, we could create an artificial edge from the deepest root, but it
    // hasn't been needed yet.
    bool Success = DAG->addEdge(&DAG->ExitSU, SDep(SU, SDep::Cluster));
    (void)Success;
    assert(Success && "No DAG nodes should be reachable from ExitSU");

    DEBUG(dbgs() << "Macro Fuse SU(" << SU->NodeNum << ")\n");
    break;
  }
}

//===----------------------------------------------------------------------===//
// CopyConstrain - DAG post-processing to encourage copy elimination.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create weak edges from all uses of a copy to
/// the one use that defines the copy's source vreg, most likely an induction
/// variable increment.
class CopyConstrain : public ScheduleDAGMutation {
  // Transient state.
  SlotIndex RegionBeginIdx;
  SlotIndex RegionEndIdx;
public:
  CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}

  virtual void apply(ScheduleDAGMI *DAG);

protected:
  void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMI *DAG);
};
} // anonymous

/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
///   I0:     = dst
///   I1: src = ...
///   I2:     = dst
///   I3: dst = src (copy)
/// (create pred->succ edges I0->I1, I2->I1)
///
/// 2) Local copy:
///   I0: dst = src (copy)
///   I1:     = dst
///   I2: src = ...
///   I3:     = dst
/// (create pred->succ edges I1->I2, I3->I2)
///
/// Although the MachineScheduler is currently constrained to single blocks,
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMI *DAG) {
  LiveIntervals *LIS = DAG->getLIS();
  MachineInstr *Copy = CopySU->getInstr();

  // Check for pure vreg copies.
  unsigned SrcReg = Copy->getOperand(1).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
    return;

  unsigned DstReg = Copy->getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(DstReg))
    return;

  // Check if either the dest or source is local. If it's live across a back
  // edge, it's not local. Note that if both vregs are live across the back
  // edge, we cannot successfully constrain the copy without cyclic scheduling.
  unsigned LocalReg = DstReg;
  unsigned GlobalReg = SrcReg;
  LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
  if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
    LocalReg = SrcReg;
    GlobalReg = DstReg;
    LocalLI = &LIS->getInterval(LocalReg);
    if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
      return;
  }
  LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);

  // Find the global segment after the start of the local LI.
  LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
  // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
  // local live range. We could create edges from other global uses to the local
  // start, but the coalescer should have already eliminated these cases, so
  // don't bother dealing with it.
  if (GlobalSegment == GlobalLI->end())
    return;

  // If GlobalSegment is killed at the LocalLI->start, the call to find()
  // returned the next global segment. But if GlobalSegment overlaps with
  // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
  // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
  if (GlobalSegment->contains(LocalLI->beginIndex()))
    ++GlobalSegment;

  if (GlobalSegment == GlobalLI->end())
    return;

  // Check if GlobalLI contains a hole in the vicinity of LocalLI.
  if (GlobalSegment != GlobalLI->begin()) {
    // Two address defs have no hole.
    if (SlotIndex::isSameInstr(llvm::prior(GlobalSegment)->end,
                               GlobalSegment->start)) {
      return;
    }
    // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
    // it would be a disconnected component in the live range.
    assert(llvm::prior(GlobalSegment)->start < LocalLI->beginIndex() &&
           "Disconnected LRG within the scheduling region.");
  }
  MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
  if (!GlobalDef)
    return;

  SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
  if (!GlobalSU)
    return;

  // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
  // constraining the uses of the last local def to precede GlobalDef.
  SmallVector<SUnit*,8> LocalUses;
  const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
  MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
  SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
  for (SUnit::const_succ_iterator
         I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
       I != E; ++I) {
    if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
      continue;
    if (I->getSUnit() == GlobalSU)
      continue;
    if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
      return;
    LocalUses.push_back(I->getSUnit());
  }
  // Open the top of the GlobalLI hole by constraining any earlier global uses
  // to precede the start of LocalLI.
  SmallVector<SUnit*,8> GlobalUses;
  MachineInstr *FirstLocalDef =
    LIS->getInstructionFromIndex(LocalLI->beginIndex());
  SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
  for (SUnit::const_pred_iterator
         I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
    if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
      continue;
    if (I->getSUnit() == FirstLocalSU)
      continue;
    if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
      return;
    GlobalUses.push_back(I->getSUnit());
  }
  DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
  // Add the weak edges.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Local use SU(" << (*I)->NodeNum << ") -> SU("
          << GlobalSU->NodeNum << ")\n");
    DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
  }
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Global use SU(" << (*I)->NodeNum << ") -> SU("
          << FirstLocalSU->NodeNum << ")\n");
    DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
  }
}

/// \brief Callback from DAG postProcessing to create weak edges to encourage
/// copy elimination.
void CopyConstrain::apply(ScheduleDAGMI *DAG) {
  RegionBeginIdx = DAG->getLIS()->getInstructionIndex(
    &*nextIfDebug(DAG->begin(), DAG->end()));
  RegionEndIdx = DAG->getLIS()->getInstructionIndex(
    &*priorNonDebug(DAG->end(), DAG->begin()));

  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->isCopy())
      continue;

    constrainLocalCopy(SU, DAG);
  }
}

//===----------------------------------------------------------------------===//
// ConvergingScheduler - Implementation of the standard MachineSchedStrategy.
//===----------------------------------------------------------------------===//

namespace {
/// ConvergingScheduler shrinks the unscheduled zone using heuristics to balance
/// the schedule.
class ConvergingScheduler : public MachineSchedStrategy {
public:
  /// Represent the type of SchedCandidate found within a single queue.
  /// pickNodeBidirectional depends on these listed by decreasing priority.
  enum CandReason {
    NoCand, PhysRegCopy, SingleExcess, SingleCritical, Cluster, Weak,
    ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce,
    TopDepthReduce, TopPathReduce, SingleMax, MultiPressure, NextDefUse,
    NodeOrder};

#ifndef NDEBUG
  static const char *getReasonStr(ConvergingScheduler::CandReason Reason);
#endif

  /// Policy for scheduling the next instruction in the candidate's zone.
  struct CandPolicy {
    bool ReduceLatency;
    unsigned ReduceResIdx;
    unsigned DemandResIdx;

    CandPolicy(): ReduceLatency(false), ReduceResIdx(0), DemandResIdx(0) {}
  };

  /// Status of an instruction's critical resource consumption.
  struct SchedResourceDelta {
    // Count critical resources in the scheduled region required by SU.
    unsigned CritResources;

    // Count critical resources from another region consumed by SU.
    unsigned DemandedResources;

    SchedResourceDelta(): CritResources(0), DemandedResources(0) {}

    bool operator==(const SchedResourceDelta &RHS) const {
      return CritResources == RHS.CritResources
        && DemandedResources == RHS.DemandedResources;
    }
    bool operator!=(const SchedResourceDelta &RHS) const {
      return !operator==(RHS);
    }
  };

  /// Store the state used by ConvergingScheduler heuristics, required for the
  /// lifetime of one invocation of pickNode().
  struct SchedCandidate {
    CandPolicy Policy;

    // The best SUnit candidate.
    SUnit *SU;

    // The reason for this candidate.
    CandReason Reason;

    // Register pressure values for the best candidate.
    RegPressureDelta RPDelta;

    // Critical resource consumption of the best candidate.
    SchedResourceDelta ResDelta;

    SchedCandidate(const CandPolicy &policy)
      : Policy(policy), SU(NULL), Reason(NoCand) {}

    bool isValid() const { return SU; }

    // Copy the status of another candidate without changing policy.
    void setBest(SchedCandidate &Best) {
      assert(Best.Reason != NoCand && "uninitialized Sched candidate");
      SU = Best.SU;
      Reason = Best.Reason;
      RPDelta = Best.RPDelta;
      ResDelta = Best.ResDelta;
    }

    void initResourceDelta(const ScheduleDAGMI *DAG,
                           const TargetSchedModel *SchedModel);
  };

  /// Summarize the unscheduled region.
  struct SchedRemainder {
    // Critical path through the DAG in expected latency.
    unsigned CriticalPath;

    // Unscheduled resources
    SmallVector<unsigned, 16> RemainingCounts;
    // Critical resource for the unscheduled zone.
    unsigned CritResIdx;
    // Number of micro-ops left to schedule.
    unsigned RemainingMicroOps;

    void reset() {
      CriticalPath = 0;
      RemainingCounts.clear();
      CritResIdx = 0;
      RemainingMicroOps = 0;
    }

    SchedRemainder() { reset(); }

    void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);

    unsigned getMaxRemainingCount(const TargetSchedModel *SchedModel) const {
      if (!SchedModel->hasInstrSchedModel())
        return 0;

      return std::max(
        RemainingMicroOps * SchedModel->getMicroOpFactor(),
        RemainingCounts[CritResIdx]);
    }
  };

  /// Each scheduling boundary is associated with ready queues. It tracks the
  /// current cycle in the direction of movement, and maintains the state
  /// of "hazards" and other interlocks at the current cycle.
  struct SchedBoundary {
    ScheduleDAGMI *DAG;
    const TargetSchedModel *SchedModel;
    SchedRemainder *Rem;

    ReadyQueue Available;
    ReadyQueue Pending;
    bool CheckPending;

    // For heuristics, keep a list of the nodes that immediately depend on the
    // most recently scheduled node.
    SmallPtrSet<const SUnit*, 8> NextSUs;

    ScheduleHazardRecognizer *HazardRec;

    unsigned CurrCycle;
    unsigned IssueCount;

    /// MinReadyCycle - Cycle of the soonest available instruction.
    unsigned MinReadyCycle;

    // The expected latency of the critical path in this scheduled zone.
    unsigned ExpectedLatency;

    // Resources used in the scheduled zone beyond this boundary.
    SmallVector<unsigned, 16> ResourceCounts;

    // Cache the critical resource ID in this scheduled zone.
    unsigned CritResIdx;

    // Is the scheduled region resource limited vs. latency limited.
    bool IsResourceLimited;

    unsigned ExpectedCount;

#ifndef NDEBUG
    // Remember the greatest min operand latency.
    unsigned MaxMinLatency;
#endif

    void reset() {
      // A new HazardRec is created for each DAG and owned by SchedBoundary.
      delete HazardRec;

      Available.clear();
      Pending.clear();
      CheckPending = false;
      NextSUs.clear();
      HazardRec = 0;
      CurrCycle = 0;
      IssueCount = 0;
      MinReadyCycle = UINT_MAX;
      ExpectedLatency = 0;
      ResourceCounts.resize(1);
      assert(!ResourceCounts[0] && "nonzero count for bad resource");
      CritResIdx = 0;
      IsResourceLimited = false;
      ExpectedCount = 0;
#ifndef NDEBUG
      MaxMinLatency = 0;
#endif
      // Reserve a zero-count for invalid CritResIdx.
      ResourceCounts.resize(1);
    }

    /// Pending queues extend the ready queues with the same ID and the
    /// PendingFlag set.
    SchedBoundary(unsigned ID, const Twine &Name):
      DAG(0), SchedModel(0), Rem(0), Available(ID, Name+".A"),
      Pending(ID << ConvergingScheduler::LogMaxQID, Name+".P"),
      HazardRec(0) {
      reset();
    }

    ~SchedBoundary() { delete HazardRec; }

    void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel,
              SchedRemainder *rem);

    bool isTop() const {
      return Available.getID() == ConvergingScheduler::TopQID;
    }

    unsigned getUnscheduledLatency(SUnit *SU) const {
      if (isTop())
        return SU->getHeight();
      return SU->getDepth() + SU->Latency;
    }

    unsigned getCriticalCount() const {
      return ResourceCounts[CritResIdx];
    }

    bool checkHazard(SUnit *SU);

    void setLatencyPolicy(CandPolicy &Policy);

    void releaseNode(SUnit *SU, unsigned ReadyCycle);

    void bumpCycle();

    void countResource(unsigned PIdx, unsigned Cycles);

    void bumpNode(SUnit *SU);

    void releasePending();

    void removeReady(SUnit *SU);

    SUnit *pickOnlyChoice();
  };

private:
  ScheduleDAGMI *DAG;
  const TargetSchedModel *SchedModel;
  const TargetRegisterInfo *TRI;

  // State of the top and bottom scheduled instruction boundaries.
  SchedRemainder Rem;
  SchedBoundary Top;
  SchedBoundary Bot;

public:
  /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
  enum {
    TopQID = 1,
    BotQID = 2,
    LogMaxQID = 2
  };

  ConvergingScheduler():
    DAG(0), SchedModel(0), TRI(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}

  virtual void initialize(ScheduleDAGMI *dag);

  virtual SUnit *pickNode(bool &IsTopNode);

  virtual void schedNode(SUnit *SU, bool IsTopNode);

  virtual void releaseTopNode(SUnit *SU);

  virtual void releaseBottomNode(SUnit *SU);

  virtual void registerRoots();

protected:
  void balanceZones(
    ConvergingScheduler::SchedBoundary &CriticalZone,
    ConvergingScheduler::SchedCandidate &CriticalCand,
    ConvergingScheduler::SchedBoundary &OppositeZone,
    ConvergingScheduler::SchedCandidate &OppositeCand);

  void checkResourceLimits(ConvergingScheduler::SchedCandidate &TopCand,
                           ConvergingScheduler::SchedCandidate &BotCand);

  void tryCandidate(SchedCandidate &Cand,
                    SchedCandidate &TryCand,
                    SchedBoundary &Zone,
                    const RegPressureTracker &RPTracker,
                    RegPressureTracker &TempTracker);

  SUnit *pickNodeBidirectional(bool &IsTopNode);

  void pickNodeFromQueue(SchedBoundary &Zone,
                         const RegPressureTracker &RPTracker,
                         SchedCandidate &Candidate);

  void reschedulePhysRegCopies(SUnit *SU, bool isTop);

#ifndef NDEBUG
  void traceCandidate(const SchedCandidate &Cand);
#endif
};
} // namespace

void ConvergingScheduler::SchedRemainder::
init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
  reset();
  if (!SchedModel->hasInstrSchedModel())
    return;
  RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
  for (std::vector<SUnit>::iterator
         I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
    RemainingMicroOps += SchedModel->getNumMicroOps(I->getInstr(), SC);
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned PIdx = PI->ProcResourceIdx;
      unsigned Factor = SchedModel->getResourceFactor(PIdx);
      RemainingCounts[PIdx] += (Factor * PI->Cycles);
    }
  }
  for (unsigned PIdx = 0, PEnd = SchedModel->getNumProcResourceKinds();
       PIdx != PEnd; ++PIdx) {
    if ((int)(RemainingCounts[PIdx] - RemainingCounts[CritResIdx])
        >= (int)SchedModel->getLatencyFactor()) {
      CritResIdx = PIdx;
    }
  }
}

void ConvergingScheduler::SchedBoundary::
init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
  reset();
  DAG = dag;
  SchedModel = smodel;
  Rem = rem;
  if (SchedModel->hasInstrSchedModel())
    ResourceCounts.resize(SchedModel->getNumProcResourceKinds());
}

void ConvergingScheduler::initialize(ScheduleDAGMI *dag) {
  DAG = dag;
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  Bot.init(DAG, SchedModel, &Rem);

  // Initialize resource counts.

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
  // are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  const TargetMachine &TM = DAG->MF.getTarget();
  Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
  Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);

  assert((!ForceTopDown || !ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
}

void ConvergingScheduler::releaseTopNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle;
    unsigned MinLatency = I->getMinLatency();
#ifndef NDEBUG
    Top.MaxMinLatency = std::max(MinLatency, Top.MaxMinLatency);
#endif
    if (SU->TopReadyCycle < PredReadyCycle + MinLatency)
      SU->TopReadyCycle = PredReadyCycle + MinLatency;
  }
  Top.releaseNode(SU, SU->TopReadyCycle);
}

void ConvergingScheduler::releaseBottomNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  assert(SU->getInstr() && "Scheduled SUnit must have instr");

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isWeak())
      continue;
    unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
    unsigned MinLatency = I->getMinLatency();
#ifndef NDEBUG
    Bot.MaxMinLatency = std::max(MinLatency, Bot.MaxMinLatency);
#endif
    if (SU->BotReadyCycle < SuccReadyCycle + MinLatency)
      SU->BotReadyCycle = SuccReadyCycle + MinLatency;
  }
  Bot.releaseNode(SU, SU->BotReadyCycle);
}

void ConvergingScheduler::registerRoots() {
  Rem.CriticalPath = DAG->ExitSU.getDepth();
  // Some roots may not feed into ExitSU. Check all of them in case.
  for (std::vector<SUnit*>::const_iterator
         I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
    if ((*I)->getDepth() > Rem.CriticalPath)
      Rem.CriticalPath = (*I)->getDepth();
  }
  DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n');
}

/// Does this SU have a hazard within the current instruction group.
///
/// The scheduler supports two modes of hazard recognition. The first is the
The first is the 1505 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that 1506 /// supports highly complicated in-order reservation tables 1507 /// (ScoreboardHazardRecognizer) and arbitraty target-specific logic. 1508 /// 1509 /// The second is a streamlined mechanism that checks for hazards based on 1510 /// simple counters that the scheduler itself maintains. It explicitly checks 1511 /// for instruction dispatch limitations, including the number of micro-ops that 1512 /// can dispatch per cycle. 1513 /// 1514 /// TODO: Also check whether the SU must start a new group. 1515 bool ConvergingScheduler::SchedBoundary::checkHazard(SUnit *SU) { 1516 if (HazardRec->isEnabled()) 1517 return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard; 1518 1519 unsigned uops = SchedModel->getNumMicroOps(SU->getInstr()); 1520 if ((IssueCount > 0) && (IssueCount + uops > SchedModel->getIssueWidth())) { 1521 DEBUG(dbgs() << " SU(" << SU->NodeNum << ") uops=" 1522 << SchedModel->getNumMicroOps(SU->getInstr()) << '\n'); 1523 return true; 1524 } 1525 return false; 1526 } 1527 1528 /// Compute the remaining latency to determine whether ILP should be increased. 1529 void ConvergingScheduler::SchedBoundary::setLatencyPolicy(CandPolicy &Policy) { 1530 // FIXME: compile time. In all, we visit four queues here one we should only 1531 // need to visit the one that was last popped if we cache the result. 1532 unsigned RemLatency = 0; 1533 for (ReadyQueue::iterator I = Available.begin(), E = Available.end(); 1534 I != E; ++I) { 1535 unsigned L = getUnscheduledLatency(*I); 1536 DEBUG(dbgs() << " " << Available.getName() 1537 << " RemLatency SU(" << (*I)->NodeNum << ") " << L << '\n'); 1538 if (L > RemLatency) 1539 RemLatency = L; 1540 } 1541 for (ReadyQueue::iterator I = Pending.begin(), E = Pending.end(); 1542 I != E; ++I) { 1543 unsigned L = getUnscheduledLatency(*I); 1544 if (L > RemLatency) 1545 RemLatency = L; 1546 } 1547 unsigned CriticalPathLimit = Rem->CriticalPath + SchedModel->getILPWindow(); 1548 DEBUG(dbgs() << " " << Available.getName() 1549 << " ExpectedLatency " << ExpectedLatency 1550 << " CP Limit " << CriticalPathLimit << '\n'); 1551 if (RemLatency + ExpectedLatency >= CriticalPathLimit 1552 && RemLatency > Rem->getMaxRemainingCount(SchedModel)) { 1553 Policy.ReduceLatency = true; 1554 DEBUG(dbgs() << " Increase ILP: " << Available.getName() << '\n'); 1555 } 1556 } 1557 1558 void ConvergingScheduler::SchedBoundary::releaseNode(SUnit *SU, 1559 unsigned ReadyCycle) { 1560 1561 if (ReadyCycle < MinReadyCycle) 1562 MinReadyCycle = ReadyCycle; 1563 1564 // Check for interlocks first. For the purpose of other heuristics, an 1565 // instruction that cannot issue appears as if it's not in the ReadyQueue. 1566 if (ReadyCycle > CurrCycle || checkHazard(SU)) 1567 Pending.push(SU); 1568 else 1569 Available.push(SU); 1570 1571 // Record this node as an immediate dependent of the scheduled node. 1572 NextSUs.insert(SU); 1573 } 1574 1575 /// Move the boundary of scheduled code by one cycle. 1576 void ConvergingScheduler::SchedBoundary::bumpCycle() { 1577 unsigned Width = SchedModel->getIssueWidth(); 1578 IssueCount = (IssueCount <= Width) ? 0 : IssueCount - Width; 1579 1580 unsigned NextCycle = CurrCycle + 1; 1581 assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized"); 1582 if (MinReadyCycle > NextCycle) { 1583 IssueCount = 0; 1584 NextCycle = MinReadyCycle; 1585 } 1586 1587 if (!HazardRec->isEnabled()) { 1588 // Bypass HazardRec virtual calls. 
1589 CurrCycle = NextCycle;
1590 }
1591 else {
1592 // Bypass getHazardType calls in case of long latency.
1593 for (; CurrCycle != NextCycle; ++CurrCycle) {
1594 if (isTop())
1595 HazardRec->AdvanceCycle();
1596 else
1597 HazardRec->RecedeCycle();
1598 }
1599 }
1600 CheckPending = true;
1601 IsResourceLimited = getCriticalCount() > std::max(ExpectedLatency, CurrCycle);
1602
1603 DEBUG(dbgs() << " " << Available.getName()
1604 << " Cycle: " << CurrCycle << '\n');
1605 }
1606
1607 /// Add the given processor resource to this scheduled zone.
1608 void ConvergingScheduler::SchedBoundary::countResource(unsigned PIdx,
1609 unsigned Cycles) {
1610 unsigned Factor = SchedModel->getResourceFactor(PIdx);
1611 DEBUG(dbgs() << " " << SchedModel->getProcResource(PIdx)->Name
1612 << " +(" << Cycles << "x" << Factor
1613 << ") / " << SchedModel->getLatencyFactor() << '\n');
1614
1615 unsigned Count = Factor * Cycles;
1616 ResourceCounts[PIdx] += Count;
1617 assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
1618 Rem->RemainingCounts[PIdx] -= Count;
1619
1620 // Check if this resource exceeds the current critical resource by a full
1621 // cycle. If so, it becomes the critical resource.
1622 if ((int)(ResourceCounts[PIdx] - ResourceCounts[CritResIdx])
1623 >= (int)SchedModel->getLatencyFactor()) {
1624 CritResIdx = PIdx;
1625 DEBUG(dbgs() << " *** Critical resource "
1626 << SchedModel->getProcResource(PIdx)->Name << " x"
1627 << ResourceCounts[PIdx] << '\n');
1628 }
1629 }
1630
1631 /// Move the boundary of scheduled code by one SUnit.
1632 void ConvergingScheduler::SchedBoundary::bumpNode(SUnit *SU) {
1633 // Update the reservation table.
1634 if (HazardRec->isEnabled()) {
1635 if (!isTop() && SU->isCall) {
1636 // Calls are scheduled with their preceding instructions. For bottom-up
1637 // scheduling, clear the pipeline state before emitting.
1638 HazardRec->Reset();
1639 }
1640 HazardRec->EmitInstruction(SU);
1641 }
1642 // Update resource counts and critical resource.
1643 if (SchedModel->hasInstrSchedModel()) {
1644 const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1645 Rem->RemainingMicroOps -= SchedModel->getNumMicroOps(SU->getInstr(), SC);
1646 for (TargetSchedModel::ProcResIter
1647 PI = SchedModel->getWriteProcResBegin(SC),
1648 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1649 countResource(PI->ProcResourceIdx, PI->Cycles);
1650 }
1651 }
1652 if (isTop()) {
1653 if (SU->getDepth() > ExpectedLatency)
1654 ExpectedLatency = SU->getDepth();
1655 }
1656 else {
1657 if (SU->getHeight() > ExpectedLatency)
1658 ExpectedLatency = SU->getHeight();
1659 }
1660
1661 IsResourceLimited = getCriticalCount() > std::max(ExpectedLatency, CurrCycle);
1662
1663 // Check the instruction group dispatch limit.
1664 // TODO: Check if this SU must end a dispatch group.
1665 IssueCount += SchedModel->getNumMicroOps(SU->getInstr());
1666
1667 // checkHazard prevents scheduling multiple instructions per cycle that exceed
1668 // the issue width. However, we commonly reach the maximum. In this case
1669 // opportunistically bump the cycle to avoid uselessly checking everything in
1670 // the readyQ. Furthermore, a single instruction may produce more than one
1671 // cycle's worth of micro-ops.
1672 if (IssueCount >= SchedModel->getIssueWidth()) {
1673 DEBUG(dbgs() << " *** Max instrs at cycle " << CurrCycle << '\n');
1674 bumpCycle();
1675 }
1676 }
1677
1678 /// Release pending ready nodes into the available queue. This makes them
1679 /// visible to heuristics.
1680 void ConvergingScheduler::SchedBoundary::releasePending() { 1681 // If the available queue is empty, it is safe to reset MinReadyCycle. 1682 if (Available.empty()) 1683 MinReadyCycle = UINT_MAX; 1684 1685 // Check to see if any of the pending instructions are ready to issue. If 1686 // so, add them to the available queue. 1687 for (unsigned i = 0, e = Pending.size(); i != e; ++i) { 1688 SUnit *SU = *(Pending.begin()+i); 1689 unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle; 1690 1691 if (ReadyCycle < MinReadyCycle) 1692 MinReadyCycle = ReadyCycle; 1693 1694 if (ReadyCycle > CurrCycle) 1695 continue; 1696 1697 if (checkHazard(SU)) 1698 continue; 1699 1700 Available.push(SU); 1701 Pending.remove(Pending.begin()+i); 1702 --i; --e; 1703 } 1704 DEBUG(if (!Pending.empty()) Pending.dump()); 1705 CheckPending = false; 1706 } 1707 1708 /// Remove SU from the ready set for this boundary. 1709 void ConvergingScheduler::SchedBoundary::removeReady(SUnit *SU) { 1710 if (Available.isInQueue(SU)) 1711 Available.remove(Available.find(SU)); 1712 else { 1713 assert(Pending.isInQueue(SU) && "bad ready count"); 1714 Pending.remove(Pending.find(SU)); 1715 } 1716 } 1717 1718 /// If this queue only has one ready candidate, return it. As a side effect, 1719 /// defer any nodes that now hit a hazard, and advance the cycle until at least 1720 /// one node is ready. If multiple instructions are ready, return NULL. 1721 SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() { 1722 if (CheckPending) 1723 releasePending(); 1724 1725 if (IssueCount > 0) { 1726 // Defer any ready instrs that now have a hazard. 1727 for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) { 1728 if (checkHazard(*I)) { 1729 Pending.push(*I); 1730 I = Available.remove(I); 1731 continue; 1732 } 1733 ++I; 1734 } 1735 } 1736 for (unsigned i = 0; Available.empty(); ++i) { 1737 assert(i <= (HazardRec->getMaxLookAhead() + MaxMinLatency) && 1738 "permanent hazard"); (void)i; 1739 bumpCycle(); 1740 releasePending(); 1741 } 1742 if (Available.size() == 1) 1743 return *Available.begin(); 1744 return NULL; 1745 } 1746 1747 /// Record the candidate policy for opposite zones with different critical 1748 /// resources. 1749 /// 1750 /// If the CriticalZone is latency limited, don't force a policy for the 1751 /// candidates here. Instead, setLatencyPolicy sets ReduceLatency if needed. 1752 void ConvergingScheduler::balanceZones( 1753 ConvergingScheduler::SchedBoundary &CriticalZone, 1754 ConvergingScheduler::SchedCandidate &CriticalCand, 1755 ConvergingScheduler::SchedBoundary &OppositeZone, 1756 ConvergingScheduler::SchedCandidate &OppositeCand) { 1757 1758 if (!CriticalZone.IsResourceLimited) 1759 return; 1760 assert(SchedModel->hasInstrSchedModel() && "required schedmodel"); 1761 1762 SchedRemainder *Rem = CriticalZone.Rem; 1763 1764 // If the critical zone is overconsuming a resource relative to the 1765 // remainder, try to reduce it. 1766 unsigned RemainingCritCount = 1767 Rem->RemainingCounts[CriticalZone.CritResIdx]; 1768 if ((int)(Rem->getMaxRemainingCount(SchedModel) - RemainingCritCount) 1769 > (int)SchedModel->getLatencyFactor()) { 1770 CriticalCand.Policy.ReduceResIdx = CriticalZone.CritResIdx; 1771 DEBUG(dbgs() << " Balance " << CriticalZone.Available.getName() 1772 << " reduce " 1773 << SchedModel->getProcResource(CriticalZone.CritResIdx)->Name 1774 << '\n'); 1775 } 1776 // If the other zone is underconsuming a resource relative to the full zone, 1777 // try to increase it. 
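// ExpectedCount was computed by checkResourceLimits in resource units (cycles
// scaled by the latency factor), so it is directly comparable to the opposite
// zone's raw count for the critical resource.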
1778 unsigned OppositeCount =
1779 OppositeZone.ResourceCounts[CriticalZone.CritResIdx];
1780 if ((int)(OppositeZone.ExpectedCount - OppositeCount)
1781 > (int)SchedModel->getLatencyFactor()) {
1782 OppositeCand.Policy.DemandResIdx = CriticalZone.CritResIdx;
1783 DEBUG(dbgs() << " Balance " << OppositeZone.Available.getName()
1784 << " demand "
1785 << SchedModel->getProcResource(OppositeZone.CritResIdx)->Name
1786 << '\n');
1787 }
1788 }
1789
1790 /// Determine if the scheduled zones exceed resource limits or critical path and
1791 /// set each candidate's ReduceLatency and ReduceResIdx policies accordingly.
1792 void ConvergingScheduler::checkResourceLimits(
1793 ConvergingScheduler::SchedCandidate &TopCand,
1794 ConvergingScheduler::SchedCandidate &BotCand) {
1795
1796 // Set ReduceLatency to true if needed.
1797 Bot.setLatencyPolicy(BotCand.Policy);
1798 Top.setLatencyPolicy(TopCand.Policy);
1799
1800 // Handle resource-limited regions.
1801 if (Top.IsResourceLimited && Bot.IsResourceLimited
1802 && Top.CritResIdx == Bot.CritResIdx) {
1803 // If the scheduled critical resource in both zones is no longer the
1804 // critical remaining resource, attempt to reduce resource height both ways.
1805 if (Top.CritResIdx != Rem.CritResIdx) {
1806 TopCand.Policy.ReduceResIdx = Top.CritResIdx;
1807 BotCand.Policy.ReduceResIdx = Bot.CritResIdx;
1808 DEBUG(dbgs() << " Reduce scheduled "
1809 << SchedModel->getProcResource(Top.CritResIdx)->Name << '\n');
1810 }
1811 return;
1812 }
1813 // Handle latency-limited regions.
1814 if (!Top.IsResourceLimited && !Bot.IsResourceLimited) {
1815 // If the total scheduled expected latency exceeds the region's critical
1816 // path, then reduce latency both ways.
1817 //
1818 // Just because a zone is not resource limited does not mean it is latency
1819 // limited. Unbuffered resources, such as the max micro-ops limit, may cause
1820 // CurrCycle to exceed expected latency.
1821 if ((Top.ExpectedLatency + Bot.ExpectedLatency >= Rem.CriticalPath)
1822 && (Rem.CriticalPath > Top.CurrCycle + Bot.CurrCycle)) {
1823 TopCand.Policy.ReduceLatency = true;
1824 BotCand.Policy.ReduceLatency = true;
1825 DEBUG(dbgs() << " Reduce scheduled latency " << Top.ExpectedLatency
1826 << " + " << Bot.ExpectedLatency << '\n');
1827 }
1828 return;
1829 }
1830 // The critical resource is different in each zone, so request balancing.
1831
1832 // Compute the cost of each zone.
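// The cost is measured in resource units: take the larger of expected latency
// and the current cycle, scale it by the latency factor, and keep whichever of
// that and the critical resource count is greater.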
1833 Top.ExpectedCount = std::max(Top.ExpectedLatency, Top.CurrCycle);
1834 Top.ExpectedCount = std::max(
1835 Top.getCriticalCount(),
1836 Top.ExpectedCount * SchedModel->getLatencyFactor());
1837 Bot.ExpectedCount = std::max(Bot.ExpectedLatency, Bot.CurrCycle);
1838 Bot.ExpectedCount = std::max(
1839 Bot.getCriticalCount(),
1840 Bot.ExpectedCount * SchedModel->getLatencyFactor());
1841
1842 balanceZones(Top, TopCand, Bot, BotCand);
1843 balanceZones(Bot, BotCand, Top, TopCand);
1844 }
1845
1846 void ConvergingScheduler::SchedCandidate::
1847 initResourceDelta(const ScheduleDAGMI *DAG,
1848 const TargetSchedModel *SchedModel) {
1849 if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
1850 return;
1851
1852 const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1853 for (TargetSchedModel::ProcResIter
1854 PI = SchedModel->getWriteProcResBegin(SC),
1855 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1856 if (PI->ProcResourceIdx == Policy.ReduceResIdx)
1857 ResDelta.CritResources += PI->Cycles;
1858 if (PI->ProcResourceIdx == Policy.DemandResIdx)
1859 ResDelta.DemandedResources += PI->Cycles;
1860 }
1861 }
1862
1863 /// Return true if this heuristic determines order.
1864 static bool tryLess(int TryVal, int CandVal,
1865 ConvergingScheduler::SchedCandidate &TryCand,
1866 ConvergingScheduler::SchedCandidate &Cand,
1867 ConvergingScheduler::CandReason Reason) {
1868 if (TryVal < CandVal) {
1869 TryCand.Reason = Reason;
1870 return true;
1871 }
1872 if (TryVal > CandVal) {
1873 if (Cand.Reason > Reason)
1874 Cand.Reason = Reason;
1875 return true;
1876 }
1877 return false;
1878 }
1879
1880 static bool tryGreater(int TryVal, int CandVal,
1881 ConvergingScheduler::SchedCandidate &TryCand,
1882 ConvergingScheduler::SchedCandidate &Cand,
1883 ConvergingScheduler::CandReason Reason) {
1884 if (TryVal > CandVal) {
1885 TryCand.Reason = Reason;
1886 return true;
1887 }
1888 if (TryVal < CandVal) {
1889 if (Cand.Reason > Reason)
1890 Cand.Reason = Reason;
1891 return true;
1892 }
1893 return false;
1894 }
1895
1896 static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
1897 return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
1898 }
1899
1900 /// Minimize physical register live ranges. Regalloc wants them adjacent to
1901 /// their physreg def/use.
1902 ///
1903 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
1904 /// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
1905 /// with the operation that produces or consumes the physreg. We'll do this when
1906 /// regalloc has support for parallel copies.
1907 static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
1908 const MachineInstr *MI = SU->getInstr();
1909 if (!MI->isCopy())
1910 return 0;
1911
1912 unsigned ScheduledOper = isTop ? 1 : 0;
1913 unsigned UnscheduledOper = isTop ? 0 : 1;
1914 // If we have already scheduled the physreg producer/consumer, immediately
1915 // schedule the copy.
1916 if (TargetRegisterInfo::isPhysicalRegister(
1917 MI->getOperand(ScheduledOper).getReg()))
1918 return 1;
1919 // If the physreg is at the boundary, defer it. Otherwise schedule it
1920 // immediately to free the dependent. We can hoist the copy later.
1921 bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
1922 if (TargetRegisterInfo::isPhysicalRegister(
1923 MI->getOperand(UnscheduledOper).getReg()))
1924 return AtBoundary ? -1 : 1;
1925 return 0;
1926 }
1927
1928 /// Apply a set of heuristics to a new candidate. Heuristics are currently
1929 /// hierarchical.
This may be more efficient than a graduated cost model because 1930 /// we don't need to evaluate all aspects of the model for each node in the 1931 /// queue. But it's really done to make the heuristics easier to debug and 1932 /// statistically analyze. 1933 /// 1934 /// \param Cand provides the policy and current best candidate. 1935 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized. 1936 /// \param Zone describes the scheduled zone that we are extending. 1937 /// \param RPTracker describes reg pressure within the scheduled zone. 1938 /// \param TempTracker is a scratch pressure tracker to reuse in queries. 1939 void ConvergingScheduler::tryCandidate(SchedCandidate &Cand, 1940 SchedCandidate &TryCand, 1941 SchedBoundary &Zone, 1942 const RegPressureTracker &RPTracker, 1943 RegPressureTracker &TempTracker) { 1944 1945 // Always initialize TryCand's RPDelta. 1946 TempTracker.getMaxPressureDelta(TryCand.SU->getInstr(), TryCand.RPDelta, 1947 DAG->getRegionCriticalPSets(), 1948 DAG->getRegPressure().MaxSetPressure); 1949 1950 // Initialize the candidate if needed. 1951 if (!Cand.isValid()) { 1952 TryCand.Reason = NodeOrder; 1953 return; 1954 } 1955 1956 if (tryGreater(biasPhysRegCopy(TryCand.SU, Zone.isTop()), 1957 biasPhysRegCopy(Cand.SU, Zone.isTop()), 1958 TryCand, Cand, PhysRegCopy)) 1959 return; 1960 1961 // Avoid exceeding the target's limit. 1962 if (tryLess(TryCand.RPDelta.Excess.UnitIncrease, 1963 Cand.RPDelta.Excess.UnitIncrease, TryCand, Cand, SingleExcess)) 1964 return; 1965 if (Cand.Reason == SingleExcess) 1966 Cand.Reason = MultiPressure; 1967 1968 // Avoid increasing the max critical pressure in the scheduled region. 1969 if (tryLess(TryCand.RPDelta.CriticalMax.UnitIncrease, 1970 Cand.RPDelta.CriticalMax.UnitIncrease, 1971 TryCand, Cand, SingleCritical)) 1972 return; 1973 if (Cand.Reason == SingleCritical) 1974 Cand.Reason = MultiPressure; 1975 1976 // Keep clustered nodes together to encourage downstream peephole 1977 // optimizations which may reduce resource requirements. 1978 // 1979 // This is a best effort to set things up for a post-RA pass. Optimizations 1980 // like generating loads of multiple registers should ideally be done within 1981 // the scheduler pass by combining the loads during DAG postprocessing. 1982 const SUnit *NextClusterSU = 1983 Zone.isTop() ? DAG->getNextClusterSucc() : DAG->getNextClusterPred(); 1984 if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU, 1985 TryCand, Cand, Cluster)) 1986 return; 1987 1988 // Weak edges are for clustering and other constraints. 1989 if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()), 1990 getWeakLeft(Cand.SU, Zone.isTop()), 1991 TryCand, Cand, Weak)) { 1992 return; 1993 } 1994 // Avoid critical resource consumption and balance the schedule. 1995 TryCand.initResourceDelta(DAG, SchedModel); 1996 if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources, 1997 TryCand, Cand, ResourceReduce)) 1998 return; 1999 if (tryGreater(TryCand.ResDelta.DemandedResources, 2000 Cand.ResDelta.DemandedResources, 2001 TryCand, Cand, ResourceDemand)) 2002 return; 2003 2004 // Avoid serializing long latency dependence chains. 
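// These checks only apply when the zone's policy asked for latency reduction.
// Top-down: prefer the shallower candidate once the current best's depth
// already exceeds the zone's expected count, otherwise prefer the taller one
// (the longer remaining path). Bottom-up mirrors this with height and depth.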
2005 if (Cand.Policy.ReduceLatency) {
2006 if (Zone.isTop()) {
2007 if (Cand.SU->getDepth() * SchedModel->getLatencyFactor()
2008 > Zone.ExpectedCount) {
2009 if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2010 TryCand, Cand, TopDepthReduce))
2011 return;
2012 }
2013 if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2014 TryCand, Cand, TopPathReduce))
2015 return;
2016 }
2017 else {
2018 if (Cand.SU->getHeight() * SchedModel->getLatencyFactor()
2019 > Zone.ExpectedCount) {
2020 if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2021 TryCand, Cand, BotHeightReduce))
2022 return;
2023 }
2024 if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2025 TryCand, Cand, BotPathReduce))
2026 return;
2027 }
2028 }
2029
2030 // Avoid increasing the max pressure of the entire region.
2031 if (tryLess(TryCand.RPDelta.CurrentMax.UnitIncrease,
2032 Cand.RPDelta.CurrentMax.UnitIncrease, TryCand, Cand, SingleMax))
2033 return;
2034 if (Cand.Reason == SingleMax)
2035 Cand.Reason = MultiPressure;
2036
2037 // Prefer immediate defs/users of the last scheduled instruction. This is a
2038 // nice pressure avoidance strategy that also conserves the processor's
2039 // register renaming resources and keeps the machine code readable.
2040 if (tryGreater(Zone.NextSUs.count(TryCand.SU), Zone.NextSUs.count(Cand.SU),
2041 TryCand, Cand, NextDefUse))
2042 return;
2043
2044 // Fall through to original instruction order.
2045 if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
2046 || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
2047 TryCand.Reason = NodeOrder;
2048 }
2049 }
2050
2051 /// pickNodeFromQueue helper that returns true if the LHS reg pressure effect is
2052 /// more desirable than RHS from a scheduling standpoint.
2053 static bool compareRPDelta(const RegPressureDelta &LHS,
2054 const RegPressureDelta &RHS) {
2055 // Compare each component of pressure in decreasing order of importance
2056 // without checking if any are valid. Invalid PressureElements are assumed to
2057 // have UnitIncrease==0, so are neutral.
2058
2059 // Avoid exceeding the target's limit.
2060 if (LHS.Excess.UnitIncrease != RHS.Excess.UnitIncrease) {
2061 DEBUG(dbgs() << " RP excess top - bot: "
2062 << (LHS.Excess.UnitIncrease - RHS.Excess.UnitIncrease) << '\n');
2063 return LHS.Excess.UnitIncrease < RHS.Excess.UnitIncrease;
2064 }
2065 // Avoid increasing the max critical pressure in the scheduled region.
2066 if (LHS.CriticalMax.UnitIncrease != RHS.CriticalMax.UnitIncrease) {
2067 DEBUG(dbgs() << " RP critical top - bot: "
2068 << (LHS.CriticalMax.UnitIncrease - RHS.CriticalMax.UnitIncrease)
2069 << '\n');
2070 return LHS.CriticalMax.UnitIncrease < RHS.CriticalMax.UnitIncrease;
2071 }
2072 // Avoid increasing the max pressure of the entire region.
2073 if (LHS.CurrentMax.UnitIncrease != RHS.CurrentMax.UnitIncrease) { 2074 DEBUG(dbgs() << " RP current top - bot: " 2075 << (LHS.CurrentMax.UnitIncrease - RHS.CurrentMax.UnitIncrease) 2076 << '\n'); 2077 return LHS.CurrentMax.UnitIncrease < RHS.CurrentMax.UnitIncrease; 2078 } 2079 return false; 2080 } 2081 2082 #ifndef NDEBUG 2083 const char *ConvergingScheduler::getReasonStr( 2084 ConvergingScheduler::CandReason Reason) { 2085 switch (Reason) { 2086 case NoCand: return "NOCAND "; 2087 case PhysRegCopy: return "PREG-COPY"; 2088 case SingleExcess: return "REG-EXCESS"; 2089 case SingleCritical: return "REG-CRIT "; 2090 case Cluster: return "CLUSTER "; 2091 case Weak: return "WEAK "; 2092 case SingleMax: return "REG-MAX "; 2093 case MultiPressure: return "REG-MULTI "; 2094 case ResourceReduce: return "RES-REDUCE"; 2095 case ResourceDemand: return "RES-DEMAND"; 2096 case TopDepthReduce: return "TOP-DEPTH "; 2097 case TopPathReduce: return "TOP-PATH "; 2098 case BotHeightReduce:return "BOT-HEIGHT"; 2099 case BotPathReduce: return "BOT-PATH "; 2100 case NextDefUse: return "DEF-USE "; 2101 case NodeOrder: return "ORDER "; 2102 }; 2103 llvm_unreachable("Unknown reason!"); 2104 } 2105 2106 void ConvergingScheduler::traceCandidate(const SchedCandidate &Cand) { 2107 PressureElement P; 2108 unsigned ResIdx = 0; 2109 unsigned Latency = 0; 2110 switch (Cand.Reason) { 2111 default: 2112 break; 2113 case SingleExcess: 2114 P = Cand.RPDelta.Excess; 2115 break; 2116 case SingleCritical: 2117 P = Cand.RPDelta.CriticalMax; 2118 break; 2119 case SingleMax: 2120 P = Cand.RPDelta.CurrentMax; 2121 break; 2122 case ResourceReduce: 2123 ResIdx = Cand.Policy.ReduceResIdx; 2124 break; 2125 case ResourceDemand: 2126 ResIdx = Cand.Policy.DemandResIdx; 2127 break; 2128 case TopDepthReduce: 2129 Latency = Cand.SU->getDepth(); 2130 break; 2131 case TopPathReduce: 2132 Latency = Cand.SU->getHeight(); 2133 break; 2134 case BotHeightReduce: 2135 Latency = Cand.SU->getHeight(); 2136 break; 2137 case BotPathReduce: 2138 Latency = Cand.SU->getDepth(); 2139 break; 2140 } 2141 dbgs() << " SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason); 2142 if (P.isValid()) 2143 dbgs() << " " << TRI->getRegPressureSetName(P.PSetID) 2144 << ":" << P.UnitIncrease << " "; 2145 else 2146 dbgs() << " "; 2147 if (ResIdx) 2148 dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " "; 2149 else 2150 dbgs() << " "; 2151 if (Latency) 2152 dbgs() << " " << Latency << " cycles "; 2153 else 2154 dbgs() << " "; 2155 dbgs() << '\n'; 2156 } 2157 #endif 2158 2159 /// Pick the best candidate from the top queue. 2160 /// 2161 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during 2162 /// DAG building. To adjust for the current scheduling location we need to 2163 /// maintain the number of vreg uses remaining to be top-scheduled. 2164 void ConvergingScheduler::pickNodeFromQueue(SchedBoundary &Zone, 2165 const RegPressureTracker &RPTracker, 2166 SchedCandidate &Cand) { 2167 ReadyQueue &Q = Zone.Available; 2168 2169 DEBUG(Q.dump()); 2170 2171 // getMaxPressureDelta temporarily modifies the tracker. 2172 RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker); 2173 2174 for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) { 2175 2176 SchedCandidate TryCand(Cand.Policy); 2177 TryCand.SU = *I; 2178 tryCandidate(Cand, TryCand, Zone, RPTracker, TempTracker); 2179 if (TryCand.Reason != NoCand) { 2180 // Initialize resource delta if needed in case future heuristics query it. 
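// A default-constructed delta means tryCandidate picked this candidate before
// reaching the resource heuristics, so fill it in now.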
2181 if (TryCand.ResDelta == SchedResourceDelta())
2182 TryCand.initResourceDelta(DAG, SchedModel);
2183 Cand.setBest(TryCand);
2184 DEBUG(traceCandidate(Cand));
2185 }
2186 }
2187 }
2188
2189 static void tracePick(const ConvergingScheduler::SchedCandidate &Cand,
2190 bool IsTop) {
2191 DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
2192 << ConvergingScheduler::getReasonStr(Cand.Reason) << '\n');
2193 }
2194
2195 /// Pick the best candidate node from either the top or bottom queue.
2196 SUnit *ConvergingScheduler::pickNodeBidirectional(bool &IsTopNode) {
2197 // Schedule as far as possible in the direction of no choice. This is most
2198 // efficient, but also provides the best heuristics for CriticalPSets.
2199 if (SUnit *SU = Bot.pickOnlyChoice()) {
2200 IsTopNode = false;
2201 DEBUG(dbgs() << "Pick Bot NOCAND\n");
2202 return SU;
2203 }
2204 if (SUnit *SU = Top.pickOnlyChoice()) {
2205 IsTopNode = true;
2206 DEBUG(dbgs() << "Pick Top NOCAND\n");
2207 return SU;
2208 }
2209 CandPolicy NoPolicy;
2210 SchedCandidate BotCand(NoPolicy);
2211 SchedCandidate TopCand(NoPolicy);
2212 checkResourceLimits(TopCand, BotCand);
2213
2214 // Prefer bottom scheduling when heuristics are silent.
2215 pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
2216 assert(BotCand.Reason != NoCand && "failed to find the first candidate");
2217
2218 // If either Q has a single candidate that provides the least increase in
2219 // Excess pressure, we can immediately schedule from that Q.
2220 //
2221 // RegionCriticalPSets summarizes the pressure within the scheduled region and
2222 // affects picking from either Q. If scheduling in one direction must
2223 // increase pressure for one of the excess PSets, then schedule in that
2224 // direction first to provide more freedom in the other direction.
2225 if (BotCand.Reason == SingleExcess || BotCand.Reason == SingleCritical) {
2226 IsTopNode = false;
2227 tracePick(BotCand, IsTopNode);
2228 return BotCand.SU;
2229 }
2230 // Check if the top Q has a better candidate.
2231 pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2232 assert(TopCand.Reason != NoCand && "failed to find the first candidate");
2233
2234 // If either Q has a single candidate that minimizes pressure above the
2235 // original region's pressure, pick it.
2236 if (TopCand.Reason <= SingleMax || BotCand.Reason <= SingleMax) {
2237 if (TopCand.Reason < BotCand.Reason) {
2238 IsTopNode = true;
2239 tracePick(TopCand, IsTopNode);
2240 return TopCand.SU;
2241 }
2242 IsTopNode = false;
2243 tracePick(BotCand, IsTopNode);
2244 return BotCand.SU;
2245 }
2246 // Check for a salient pressure difference and pick the best from either side.
2247 if (compareRPDelta(TopCand.RPDelta, BotCand.RPDelta)) {
2248 IsTopNode = true;
2249 tracePick(TopCand, IsTopNode);
2250 return TopCand.SU;
2251 }
2252 // Otherwise prefer the bottom candidate, in node order if all else failed.
2253 if (TopCand.Reason < BotCand.Reason) {
2254 IsTopNode = true;
2255 tracePick(TopCand, IsTopNode);
2256 return TopCand.SU;
2257 }
2258 IsTopNode = false;
2259 tracePick(BotCand, IsTopNode);
2260 return BotCand.SU;
2261 }
2262
2263 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
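/// Honors the ForceTopDown and ForceBottomUp overrides; otherwise defers to
/// pickNodeBidirectional to choose the zone to schedule from.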
2264 SUnit *ConvergingScheduler::pickNode(bool &IsTopNode) {
2265 if (DAG->top() == DAG->bottom()) {
2266 assert(Top.Available.empty() && Top.Pending.empty() &&
2267 Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
2268 return NULL;
2269 }
2270 SUnit *SU;
2271 do {
2272 if (ForceTopDown) {
2273 SU = Top.pickOnlyChoice();
2274 if (!SU) {
2275 CandPolicy NoPolicy;
2276 SchedCandidate TopCand(NoPolicy);
2277 pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2278 assert(TopCand.Reason != NoCand && "failed to find the first candidate");
2279 SU = TopCand.SU;
2280 }
2281 IsTopNode = true;
2282 }
2283 else if (ForceBottomUp) {
2284 SU = Bot.pickOnlyChoice();
2285 if (!SU) {
2286 CandPolicy NoPolicy;
2287 SchedCandidate BotCand(NoPolicy);
2288 pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
2289 assert(BotCand.Reason != NoCand && "failed to find the first candidate");
2290 SU = BotCand.SU;
2291 }
2292 IsTopNode = false;
2293 }
2294 else {
2295 SU = pickNodeBidirectional(IsTopNode);
2296 }
2297 } while (SU->isScheduled);
2298
2299 if (SU->isTopReady())
2300 Top.removeReady(SU);
2301 if (SU->isBottomReady())
2302 Bot.removeReady(SU);
2303
2304 DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
2305 return SU;
2306 }
2307
2308 void ConvergingScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
2309
2310 MachineBasicBlock::iterator InsertPos = SU->getInstr();
2311 if (!isTop)
2312 ++InsertPos;
2313 SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;
2314
2315 // Find already scheduled copies with a single physreg dependence and move
2316 // them just above the scheduled instruction.
2317 for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end();
2318 I != E; ++I) {
2319 if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg()))
2320 continue;
2321 SUnit *DepSU = I->getSUnit();
2322 if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
2323 continue;
2324 MachineInstr *Copy = DepSU->getInstr();
2325 if (!Copy->isCopy())
2326 continue;
2327 DEBUG(dbgs() << " Rescheduling physreg copy ";
2328 I->getSUnit()->dump(DAG));
2329 DAG->moveInstruction(Copy, InsertPos);
2330 }
2331 }
2332
2333 /// Update the scheduler's state after scheduling a node. This is the same node
2334 /// that was just returned by pickNode(). However, ScheduleDAGMI needs to update
2335 /// its state based on the current cycle before MachineSchedStrategy does.
2336 ///
2337 /// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
2338 /// them here. See comments in biasPhysRegCopy.
2339 void ConvergingScheduler::schedNode(SUnit *SU, bool IsTopNode) {
2340 if (IsTopNode) {
2341 SU->TopReadyCycle = Top.CurrCycle;
2342 Top.bumpNode(SU);
2343 if (SU->hasPhysRegUses)
2344 reschedulePhysRegCopies(SU, true);
2345 }
2346 else {
2347 SU->BotReadyCycle = Bot.CurrCycle;
2348 Bot.bumpNode(SU);
2349 if (SU->hasPhysRegDefs)
2350 reschedulePhysRegCopies(SU, false);
2351 }
2352 }
2353
2354 /// Create the standard converging machine scheduler. This will be used as the
2355 /// default scheduler if the target does not set a default.
2356 static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
2357 assert((!ForceTopDown || !ForceBottomUp) &&
2358 "-misched-topdown incompatible with -misched-bottomup");
2359 ScheduleDAGMI *DAG = new ScheduleDAGMI(C, new ConvergingScheduler());
2360 // Register DAG post-processors.
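// Mutations run in registration order once the DAG is built, before any nodes
// are scheduled.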
2361 //
2362 // FIXME: extend the mutation API to allow earlier mutations to instantiate
2363 // data and pass it to later mutations. Have a single mutation that gathers
2364 // the interesting nodes in one pass.
2365 if (EnableCopyConstrain)
2366 DAG->addMutation(new CopyConstrain(DAG->TII, DAG->TRI));
2367 if (EnableLoadCluster)
2368 DAG->addMutation(new LoadClusterMutation(DAG->TII, DAG->TRI));
2369 if (EnableMacroFusion)
2370 DAG->addMutation(new MacroFusion(DAG->TII));
2371 return DAG;
2372 }
2373 static MachineSchedRegistry
2374 ConvergingSchedRegistry("converge", "Standard converging scheduler.",
2375 createConvergingSched);
2376
2377 //===----------------------------------------------------------------------===//
2378 // ILP Scheduler. Currently for experimental analysis of heuristics.
2379 //===----------------------------------------------------------------------===//
2380
2381 namespace {
2382 /// \brief Order nodes by the ILP metric.
2383 struct ILPOrder {
2384 const SchedDFSResult *DFSResult;
2385 const BitVector *ScheduledTrees;
2386 bool MaximizeILP;
2387
2388 ILPOrder(bool MaxILP): DFSResult(0), ScheduledTrees(0), MaximizeILP(MaxILP) {}
2389
2390 /// \brief Apply a less-than relation on node priority.
2391 ///
2392 /// (Return true if A comes after B in the Q.)
2393 bool operator()(const SUnit *A, const SUnit *B) const {
2394 unsigned SchedTreeA = DFSResult->getSubtreeID(A);
2395 unsigned SchedTreeB = DFSResult->getSubtreeID(B);
2396 if (SchedTreeA != SchedTreeB) {
2397 // Unscheduled trees have lower priority.
2398 if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
2399 return ScheduledTrees->test(SchedTreeB);
2400
2401 // Trees with shallower connections have lower priority.
2402 if (DFSResult->getSubtreeLevel(SchedTreeA)
2403 != DFSResult->getSubtreeLevel(SchedTreeB)) {
2404 return DFSResult->getSubtreeLevel(SchedTreeA)
2405 < DFSResult->getSubtreeLevel(SchedTreeB);
2406 }
2407 }
2408 if (MaximizeILP)
2409 return DFSResult->getILP(A) < DFSResult->getILP(B);
2410 else
2411 return DFSResult->getILP(A) > DFSResult->getILP(B);
2412 }
2413 };
2414
2415 /// \brief Schedule based on the ILP metric.
2416 class ILPScheduler : public MachineSchedStrategy {
2417 /// In case all subtrees are eventually connected to a common root through
2418 /// data dependence (e.g. reduction), place an upper limit on their size.
2419 ///
2420 /// FIXME: A subtree limit is generally good, but in the situation commented
2421 /// above, where multiple similar subtrees feed a common root, we should
2422 /// only split at a point where the resulting subtrees will be balanced.
2423 /// (a motivating test case must be found).
2424 static const unsigned SubtreeLimit = 16;
2425
2426 ScheduleDAGMI *DAG;
2427 ILPOrder Cmp;
2428
2429 std::vector<SUnit*> ReadyQ;
2430 public:
2431 ILPScheduler(bool MaximizeILP): DAG(0), Cmp(MaximizeILP) {}
2432
2433 virtual void initialize(ScheduleDAGMI *dag) {
2434 DAG = dag;
2435 DAG->computeDFSResult();
2436 Cmp.DFSResult = DAG->getDFSResult();
2437 Cmp.ScheduledTrees = &DAG->getScheduledTrees();
2438 ReadyQ.clear();
2439 }
2440
2441 virtual void registerRoots() {
2442 // Restore the heap in ReadyQ with the updated DFS results.
2443 std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2444 }
2445
2446 /// Implement MachineSchedStrategy interface.
2447 /// -----------------------------------------
2448
2449 /// Callback to select the highest priority node from the ready Q.
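/// The ILP scheduler only schedules bottom-up, so IsTopNode is always false.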
2450 virtual SUnit *pickNode(bool &IsTopNode) { 2451 if (ReadyQ.empty()) return NULL; 2452 std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 2453 SUnit *SU = ReadyQ.back(); 2454 ReadyQ.pop_back(); 2455 IsTopNode = false; 2456 DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") " 2457 << " ILP: " << DAG->getDFSResult()->getILP(SU) 2458 << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @" 2459 << DAG->getDFSResult()->getSubtreeLevel( 2460 DAG->getDFSResult()->getSubtreeID(SU)) << '\n' 2461 << "Scheduling " << *SU->getInstr()); 2462 return SU; 2463 } 2464 2465 /// \brief Scheduler callback to notify that a new subtree is scheduled. 2466 virtual void scheduleTree(unsigned SubtreeID) { 2467 std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 2468 } 2469 2470 /// Callback after a node is scheduled. Mark a newly scheduled tree, notify 2471 /// DFSResults, and resort the priority Q. 2472 virtual void schedNode(SUnit *SU, bool IsTopNode) { 2473 assert(!IsTopNode && "SchedDFSResult needs bottom-up"); 2474 } 2475 2476 virtual void releaseTopNode(SUnit *) { /*only called for top roots*/ } 2477 2478 virtual void releaseBottomNode(SUnit *SU) { 2479 ReadyQ.push_back(SU); 2480 std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 2481 } 2482 }; 2483 } // namespace 2484 2485 static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) { 2486 return new ScheduleDAGMI(C, new ILPScheduler(true)); 2487 } 2488 static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) { 2489 return new ScheduleDAGMI(C, new ILPScheduler(false)); 2490 } 2491 static MachineSchedRegistry ILPMaxRegistry( 2492 "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler); 2493 static MachineSchedRegistry ILPMinRegistry( 2494 "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler); 2495 2496 //===----------------------------------------------------------------------===// 2497 // Machine Instruction Shuffler for Correctness Testing 2498 //===----------------------------------------------------------------------===// 2499 2500 #ifndef NDEBUG 2501 namespace { 2502 /// Apply a less-than relation on the node order, which corresponds to the 2503 /// instruction order prior to scheduling. IsReverse implements greater-than. 2504 template<bool IsReverse> 2505 struct SUnitOrder { 2506 bool operator()(SUnit *A, SUnit *B) const { 2507 if (IsReverse) 2508 return A->NodeNum > B->NodeNum; 2509 else 2510 return A->NodeNum < B->NodeNum; 2511 } 2512 }; 2513 2514 /// Reorder instructions as much as possible. 2515 class InstructionShuffler : public MachineSchedStrategy { 2516 bool IsAlternating; 2517 bool IsTopDown; 2518 2519 // Using a less-than relation (SUnitOrder<false>) for the TopQ priority 2520 // gives nodes with a higher number higher priority causing the latest 2521 // instructions to be scheduled first. 2522 PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> > 2523 TopQ; 2524 // When scheduling bottom-up, use greater-than as the queue priority. 2525 PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> > 2526 BottomQ; 2527 public: 2528 InstructionShuffler(bool alternate, bool topdown) 2529 : IsAlternating(alternate), IsTopDown(topdown) {} 2530 2531 virtual void initialize(ScheduleDAGMI *) { 2532 TopQ.clear(); 2533 BottomQ.clear(); 2534 } 2535 2536 /// Implement MachineSchedStrategy interface. 
2537 /// ----------------------------------------- 2538 2539 virtual SUnit *pickNode(bool &IsTopNode) { 2540 SUnit *SU; 2541 if (IsTopDown) { 2542 do { 2543 if (TopQ.empty()) return NULL; 2544 SU = TopQ.top(); 2545 TopQ.pop(); 2546 } while (SU->isScheduled); 2547 IsTopNode = true; 2548 } 2549 else { 2550 do { 2551 if (BottomQ.empty()) return NULL; 2552 SU = BottomQ.top(); 2553 BottomQ.pop(); 2554 } while (SU->isScheduled); 2555 IsTopNode = false; 2556 } 2557 if (IsAlternating) 2558 IsTopDown = !IsTopDown; 2559 return SU; 2560 } 2561 2562 virtual void schedNode(SUnit *SU, bool IsTopNode) {} 2563 2564 virtual void releaseTopNode(SUnit *SU) { 2565 TopQ.push(SU); 2566 } 2567 virtual void releaseBottomNode(SUnit *SU) { 2568 BottomQ.push(SU); 2569 } 2570 }; 2571 } // namespace 2572 2573 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) { 2574 bool Alternate = !ForceTopDown && !ForceBottomUp; 2575 bool TopDown = !ForceBottomUp; 2576 assert((TopDown || !ForceTopDown) && 2577 "-misched-topdown incompatible with -misched-bottomup"); 2578 return new ScheduleDAGMI(C, new InstructionShuffler(Alternate, TopDown)); 2579 } 2580 static MachineSchedRegistry ShufflerRegistry( 2581 "shuffle", "Shuffle machine instructions alternating directions", 2582 createInstructionShuffler); 2583 #endif // !NDEBUG 2584 2585 //===----------------------------------------------------------------------===// 2586 // GraphWriter support for ScheduleDAGMI. 2587 //===----------------------------------------------------------------------===// 2588 2589 #ifndef NDEBUG 2590 namespace llvm { 2591 2592 template<> struct GraphTraits< 2593 ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {}; 2594 2595 template<> 2596 struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits { 2597 2598 DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {} 2599 2600 static std::string getGraphName(const ScheduleDAG *G) { 2601 return G->MF.getName(); 2602 } 2603 2604 static bool renderGraphFromBottomUp() { 2605 return true; 2606 } 2607 2608 static bool isNodeHidden(const SUnit *Node) { 2609 return (Node->NumPreds > 10 || Node->NumSuccs > 10); 2610 } 2611 2612 static bool hasNodeAddressLabel(const SUnit *Node, 2613 const ScheduleDAG *Graph) { 2614 return false; 2615 } 2616 2617 /// If you want to override the dot attributes printed for a particular 2618 /// edge, override this method. 
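/// Artificial edges are drawn dashed cyan, other control (non-data) edges
/// dashed blue, and data edges use the default style.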
2619 static std::string getEdgeAttributes(const SUnit *Node, 2620 SUnitIterator EI, 2621 const ScheduleDAG *Graph) { 2622 if (EI.isArtificialDep()) 2623 return "color=cyan,style=dashed"; 2624 if (EI.isCtrlDep()) 2625 return "color=blue,style=dashed"; 2626 return ""; 2627 } 2628 2629 static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) { 2630 std::string Str; 2631 raw_string_ostream SS(Str); 2632 SS << "SU(" << SU->NodeNum << ')'; 2633 return SS.str(); 2634 } 2635 static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) { 2636 return G->getGraphNodeLabel(SU); 2637 } 2638 2639 static std::string getNodeAttributes(const SUnit *N, 2640 const ScheduleDAG *Graph) { 2641 std::string Str("shape=Mrecord"); 2642 const SchedDFSResult *DFS = 2643 static_cast<const ScheduleDAGMI*>(Graph)->getDFSResult(); 2644 if (DFS) { 2645 Str += ",style=filled,fillcolor=\"#"; 2646 Str += DOT::getColorString(DFS->getSubtreeID(N)); 2647 Str += '"'; 2648 } 2649 return Str; 2650 } 2651 }; 2652 } // namespace llvm 2653 #endif // NDEBUG 2654 2655 /// viewGraph - Pop up a ghostview window with the reachable parts of the DAG 2656 /// rendered using 'dot'. 2657 /// 2658 void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) { 2659 #ifndef NDEBUG 2660 ViewGraph(this, Name, false, Title); 2661 #else 2662 errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on " 2663 << "systems with Graphviz or gv!\n"; 2664 #endif // NDEBUG 2665 } 2666 2667 /// Out-of-line implementation with no arguments is handy for gdb. 2668 void ScheduleDAGMI::viewGraph() { 2669 viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName()); 2670 } 2671