1 //===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // MachineScheduler schedules machine instructions after phi elimination. It 11 // preserves LiveIntervals so it can be invoked before register allocation. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #define DEBUG_TYPE "misched" 16 17 #include "llvm/CodeGen/MachineScheduler.h" 18 #include "llvm/ADT/OwningPtr.h" 19 #include "llvm/ADT/PriorityQueue.h" 20 #include "llvm/Analysis/AliasAnalysis.h" 21 #include "llvm/CodeGen/LiveIntervalAnalysis.h" 22 #include "llvm/CodeGen/MachineDominators.h" 23 #include "llvm/CodeGen/MachineLoopInfo.h" 24 #include "llvm/CodeGen/Passes.h" 25 #include "llvm/CodeGen/RegisterClassInfo.h" 26 #include "llvm/CodeGen/ScheduleDFS.h" 27 #include "llvm/CodeGen/ScheduleHazardRecognizer.h" 28 #include "llvm/Support/CommandLine.h" 29 #include "llvm/Support/Debug.h" 30 #include "llvm/Support/ErrorHandling.h" 31 #include "llvm/Support/GraphWriter.h" 32 #include "llvm/Support/raw_ostream.h" 33 #include <queue> 34 35 using namespace llvm; 36 37 namespace llvm { 38 cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden, 39 cl::desc("Force top-down list scheduling")); 40 cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden, 41 cl::desc("Force bottom-up list scheduling")); 42 } 43 44 #ifndef NDEBUG 45 static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden, 46 cl::desc("Pop up a window to show MISched dags after they are processed")); 47 48 static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden, 49 cl::desc("Stop scheduling after N instructions"), cl::init(~0U)); 50 #else 51 static bool ViewMISchedDAGs = false; 52 #endif // NDEBUG 53 54 // Experimental heuristics 55 static cl::opt<bool> EnableLoadCluster("misched-cluster", cl::Hidden, 56 cl::desc("Enable load clustering."), cl::init(true)); 57 58 // Experimental heuristics 59 static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden, 60 cl::desc("Enable scheduling for macro fusion."), cl::init(true)); 61 62 static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden, 63 cl::desc("Verify machine instrs before and after machine scheduling")); 64 65 // DAG subtrees must have at least this many nodes. 66 static const unsigned MinSubtreeSize = 8; 67 68 //===----------------------------------------------------------------------===// 69 // Machine Instruction Scheduling Pass and Registry 70 //===----------------------------------------------------------------------===// 71 72 MachineSchedContext::MachineSchedContext(): 73 MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) { 74 RegClassInfo = new RegisterClassInfo(); 75 } 76 77 MachineSchedContext::~MachineSchedContext() { 78 delete RegClassInfo; 79 } 80 81 namespace { 82 /// MachineScheduler runs after coalescing and before register allocation. 
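/// It instantiates the ScheduleDAGInstrs implementation selected by -misched=
/// (or the target's default, falling back to the converging scheduler) and
/// drives it over every scheduling region of every block.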
83 class MachineScheduler : public MachineSchedContext, 84 public MachineFunctionPass { 85 public: 86 MachineScheduler(); 87 88 virtual void getAnalysisUsage(AnalysisUsage &AU) const; 89 90 virtual void releaseMemory() {} 91 92 virtual bool runOnMachineFunction(MachineFunction&); 93 94 virtual void print(raw_ostream &O, const Module* = 0) const; 95 96 static char ID; // Class identification, replacement for typeinfo 97 }; 98 } // namespace 99 100 char MachineScheduler::ID = 0; 101 102 char &llvm::MachineSchedulerID = MachineScheduler::ID; 103 104 INITIALIZE_PASS_BEGIN(MachineScheduler, "misched", 105 "Machine Instruction Scheduler", false, false) 106 INITIALIZE_AG_DEPENDENCY(AliasAnalysis) 107 INITIALIZE_PASS_DEPENDENCY(SlotIndexes) 108 INITIALIZE_PASS_DEPENDENCY(LiveIntervals) 109 INITIALIZE_PASS_END(MachineScheduler, "misched", 110 "Machine Instruction Scheduler", false, false) 111 112 MachineScheduler::MachineScheduler() 113 : MachineFunctionPass(ID) { 114 initializeMachineSchedulerPass(*PassRegistry::getPassRegistry()); 115 } 116 117 void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const { 118 AU.setPreservesCFG(); 119 AU.addRequiredID(MachineDominatorsID); 120 AU.addRequired<MachineLoopInfo>(); 121 AU.addRequired<AliasAnalysis>(); 122 AU.addRequired<TargetPassConfig>(); 123 AU.addRequired<SlotIndexes>(); 124 AU.addPreserved<SlotIndexes>(); 125 AU.addRequired<LiveIntervals>(); 126 AU.addPreserved<LiveIntervals>(); 127 MachineFunctionPass::getAnalysisUsage(AU); 128 } 129 130 MachinePassRegistry MachineSchedRegistry::Registry; 131 132 /// A dummy default scheduler factory indicates whether the scheduler 133 /// is overridden on the command line. 134 static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) { 135 return 0; 136 } 137 138 /// MachineSchedOpt allows command line selection of the scheduler. 139 static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false, 140 RegisterPassParser<MachineSchedRegistry> > 141 MachineSchedOpt("misched", 142 cl::init(&useDefaultMachineSched), cl::Hidden, 143 cl::desc("Machine instruction scheduler to use")); 144 145 static MachineSchedRegistry 146 DefaultSchedRegistry("default", "Use the target's default scheduler choice.", 147 useDefaultMachineSched); 148 149 /// Forward declare the standard machine scheduler. This will be used as the 150 /// default scheduler if the target does not set a default. 151 static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C); 152 153 154 /// Decrement this iterator until reaching the top or a non-debug instr. 155 static MachineBasicBlock::iterator 156 priorNonDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator Beg) { 157 assert(I != Beg && "reached the top of the region, cannot decrement"); 158 while (--I != Beg) { 159 if (!I->isDebugValue()) 160 break; 161 } 162 return I; 163 } 164 165 /// If this iterator is a debug value, increment until reaching the End or a 166 /// non-debug instruction. 167 static MachineBasicBlock::iterator 168 nextIfDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator End) { 169 for(; I != End; ++I) { 170 if (!I->isDebugValue()) 171 break; 172 } 173 return I; 174 } 175 176 /// Top-level MachineScheduler pass driver. 177 /// 178 /// Visit blocks in function order. Divide each block into scheduling regions 179 /// and visit them bottom-up. Visiting regions bottom-up is not required, but is 180 /// consistent with the DAG builder, which traverses the interior of the 181 /// scheduling regions bottom-up. 
182 ///
183 /// This design avoids exposing scheduling boundaries to the DAG builder,
184 /// simplifying the DAG builder's support for "special" target instructions.
185 /// At the same time the design allows target schedulers to operate across
186 /// scheduling boundaries, for example to bundle the boundary instructions
187 /// without reordering them. This creates complexity, because the target
188 /// scheduler must update the RegionBegin and RegionEnd positions cached by
189 /// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
190 /// design would be to split blocks at scheduling boundaries, but LLVM has a
191 /// general bias against block splitting purely for implementation simplicity.
192 bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
193 DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));
194
195 // Initialize the context of the pass.
196 MF = &mf;
197 MLI = &getAnalysis<MachineLoopInfo>();
198 MDT = &getAnalysis<MachineDominatorTree>();
199 PassConfig = &getAnalysis<TargetPassConfig>();
200 AA = &getAnalysis<AliasAnalysis>();
201
202 LIS = &getAnalysis<LiveIntervals>();
203 const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
204
205 if (VerifyScheduling) {
206 DEBUG(LIS->print(dbgs()));
207 MF->verify(this, "Before machine scheduling.");
208 }
209 RegClassInfo->runOnMachineFunction(*MF);
210
211 // Select the scheduler, or set the default.
212 MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
213 if (Ctor == useDefaultMachineSched) {
214 // Get the default scheduler set by the target.
215 Ctor = MachineSchedRegistry::getDefault();
216 if (!Ctor) {
217 Ctor = createConvergingSched;
218 MachineSchedRegistry::setDefault(Ctor);
219 }
220 }
221 // Instantiate the selected scheduler.
222 OwningPtr<ScheduleDAGInstrs> Scheduler(Ctor(this));
223
224 // Visit all machine basic blocks.
225 //
226 // TODO: Visit blocks in global postorder or postorder within the bottom-up
227 // loop tree. Then we can optionally compute global RegPressure.
228 for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
229 MBB != MBBEnd; ++MBB) {
230
231 Scheduler->startBlock(MBB);
232
233 // Break the block into scheduling regions [I, RegionEnd), and schedule each
234 // region as soon as it is discovered. RegionEnd points to the scheduling
235 // boundary at the bottom of the region. The DAG does not include RegionEnd,
236 // but the region does (i.e. the next RegionEnd is above the previous
237 // RegionBegin). If the current block has no terminator then RegionEnd ==
238 // MBB->end() for the bottom region.
239 //
240 // The Scheduler may insert instructions during either schedule() or
241 // exitRegion(), even for empty regions. So the local iterators 'I' and
242 // 'RegionEnd' are invalid across these calls.
243 unsigned RemainingInstrs = MBB->size();
244 for(MachineBasicBlock::iterator RegionEnd = MBB->end();
245 RegionEnd != MBB->begin(); RegionEnd = Scheduler->begin()) {
246
247 // Avoid decrementing RegionEnd for blocks with no terminator.
248 if (RegionEnd != MBB->end()
249 || TII->isSchedulingBoundary(llvm::prior(RegionEnd), MBB, *MF)) {
250 --RegionEnd;
251 // Count the boundary instruction.
252 --RemainingInstrs;
253 }
254
255 // The next region starts above the previous region. Look backward in the
256 // instruction stream until we find the nearest boundary.
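// RemainingInstrs is decremented once per instruction visited so the assert
// at the end of the block loop can check that nothing was missed or counted twice.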
257 MachineBasicBlock::iterator I = RegionEnd; 258 for(;I != MBB->begin(); --I, --RemainingInstrs) { 259 if (TII->isSchedulingBoundary(llvm::prior(I), MBB, *MF)) 260 break; 261 } 262 // Notify the scheduler of the region, even if we may skip scheduling 263 // it. Perhaps it still needs to be bundled. 264 Scheduler->enterRegion(MBB, I, RegionEnd, RemainingInstrs); 265 266 // Skip empty scheduling regions (0 or 1 schedulable instructions). 267 if (I == RegionEnd || I == llvm::prior(RegionEnd)) { 268 // Close the current region. Bundle the terminator if needed. 269 // This invalidates 'RegionEnd' and 'I'. 270 Scheduler->exitRegion(); 271 continue; 272 } 273 DEBUG(dbgs() << "********** MI Scheduling **********\n"); 274 DEBUG(dbgs() << MF->getName() 275 << ":BB#" << MBB->getNumber() << " " << MBB->getName() 276 << "\n From: " << *I << " To: "; 277 if (RegionEnd != MBB->end()) dbgs() << *RegionEnd; 278 else dbgs() << "End"; 279 dbgs() << " Remaining: " << RemainingInstrs << "\n"); 280 281 // Schedule a region: possibly reorder instructions. 282 // This invalidates 'RegionEnd' and 'I'. 283 Scheduler->schedule(); 284 285 // Close the current region. 286 Scheduler->exitRegion(); 287 288 // Scheduling has invalidated the current iterator 'I'. Ask the 289 // scheduler for the top of it's scheduled region. 290 RegionEnd = Scheduler->begin(); 291 } 292 assert(RemainingInstrs == 0 && "Instruction count mismatch!"); 293 Scheduler->finishBlock(); 294 } 295 Scheduler->finalizeSchedule(); 296 DEBUG(LIS->print(dbgs())); 297 if (VerifyScheduling) 298 MF->verify(this, "After machine scheduling."); 299 return true; 300 } 301 302 void MachineScheduler::print(raw_ostream &O, const Module* m) const { 303 // unimplemented 304 } 305 306 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 307 void ReadyQueue::dump() { 308 dbgs() << " " << Name << ": "; 309 for (unsigned i = 0, e = Queue.size(); i < e; ++i) 310 dbgs() << Queue[i]->NodeNum << " "; 311 dbgs() << "\n"; 312 } 313 #endif 314 315 //===----------------------------------------------------------------------===// 316 // ScheduleDAGMI - Base class for MachineInstr scheduling with LiveIntervals 317 // preservation. 318 //===----------------------------------------------------------------------===// 319 320 ScheduleDAGMI::~ScheduleDAGMI() { 321 delete DFSResult; 322 DeleteContainerPointers(Mutations); 323 delete SchedImpl; 324 } 325 326 bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) { 327 if (SuccSU != &ExitSU) { 328 // Do not use WillCreateCycle, it assumes SD scheduling. 329 // If Pred is reachable from Succ, then the edge creates a cycle. 330 if (Topo.IsReachable(PredDep.getSUnit(), SuccSU)) 331 return false; 332 Topo.AddPred(SuccSU, PredDep.getSUnit()); 333 } 334 SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial()); 335 // Return true regardless of whether a new edge needed to be inserted. 336 return true; 337 } 338 339 /// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When 340 /// NumPredsLeft reaches zero, release the successor node. 341 /// 342 /// FIXME: Adjust SuccSU height based on MinLatency. 343 void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) { 344 SUnit *SuccSU = SuccEdge->getSUnit(); 345 346 if (SuccEdge->isWeak()) { 347 --SuccSU->WeakPredsLeft; 348 if (SuccEdge->isCluster()) 349 NextClusterSucc = SuccSU; 350 return; 351 } 352 #ifndef NDEBUG 353 if (SuccSU->NumPredsLeft == 0) { 354 dbgs() << "*** Scheduling failed! 
***\n"; 355 SuccSU->dump(this); 356 dbgs() << " has been released too many times!\n"; 357 llvm_unreachable(0); 358 } 359 #endif 360 --SuccSU->NumPredsLeft; 361 if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU) 362 SchedImpl->releaseTopNode(SuccSU); 363 } 364 365 /// releaseSuccessors - Call releaseSucc on each of SU's successors. 366 void ScheduleDAGMI::releaseSuccessors(SUnit *SU) { 367 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); 368 I != E; ++I) { 369 releaseSucc(SU, &*I); 370 } 371 } 372 373 /// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When 374 /// NumSuccsLeft reaches zero, release the predecessor node. 375 /// 376 /// FIXME: Adjust PredSU height based on MinLatency. 377 void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) { 378 SUnit *PredSU = PredEdge->getSUnit(); 379 380 if (PredEdge->isWeak()) { 381 --PredSU->WeakSuccsLeft; 382 if (PredEdge->isCluster()) 383 NextClusterPred = PredSU; 384 return; 385 } 386 #ifndef NDEBUG 387 if (PredSU->NumSuccsLeft == 0) { 388 dbgs() << "*** Scheduling failed! ***\n"; 389 PredSU->dump(this); 390 dbgs() << " has been released too many times!\n"; 391 llvm_unreachable(0); 392 } 393 #endif 394 --PredSU->NumSuccsLeft; 395 if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) 396 SchedImpl->releaseBottomNode(PredSU); 397 } 398 399 /// releasePredecessors - Call releasePred on each of SU's predecessors. 400 void ScheduleDAGMI::releasePredecessors(SUnit *SU) { 401 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); 402 I != E; ++I) { 403 releasePred(SU, &*I); 404 } 405 } 406 407 /// This is normally called from the main scheduler loop but may also be invoked 408 /// by the scheduling strategy to perform additional code motion. 409 void ScheduleDAGMI::moveInstruction(MachineInstr *MI, 410 MachineBasicBlock::iterator InsertPos) { 411 // Advance RegionBegin if the first instruction moves down. 412 if (&*RegionBegin == MI) 413 ++RegionBegin; 414 415 // Update the instruction stream. 416 BB->splice(InsertPos, BB, MI); 417 418 // Update LiveIntervals 419 LIS->handleMove(MI, /*UpdateFlags=*/true); 420 421 // Recede RegionBegin if an instruction moves above the first. 422 if (RegionBegin == InsertPos) 423 RegionBegin = MI; 424 } 425 426 bool ScheduleDAGMI::checkSchedLimit() { 427 #ifndef NDEBUG 428 if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) { 429 CurrentTop = CurrentBottom; 430 return false; 431 } 432 ++NumInstrsScheduled; 433 #endif 434 return true; 435 } 436 437 /// enterRegion - Called back from MachineScheduler::runOnMachineFunction after 438 /// crossing a scheduling boundary. [begin, end) includes all instructions in 439 /// the region, including the boundary itself and single-instruction regions 440 /// that don't get scheduled. 441 void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb, 442 MachineBasicBlock::iterator begin, 443 MachineBasicBlock::iterator end, 444 unsigned endcount) 445 { 446 ScheduleDAGInstrs::enterRegion(bb, begin, end, endcount); 447 448 // For convenience remember the end of the liveness region. 449 LiveRegionEnd = 450 (RegionEnd == bb->end()) ? RegionEnd : llvm::next(RegionEnd); 451 } 452 453 // Setup the register pressure trackers for the top scheduled top and bottom 454 // scheduled regions. 455 void ScheduleDAGMI::initRegPressure() { 456 TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin); 457 BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd); 458 459 // Close the RPTracker to finalize live ins. 
460 RPTracker.closeRegion(); 461 462 DEBUG(RPTracker.getPressure().dump(TRI)); 463 464 // Initialize the live ins and live outs. 465 TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs); 466 BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs); 467 468 // Close one end of the tracker so we can call 469 // getMaxUpward/DownwardPressureDelta before advancing across any 470 // instructions. This converts currently live regs into live ins/outs. 471 TopRPTracker.closeTop(); 472 BotRPTracker.closeBottom(); 473 474 // Account for liveness generated by the region boundary. 475 if (LiveRegionEnd != RegionEnd) 476 BotRPTracker.recede(); 477 478 assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom"); 479 480 // Cache the list of excess pressure sets in this region. This will also track 481 // the max pressure in the scheduled code for these sets. 482 RegionCriticalPSets.clear(); 483 const std::vector<unsigned> &RegionPressure = 484 RPTracker.getPressure().MaxSetPressure; 485 for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) { 486 unsigned Limit = TRI->getRegPressureSetLimit(i); 487 DEBUG(dbgs() << TRI->getRegPressureSetName(i) 488 << "Limit " << Limit 489 << " Actual " << RegionPressure[i] << "\n"); 490 if (RegionPressure[i] > Limit) 491 RegionCriticalPSets.push_back(PressureElement(i, 0)); 492 } 493 DEBUG(dbgs() << "Excess PSets: "; 494 for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i) 495 dbgs() << TRI->getRegPressureSetName( 496 RegionCriticalPSets[i].PSetID) << " "; 497 dbgs() << "\n"); 498 } 499 500 // FIXME: When the pressure tracker deals in pressure differences then we won't 501 // iterate over all RegionCriticalPSets[i]. 502 void ScheduleDAGMI:: 503 updateScheduledPressure(const std::vector<unsigned> &NewMaxPressure) { 504 for (unsigned i = 0, e = RegionCriticalPSets.size(); i < e; ++i) { 505 unsigned ID = RegionCriticalPSets[i].PSetID; 506 int &MaxUnits = RegionCriticalPSets[i].UnitIncrease; 507 if ((int)NewMaxPressure[ID] > MaxUnits) 508 MaxUnits = NewMaxPressure[ID]; 509 } 510 } 511 512 /// schedule - Called back from MachineScheduler::runOnMachineFunction 513 /// after setting up the current scheduling region. [RegionBegin, RegionEnd) 514 /// only includes instructions that have DAG nodes, not scheduling boundaries. 515 /// 516 /// This is a skeletal driver, with all the functionality pushed into helpers, 517 /// so that it can be easilly extended by experimental schedulers. Generally, 518 /// implementing MachineSchedStrategy should be sufficient to implement a new 519 /// scheduling algorithm. However, if a scheduler further subclasses 520 /// ScheduleDAGMI then it will want to override this virtual method in order to 521 /// update any specialized state. 522 void ScheduleDAGMI::schedule() { 523 buildDAGWithRegPressure(); 524 525 Topo.InitDAGTopologicalSorting(); 526 527 postprocessDAG(); 528 529 SmallVector<SUnit*, 8> TopRoots, BotRoots; 530 findRootsAndBiasEdges(TopRoots, BotRoots); 531 532 // Initialize the strategy before modifying the DAG. 533 // This may initialize a DFSResult to be used for queue priority. 534 SchedImpl->initialize(this); 535 536 DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su) 537 SUnits[su].dumpAll(this)); 538 if (ViewMISchedDAGs) viewGraph(); 539 540 // Initialize ready queues now that the DAG and priority data are finalized. 
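// initQueues() releases the DAG roots to the strategy and positions
// CurrentTop/CurrentBottom at the region boundaries.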
541 initQueues(TopRoots, BotRoots); 542 543 bool IsTopNode = false; 544 while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) { 545 assert(!SU->isScheduled && "Node already scheduled"); 546 if (!checkSchedLimit()) 547 break; 548 549 scheduleMI(SU, IsTopNode); 550 551 updateQueues(SU, IsTopNode); 552 } 553 assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone."); 554 555 placeDebugValues(); 556 557 DEBUG({ 558 unsigned BBNum = begin()->getParent()->getNumber(); 559 dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n"; 560 dumpSchedule(); 561 dbgs() << '\n'; 562 }); 563 } 564 565 /// Build the DAG and setup three register pressure trackers. 566 void ScheduleDAGMI::buildDAGWithRegPressure() { 567 // Initialize the register pressure tracker used by buildSchedGraph. 568 RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd); 569 570 // Account for liveness generate by the region boundary. 571 if (LiveRegionEnd != RegionEnd) 572 RPTracker.recede(); 573 574 // Build the DAG, and compute current register pressure. 575 buildSchedGraph(AA, &RPTracker); 576 577 // Initialize top/bottom trackers after computing region pressure. 578 initRegPressure(); 579 } 580 581 /// Apply each ScheduleDAGMutation step in order. 582 void ScheduleDAGMI::postprocessDAG() { 583 for (unsigned i = 0, e = Mutations.size(); i < e; ++i) { 584 Mutations[i]->apply(this); 585 } 586 } 587 588 void ScheduleDAGMI::computeDFSResult() { 589 if (!DFSResult) 590 DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize); 591 DFSResult->clear(); 592 ScheduledTrees.clear(); 593 DFSResult->resize(SUnits.size()); 594 DFSResult->compute(SUnits); 595 ScheduledTrees.resize(DFSResult->getNumSubtrees()); 596 } 597 598 void ScheduleDAGMI::findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots, 599 SmallVectorImpl<SUnit*> &BotRoots) { 600 for (std::vector<SUnit>::iterator 601 I = SUnits.begin(), E = SUnits.end(); I != E; ++I) { 602 SUnit *SU = &(*I); 603 assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits"); 604 605 // Order predecessors so DFSResult follows the critical path. 606 SU->biasCriticalPath(); 607 608 // A SUnit is ready to top schedule if it has no predecessors. 609 if (!I->NumPredsLeft) 610 TopRoots.push_back(SU); 611 // A SUnit is ready to bottom schedule if it has no successors. 612 if (!I->NumSuccsLeft) 613 BotRoots.push_back(SU); 614 } 615 ExitSU.biasCriticalPath(); 616 } 617 618 /// Identify DAG roots and setup scheduler queues. 619 void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots, 620 ArrayRef<SUnit*> BotRoots) { 621 NextClusterSucc = NULL; 622 NextClusterPred = NULL; 623 624 // Release all DAG roots for scheduling, not including EntrySU/ExitSU. 625 // 626 // Nodes with unreleased weak edges can still be roots. 627 // Release top roots in forward order. 628 for (SmallVectorImpl<SUnit*>::const_iterator 629 I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) { 630 SchedImpl->releaseTopNode(*I); 631 } 632 // Release bottom roots in reverse order so the higher priority nodes appear 633 // first. This is more natural and slightly more efficient. 634 for (SmallVectorImpl<SUnit*>::const_reverse_iterator 635 I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) { 636 SchedImpl->releaseBottomNode(*I); 637 } 638 639 releaseSuccessors(&EntrySU); 640 releasePredecessors(&ExitSU); 641 642 SchedImpl->registerRoots(); 643 644 // Advance past initial DebugValues. 
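// Debug values have no DAG nodes, so CurrentTop must start at the first real
// instruction before pressure tracking begins.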
645 assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker"); 646 CurrentTop = nextIfDebug(RegionBegin, RegionEnd); 647 TopRPTracker.setPos(CurrentTop); 648 649 CurrentBottom = RegionEnd; 650 } 651 652 /// Move an instruction and update register pressure. 653 void ScheduleDAGMI::scheduleMI(SUnit *SU, bool IsTopNode) { 654 // Move the instruction to its new location in the instruction stream. 655 MachineInstr *MI = SU->getInstr(); 656 657 if (IsTopNode) { 658 assert(SU->isTopReady() && "node still has unscheduled dependencies"); 659 if (&*CurrentTop == MI) 660 CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom); 661 else { 662 moveInstruction(MI, CurrentTop); 663 TopRPTracker.setPos(MI); 664 } 665 666 // Update top scheduled pressure. 667 TopRPTracker.advance(); 668 assert(TopRPTracker.getPos() == CurrentTop && "out of sync"); 669 updateScheduledPressure(TopRPTracker.getPressure().MaxSetPressure); 670 } 671 else { 672 assert(SU->isBottomReady() && "node still has unscheduled dependencies"); 673 MachineBasicBlock::iterator priorII = 674 priorNonDebug(CurrentBottom, CurrentTop); 675 if (&*priorII == MI) 676 CurrentBottom = priorII; 677 else { 678 if (&*CurrentTop == MI) { 679 CurrentTop = nextIfDebug(++CurrentTop, priorII); 680 TopRPTracker.setPos(CurrentTop); 681 } 682 moveInstruction(MI, CurrentBottom); 683 CurrentBottom = MI; 684 } 685 // Update bottom scheduled pressure. 686 BotRPTracker.recede(); 687 assert(BotRPTracker.getPos() == CurrentBottom && "out of sync"); 688 updateScheduledPressure(BotRPTracker.getPressure().MaxSetPressure); 689 } 690 } 691 692 /// Update scheduler queues after scheduling an instruction. 693 void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) { 694 // Release dependent instructions for scheduling. 695 if (IsTopNode) 696 releaseSuccessors(SU); 697 else 698 releasePredecessors(SU); 699 700 SU->isScheduled = true; 701 702 if (DFSResult) { 703 unsigned SubtreeID = DFSResult->getSubtreeID(SU); 704 if (!ScheduledTrees.test(SubtreeID)) { 705 ScheduledTrees.set(SubtreeID); 706 DFSResult->scheduleTree(SubtreeID); 707 SchedImpl->scheduleTree(SubtreeID); 708 } 709 } 710 711 // Notify the scheduling strategy after updating the DAG. 712 SchedImpl->schedNode(SU, IsTopNode); 713 } 714 715 /// Reinsert any remaining debug_values, just like the PostRA scheduler. 716 void ScheduleDAGMI::placeDebugValues() { 717 // If first instruction was a DBG_VALUE then put it back. 
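// The DAG builder pulled DBG_VALUEs out of the region; splice each one back
// in directly after the instruction it originally followed.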
718 if (FirstDbgValue) { 719 BB->splice(RegionBegin, BB, FirstDbgValue); 720 RegionBegin = FirstDbgValue; 721 } 722 723 for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator 724 DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) { 725 std::pair<MachineInstr *, MachineInstr *> P = *prior(DI); 726 MachineInstr *DbgValue = P.first; 727 MachineBasicBlock::iterator OrigPrevMI = P.second; 728 if (&*RegionBegin == DbgValue) 729 ++RegionBegin; 730 BB->splice(++OrigPrevMI, BB, DbgValue); 731 if (OrigPrevMI == llvm::prior(RegionEnd)) 732 RegionEnd = DbgValue; 733 } 734 DbgValues.clear(); 735 FirstDbgValue = NULL; 736 } 737 738 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 739 void ScheduleDAGMI::dumpSchedule() const { 740 for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) { 741 if (SUnit *SU = getSUnit(&(*MI))) 742 SU->dump(this); 743 else 744 dbgs() << "Missing SUnit\n"; 745 } 746 } 747 #endif 748 749 //===----------------------------------------------------------------------===// 750 // LoadClusterMutation - DAG post-processing to cluster loads. 751 //===----------------------------------------------------------------------===// 752 753 namespace { 754 /// \brief Post-process the DAG to create cluster edges between neighboring 755 /// loads. 756 class LoadClusterMutation : public ScheduleDAGMutation { 757 struct LoadInfo { 758 SUnit *SU; 759 unsigned BaseReg; 760 unsigned Offset; 761 LoadInfo(SUnit *su, unsigned reg, unsigned ofs) 762 : SU(su), BaseReg(reg), Offset(ofs) {} 763 }; 764 static bool LoadInfoLess(const LoadClusterMutation::LoadInfo &LHS, 765 const LoadClusterMutation::LoadInfo &RHS); 766 767 const TargetInstrInfo *TII; 768 const TargetRegisterInfo *TRI; 769 public: 770 LoadClusterMutation(const TargetInstrInfo *tii, 771 const TargetRegisterInfo *tri) 772 : TII(tii), TRI(tri) {} 773 774 virtual void apply(ScheduleDAGMI *DAG); 775 protected: 776 void clusterNeighboringLoads(ArrayRef<SUnit*> Loads, ScheduleDAGMI *DAG); 777 }; 778 } // anonymous 779 780 bool LoadClusterMutation::LoadInfoLess( 781 const LoadClusterMutation::LoadInfo &LHS, 782 const LoadClusterMutation::LoadInfo &RHS) { 783 if (LHS.BaseReg != RHS.BaseReg) 784 return LHS.BaseReg < RHS.BaseReg; 785 return LHS.Offset < RHS.Offset; 786 } 787 788 void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads, 789 ScheduleDAGMI *DAG) { 790 SmallVector<LoadClusterMutation::LoadInfo,32> LoadRecords; 791 for (unsigned Idx = 0, End = Loads.size(); Idx != End; ++Idx) { 792 SUnit *SU = Loads[Idx]; 793 unsigned BaseReg; 794 unsigned Offset; 795 if (TII->getLdStBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI)) 796 LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset)); 797 } 798 if (LoadRecords.size() < 2) 799 return; 800 std::sort(LoadRecords.begin(), LoadRecords.end(), LoadInfoLess); 801 unsigned ClusterLength = 1; 802 for (unsigned Idx = 0, End = LoadRecords.size(); Idx < (End - 1); ++Idx) { 803 if (LoadRecords[Idx].BaseReg != LoadRecords[Idx+1].BaseReg) { 804 ClusterLength = 1; 805 continue; 806 } 807 808 SUnit *SUa = LoadRecords[Idx].SU; 809 SUnit *SUb = LoadRecords[Idx+1].SU; 810 if (TII->shouldClusterLoads(SUa->getInstr(), SUb->getInstr(), ClusterLength) 811 && DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) { 812 813 DEBUG(dbgs() << "Cluster loads SU(" << SUa->NodeNum << ") - SU(" 814 << SUb->NodeNum << ")\n"); 815 // Copy successor edges from SUa to SUb. Interleaving computation 816 // dependent on SUa can prevent load combining due to register reuse. 
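// The copies are artificial edges, which addEdge() registers as non-required,
// so they bias the schedule toward keeping the loads adjacent without adding
// hard ordering constraints.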
817 // Predecessor edges do not need to be copied from SUb to SUa since nearby 818 // loads should have effectively the same inputs. 819 for (SUnit::const_succ_iterator 820 SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) { 821 if (SI->getSUnit() == SUb) 822 continue; 823 DEBUG(dbgs() << " Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n"); 824 DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial)); 825 } 826 ++ClusterLength; 827 } 828 else 829 ClusterLength = 1; 830 } 831 } 832 833 /// \brief Callback from DAG postProcessing to create cluster edges for loads. 834 void LoadClusterMutation::apply(ScheduleDAGMI *DAG) { 835 // Map DAG NodeNum to store chain ID. 836 DenseMap<unsigned, unsigned> StoreChainIDs; 837 // Map each store chain to a set of dependent loads. 838 SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents; 839 for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) { 840 SUnit *SU = &DAG->SUnits[Idx]; 841 if (!SU->getInstr()->mayLoad()) 842 continue; 843 unsigned ChainPredID = DAG->SUnits.size(); 844 for (SUnit::const_pred_iterator 845 PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) { 846 if (PI->isCtrl()) { 847 ChainPredID = PI->getSUnit()->NodeNum; 848 break; 849 } 850 } 851 // Check if this chain-like pred has been seen 852 // before. ChainPredID==MaxNodeID for loads at the top of the schedule. 853 unsigned NumChains = StoreChainDependents.size(); 854 std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result = 855 StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains)); 856 if (Result.second) 857 StoreChainDependents.resize(NumChains + 1); 858 StoreChainDependents[Result.first->second].push_back(SU); 859 } 860 // Iterate over the store chains. 861 for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx) 862 clusterNeighboringLoads(StoreChainDependents[Idx], DAG); 863 } 864 865 //===----------------------------------------------------------------------===// 866 // MacroFusion - DAG post-processing to encourage fusion of macro ops. 867 //===----------------------------------------------------------------------===// 868 869 namespace { 870 /// \brief Post-process the DAG to create cluster edges between instructions 871 /// that may be fused by the processor into a single operation. 872 class MacroFusion : public ScheduleDAGMutation { 873 const TargetInstrInfo *TII; 874 public: 875 MacroFusion(const TargetInstrInfo *tii): TII(tii) {} 876 877 virtual void apply(ScheduleDAGMI *DAG); 878 }; 879 } // anonymous 880 881 /// \brief Callback from DAG postProcessing to create cluster edges to encourage 882 /// fused operations. 883 void MacroFusion::apply(ScheduleDAGMI *DAG) { 884 // For now, assume targets can only fuse with the branch. 885 MachineInstr *Branch = DAG->ExitSU.getInstr(); 886 if (!Branch) 887 return; 888 889 for (unsigned Idx = DAG->SUnits.size(); Idx > 0;) { 890 SUnit *SU = &DAG->SUnits[--Idx]; 891 if (!TII->shouldScheduleAdjacent(SU->getInstr(), Branch)) 892 continue; 893 894 // Create a single weak edge from SU to ExitSU. The only effect is to cause 895 // bottom-up scheduling to heavily prioritize the clustered SU. There is no 896 // need to copy predecessor edges from ExitSU to SU, since top-down 897 // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling 898 // of SU, we could create an artificial edge from the deepest root, but it 899 // hasn't been needed yet. 
900 bool Success = DAG->addEdge(&DAG->ExitSU, SDep(SU, SDep::Cluster)); 901 (void)Success; 902 assert(Success && "No DAG nodes should be reachable from ExitSU"); 903 904 DEBUG(dbgs() << "Macro Fuse SU(" << SU->NodeNum << ")\n"); 905 break; 906 } 907 } 908 909 //===----------------------------------------------------------------------===// 910 // ConvergingScheduler - Implementation of the standard MachineSchedStrategy. 911 //===----------------------------------------------------------------------===// 912 913 namespace { 914 /// ConvergingScheduler shrinks the unscheduled zone using heuristics to balance 915 /// the schedule. 916 class ConvergingScheduler : public MachineSchedStrategy { 917 public: 918 /// Represent the type of SchedCandidate found within a single queue. 919 /// pickNodeBidirectional depends on these listed by decreasing priority. 920 enum CandReason { 921 NoCand, PhysRegCopy, SingleExcess, SingleCritical, Cluster, 922 ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce, 923 TopDepthReduce, TopPathReduce, SingleMax, MultiPressure, NextDefUse, 924 NodeOrder}; 925 926 #ifndef NDEBUG 927 static const char *getReasonStr(ConvergingScheduler::CandReason Reason); 928 #endif 929 930 /// Policy for scheduling the next instruction in the candidate's zone. 931 struct CandPolicy { 932 bool ReduceLatency; 933 unsigned ReduceResIdx; 934 unsigned DemandResIdx; 935 936 CandPolicy(): ReduceLatency(false), ReduceResIdx(0), DemandResIdx(0) {} 937 }; 938 939 /// Status of an instruction's critical resource consumption. 940 struct SchedResourceDelta { 941 // Count critical resources in the scheduled region required by SU. 942 unsigned CritResources; 943 944 // Count critical resources from another region consumed by SU. 945 unsigned DemandedResources; 946 947 SchedResourceDelta(): CritResources(0), DemandedResources(0) {} 948 949 bool operator==(const SchedResourceDelta &RHS) const { 950 return CritResources == RHS.CritResources 951 && DemandedResources == RHS.DemandedResources; 952 } 953 bool operator!=(const SchedResourceDelta &RHS) const { 954 return !operator==(RHS); 955 } 956 }; 957 958 /// Store the state used by ConvergingScheduler heuristics, required for the 959 /// lifetime of one invocation of pickNode(). 960 struct SchedCandidate { 961 CandPolicy Policy; 962 963 // The best SUnit candidate. 964 SUnit *SU; 965 966 // The reason for this candidate. 967 CandReason Reason; 968 969 // Register pressure values for the best candidate. 970 RegPressureDelta RPDelta; 971 972 // Critical resource consumption of the best candidate. 973 SchedResourceDelta ResDelta; 974 975 SchedCandidate(const CandPolicy &policy) 976 : Policy(policy), SU(NULL), Reason(NoCand) {} 977 978 bool isValid() const { return SU; } 979 980 // Copy the status of another candidate without changing policy. 981 void setBest(SchedCandidate &Best) { 982 assert(Best.Reason != NoCand && "uninitialized Sched candidate"); 983 SU = Best.SU; 984 Reason = Best.Reason; 985 RPDelta = Best.RPDelta; 986 ResDelta = Best.ResDelta; 987 } 988 989 void initResourceDelta(const ScheduleDAGMI *DAG, 990 const TargetSchedModel *SchedModel); 991 }; 992 993 /// Summarize the unscheduled region. 994 struct SchedRemainder { 995 // Critical path through the DAG in expected latency. 996 unsigned CriticalPath; 997 998 // Unscheduled resources 999 SmallVector<unsigned, 16> RemainingCounts; 1000 // Critical resource for the unscheduled zone. 1001 unsigned CritResIdx; 1002 // Number of micro-ops left to schedule. 
1003 unsigned RemainingMicroOps; 1004 1005 void reset() { 1006 CriticalPath = 0; 1007 RemainingCounts.clear(); 1008 CritResIdx = 0; 1009 RemainingMicroOps = 0; 1010 } 1011 1012 SchedRemainder() { reset(); } 1013 1014 void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel); 1015 1016 unsigned getMaxRemainingCount(const TargetSchedModel *SchedModel) const { 1017 if (!SchedModel->hasInstrSchedModel()) 1018 return 0; 1019 1020 return std::max( 1021 RemainingMicroOps * SchedModel->getMicroOpFactor(), 1022 RemainingCounts[CritResIdx]); 1023 } 1024 }; 1025 1026 /// Each Scheduling boundary is associated with ready queues. It tracks the 1027 /// current cycle in the direction of movement, and maintains the state 1028 /// of "hazards" and other interlocks at the current cycle. 1029 struct SchedBoundary { 1030 ScheduleDAGMI *DAG; 1031 const TargetSchedModel *SchedModel; 1032 SchedRemainder *Rem; 1033 1034 ReadyQueue Available; 1035 ReadyQueue Pending; 1036 bool CheckPending; 1037 1038 // For heuristics, keep a list of the nodes that immediately depend on the 1039 // most recently scheduled node. 1040 SmallPtrSet<const SUnit*, 8> NextSUs; 1041 1042 ScheduleHazardRecognizer *HazardRec; 1043 1044 unsigned CurrCycle; 1045 unsigned IssueCount; 1046 1047 /// MinReadyCycle - Cycle of the soonest available instruction. 1048 unsigned MinReadyCycle; 1049 1050 // The expected latency of the critical path in this scheduled zone. 1051 unsigned ExpectedLatency; 1052 1053 // Resources used in the scheduled zone beyond this boundary. 1054 SmallVector<unsigned, 16> ResourceCounts; 1055 1056 // Cache the critical resources ID in this scheduled zone. 1057 unsigned CritResIdx; 1058 1059 // Is the scheduled region resource limited vs. latency limited. 1060 bool IsResourceLimited; 1061 1062 unsigned ExpectedCount; 1063 1064 #ifndef NDEBUG 1065 // Remember the greatest min operand latency. 1066 unsigned MaxMinLatency; 1067 #endif 1068 1069 void reset() { 1070 // A new HazardRec is created for each DAG and owned by SchedBoundary. 1071 delete HazardRec; 1072 1073 Available.clear(); 1074 Pending.clear(); 1075 CheckPending = false; 1076 NextSUs.clear(); 1077 HazardRec = 0; 1078 CurrCycle = 0; 1079 IssueCount = 0; 1080 MinReadyCycle = UINT_MAX; 1081 ExpectedLatency = 0; 1082 ResourceCounts.resize(1); 1083 assert(!ResourceCounts[0] && "nonzero count for bad resource"); 1084 CritResIdx = 0; 1085 IsResourceLimited = false; 1086 ExpectedCount = 0; 1087 #ifndef NDEBUG 1088 MaxMinLatency = 0; 1089 #endif 1090 // Reserve a zero-count for invalid CritResIdx. 1091 ResourceCounts.resize(1); 1092 } 1093 1094 /// Pending queues extend the ready queues with the same ID and the 1095 /// PendingFlag set. 
1096 SchedBoundary(unsigned ID, const Twine &Name): 1097 DAG(0), SchedModel(0), Rem(0), Available(ID, Name+".A"), 1098 Pending(ID << ConvergingScheduler::LogMaxQID, Name+".P"), 1099 HazardRec(0) { 1100 reset(); 1101 } 1102 1103 ~SchedBoundary() { delete HazardRec; } 1104 1105 void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, 1106 SchedRemainder *rem); 1107 1108 bool isTop() const { 1109 return Available.getID() == ConvergingScheduler::TopQID; 1110 } 1111 1112 unsigned getUnscheduledLatency(SUnit *SU) const { 1113 if (isTop()) 1114 return SU->getHeight(); 1115 return SU->getDepth() + SU->Latency; 1116 } 1117 1118 unsigned getCriticalCount() const { 1119 return ResourceCounts[CritResIdx]; 1120 } 1121 1122 bool checkHazard(SUnit *SU); 1123 1124 void setLatencyPolicy(CandPolicy &Policy); 1125 1126 void releaseNode(SUnit *SU, unsigned ReadyCycle); 1127 1128 void bumpCycle(); 1129 1130 void countResource(unsigned PIdx, unsigned Cycles); 1131 1132 void bumpNode(SUnit *SU); 1133 1134 void releasePending(); 1135 1136 void removeReady(SUnit *SU); 1137 1138 SUnit *pickOnlyChoice(); 1139 }; 1140 1141 private: 1142 ScheduleDAGMI *DAG; 1143 const TargetSchedModel *SchedModel; 1144 const TargetRegisterInfo *TRI; 1145 1146 // State of the top and bottom scheduled instruction boundaries. 1147 SchedRemainder Rem; 1148 SchedBoundary Top; 1149 SchedBoundary Bot; 1150 1151 public: 1152 /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both) 1153 enum { 1154 TopQID = 1, 1155 BotQID = 2, 1156 LogMaxQID = 2 1157 }; 1158 1159 ConvergingScheduler(): 1160 DAG(0), SchedModel(0), TRI(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {} 1161 1162 virtual void initialize(ScheduleDAGMI *dag); 1163 1164 virtual SUnit *pickNode(bool &IsTopNode); 1165 1166 virtual void schedNode(SUnit *SU, bool IsTopNode); 1167 1168 virtual void releaseTopNode(SUnit *SU); 1169 1170 virtual void releaseBottomNode(SUnit *SU); 1171 1172 virtual void registerRoots(); 1173 1174 protected: 1175 void balanceZones( 1176 ConvergingScheduler::SchedBoundary &CriticalZone, 1177 ConvergingScheduler::SchedCandidate &CriticalCand, 1178 ConvergingScheduler::SchedBoundary &OppositeZone, 1179 ConvergingScheduler::SchedCandidate &OppositeCand); 1180 1181 void checkResourceLimits(ConvergingScheduler::SchedCandidate &TopCand, 1182 ConvergingScheduler::SchedCandidate &BotCand); 1183 1184 void tryCandidate(SchedCandidate &Cand, 1185 SchedCandidate &TryCand, 1186 SchedBoundary &Zone, 1187 const RegPressureTracker &RPTracker, 1188 RegPressureTracker &TempTracker); 1189 1190 SUnit *pickNodeBidirectional(bool &IsTopNode); 1191 1192 void pickNodeFromQueue(SchedBoundary &Zone, 1193 const RegPressureTracker &RPTracker, 1194 SchedCandidate &Candidate); 1195 1196 void reschedulePhysRegCopies(SUnit *SU, bool isTop); 1197 1198 #ifndef NDEBUG 1199 void traceCandidate(const SchedCandidate &Cand); 1200 #endif 1201 }; 1202 } // namespace 1203 1204 void ConvergingScheduler::SchedRemainder:: 1205 init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) { 1206 reset(); 1207 if (!SchedModel->hasInstrSchedModel()) 1208 return; 1209 RemainingCounts.resize(SchedModel->getNumProcResourceKinds()); 1210 for (std::vector<SUnit>::iterator 1211 I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) { 1212 const MCSchedClassDesc *SC = DAG->getSchedClass(&*I); 1213 RemainingMicroOps += SchedModel->getNumMicroOps(I->getInstr(), SC); 1214 for (TargetSchedModel::ProcResIter 1215 PI = SchedModel->getWriteProcResBegin(SC), 1216 PE = SchedModel->getWriteProcResEnd(SC); PI != 
PE; ++PI) { 1217 unsigned PIdx = PI->ProcResourceIdx; 1218 unsigned Factor = SchedModel->getResourceFactor(PIdx); 1219 RemainingCounts[PIdx] += (Factor * PI->Cycles); 1220 } 1221 } 1222 for (unsigned PIdx = 0, PEnd = SchedModel->getNumProcResourceKinds(); 1223 PIdx != PEnd; ++PIdx) { 1224 if ((int)(RemainingCounts[PIdx] - RemainingCounts[CritResIdx]) 1225 >= (int)SchedModel->getLatencyFactor()) { 1226 CritResIdx = PIdx; 1227 } 1228 } 1229 } 1230 1231 void ConvergingScheduler::SchedBoundary:: 1232 init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) { 1233 reset(); 1234 DAG = dag; 1235 SchedModel = smodel; 1236 Rem = rem; 1237 if (SchedModel->hasInstrSchedModel()) 1238 ResourceCounts.resize(SchedModel->getNumProcResourceKinds()); 1239 } 1240 1241 void ConvergingScheduler::initialize(ScheduleDAGMI *dag) { 1242 DAG = dag; 1243 SchedModel = DAG->getSchedModel(); 1244 TRI = DAG->TRI; 1245 1246 Rem.init(DAG, SchedModel); 1247 Top.init(DAG, SchedModel, &Rem); 1248 Bot.init(DAG, SchedModel, &Rem); 1249 1250 // Initialize resource counts. 1251 1252 // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or 1253 // are disabled, then these HazardRecs will be disabled. 1254 const InstrItineraryData *Itin = SchedModel->getInstrItineraries(); 1255 const TargetMachine &TM = DAG->MF.getTarget(); 1256 Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG); 1257 Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG); 1258 1259 assert((!ForceTopDown || !ForceBottomUp) && 1260 "-misched-topdown incompatible with -misched-bottomup"); 1261 } 1262 1263 void ConvergingScheduler::releaseTopNode(SUnit *SU) { 1264 if (SU->isScheduled) 1265 return; 1266 1267 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); 1268 I != E; ++I) { 1269 unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle; 1270 unsigned MinLatency = I->getMinLatency(); 1271 #ifndef NDEBUG 1272 Top.MaxMinLatency = std::max(MinLatency, Top.MaxMinLatency); 1273 #endif 1274 if (SU->TopReadyCycle < PredReadyCycle + MinLatency) 1275 SU->TopReadyCycle = PredReadyCycle + MinLatency; 1276 } 1277 Top.releaseNode(SU, SU->TopReadyCycle); 1278 } 1279 1280 void ConvergingScheduler::releaseBottomNode(SUnit *SU) { 1281 if (SU->isScheduled) 1282 return; 1283 1284 assert(SU->getInstr() && "Scheduled SUnit must have instr"); 1285 1286 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); 1287 I != E; ++I) { 1288 if (I->isWeak()) 1289 continue; 1290 unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle; 1291 unsigned MinLatency = I->getMinLatency(); 1292 #ifndef NDEBUG 1293 Bot.MaxMinLatency = std::max(MinLatency, Bot.MaxMinLatency); 1294 #endif 1295 if (SU->BotReadyCycle < SuccReadyCycle + MinLatency) 1296 SU->BotReadyCycle = SuccReadyCycle + MinLatency; 1297 } 1298 Bot.releaseNode(SU, SU->BotReadyCycle); 1299 } 1300 1301 void ConvergingScheduler::registerRoots() { 1302 Rem.CriticalPath = DAG->ExitSU.getDepth(); 1303 // Some roots may not feed into ExitSU. Check all of them in case. 1304 for (std::vector<SUnit*>::const_iterator 1305 I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) { 1306 if ((*I)->getDepth() > Rem.CriticalPath) 1307 Rem.CriticalPath = (*I)->getDepth(); 1308 } 1309 DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n'); 1310 } 1311 1312 /// Does this SU have a hazard within the current instruction group. 1313 /// 1314 /// The scheduler supports two modes of hazard recognition. 
The first is the 1315 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that 1316 /// supports highly complicated in-order reservation tables 1317 /// (ScoreboardHazardRecognizer) and arbitraty target-specific logic. 1318 /// 1319 /// The second is a streamlined mechanism that checks for hazards based on 1320 /// simple counters that the scheduler itself maintains. It explicitly checks 1321 /// for instruction dispatch limitations, including the number of micro-ops that 1322 /// can dispatch per cycle. 1323 /// 1324 /// TODO: Also check whether the SU must start a new group. 1325 bool ConvergingScheduler::SchedBoundary::checkHazard(SUnit *SU) { 1326 if (HazardRec->isEnabled()) 1327 return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard; 1328 1329 unsigned uops = SchedModel->getNumMicroOps(SU->getInstr()); 1330 if ((IssueCount > 0) && (IssueCount + uops > SchedModel->getIssueWidth())) { 1331 DEBUG(dbgs() << " SU(" << SU->NodeNum << ") uops=" 1332 << SchedModel->getNumMicroOps(SU->getInstr()) << '\n'); 1333 return true; 1334 } 1335 return false; 1336 } 1337 1338 /// Compute the remaining latency to determine whether ILP should be increased. 1339 void ConvergingScheduler::SchedBoundary::setLatencyPolicy(CandPolicy &Policy) { 1340 // FIXME: compile time. In all, we visit four queues here one we should only 1341 // need to visit the one that was last popped if we cache the result. 1342 unsigned RemLatency = 0; 1343 for (ReadyQueue::iterator I = Available.begin(), E = Available.end(); 1344 I != E; ++I) { 1345 unsigned L = getUnscheduledLatency(*I); 1346 if (L > RemLatency) 1347 RemLatency = L; 1348 } 1349 for (ReadyQueue::iterator I = Pending.begin(), E = Pending.end(); 1350 I != E; ++I) { 1351 unsigned L = getUnscheduledLatency(*I); 1352 if (L > RemLatency) 1353 RemLatency = L; 1354 } 1355 unsigned CriticalPathLimit = Rem->CriticalPath + SchedModel->getILPWindow(); 1356 if (RemLatency + ExpectedLatency >= CriticalPathLimit 1357 && RemLatency > Rem->getMaxRemainingCount(SchedModel)) { 1358 Policy.ReduceLatency = true; 1359 DEBUG(dbgs() << "Increase ILP: " << Available.getName() << '\n'); 1360 } 1361 } 1362 1363 void ConvergingScheduler::SchedBoundary::releaseNode(SUnit *SU, 1364 unsigned ReadyCycle) { 1365 1366 if (ReadyCycle < MinReadyCycle) 1367 MinReadyCycle = ReadyCycle; 1368 1369 // Check for interlocks first. For the purpose of other heuristics, an 1370 // instruction that cannot issue appears as if it's not in the ReadyQueue. 1371 if (ReadyCycle > CurrCycle || checkHazard(SU)) 1372 Pending.push(SU); 1373 else 1374 Available.push(SU); 1375 1376 // Record this node as an immediate dependent of the scheduled node. 1377 NextSUs.insert(SU); 1378 } 1379 1380 /// Move the boundary of scheduled code by one cycle. 1381 void ConvergingScheduler::SchedBoundary::bumpCycle() { 1382 unsigned Width = SchedModel->getIssueWidth(); 1383 IssueCount = (IssueCount <= Width) ? 0 : IssueCount - Width; 1384 1385 unsigned NextCycle = CurrCycle + 1; 1386 assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized"); 1387 if (MinReadyCycle > NextCycle) { 1388 IssueCount = 0; 1389 NextCycle = MinReadyCycle; 1390 } 1391 1392 if (!HazardRec->isEnabled()) { 1393 // Bypass HazardRec virtual calls. 1394 CurrCycle = NextCycle; 1395 } 1396 else { 1397 // Bypass getHazardType calls in case of long latency. 
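// Advance (top) or recede (bottom) the recognizer one cycle at a time so its
// reservation state stays in sync with CurrCycle.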
1398 for (; CurrCycle != NextCycle; ++CurrCycle) { 1399 if (isTop()) 1400 HazardRec->AdvanceCycle(); 1401 else 1402 HazardRec->RecedeCycle(); 1403 } 1404 } 1405 CheckPending = true; 1406 IsResourceLimited = getCriticalCount() > std::max(ExpectedLatency, CurrCycle); 1407 1408 DEBUG(dbgs() << " " << Available.getName() 1409 << " Cycle: " << CurrCycle << '\n'); 1410 } 1411 1412 /// Add the given processor resource to this scheduled zone. 1413 void ConvergingScheduler::SchedBoundary::countResource(unsigned PIdx, 1414 unsigned Cycles) { 1415 unsigned Factor = SchedModel->getResourceFactor(PIdx); 1416 DEBUG(dbgs() << " " << SchedModel->getProcResource(PIdx)->Name 1417 << " +(" << Cycles << "x" << Factor 1418 << ") / " << SchedModel->getLatencyFactor() << '\n'); 1419 1420 unsigned Count = Factor * Cycles; 1421 ResourceCounts[PIdx] += Count; 1422 assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted"); 1423 Rem->RemainingCounts[PIdx] -= Count; 1424 1425 // Check if this resource exceeds the current critical resource by a full 1426 // cycle. If so, it becomes the critical resource. 1427 if ((int)(ResourceCounts[PIdx] - ResourceCounts[CritResIdx]) 1428 >= (int)SchedModel->getLatencyFactor()) { 1429 CritResIdx = PIdx; 1430 DEBUG(dbgs() << " *** Critical resource " 1431 << SchedModel->getProcResource(PIdx)->Name << " x" 1432 << ResourceCounts[PIdx] << '\n'); 1433 } 1434 } 1435 1436 /// Move the boundary of scheduled code by one SUnit. 1437 void ConvergingScheduler::SchedBoundary::bumpNode(SUnit *SU) { 1438 // Update the reservation table. 1439 if (HazardRec->isEnabled()) { 1440 if (!isTop() && SU->isCall) { 1441 // Calls are scheduled with their preceding instructions. For bottom-up 1442 // scheduling, clear the pipeline state before emitting. 1443 HazardRec->Reset(); 1444 } 1445 HazardRec->EmitInstruction(SU); 1446 } 1447 // Update resource counts and critical resource. 1448 if (SchedModel->hasInstrSchedModel()) { 1449 const MCSchedClassDesc *SC = DAG->getSchedClass(SU); 1450 Rem->RemainingMicroOps -= SchedModel->getNumMicroOps(SU->getInstr(), SC); 1451 for (TargetSchedModel::ProcResIter 1452 PI = SchedModel->getWriteProcResBegin(SC), 1453 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) { 1454 countResource(PI->ProcResourceIdx, PI->Cycles); 1455 } 1456 } 1457 if (isTop()) { 1458 if (SU->getDepth() > ExpectedLatency) 1459 ExpectedLatency = SU->getDepth(); 1460 } 1461 else { 1462 if (SU->getHeight() > ExpectedLatency) 1463 ExpectedLatency = SU->getHeight(); 1464 } 1465 1466 IsResourceLimited = getCriticalCount() > std::max(ExpectedLatency, CurrCycle); 1467 1468 // Check the instruction group dispatch limit. 1469 // TODO: Check if this SU must end a dispatch group. 1470 IssueCount += SchedModel->getNumMicroOps(SU->getInstr()); 1471 1472 // checkHazard prevents scheduling multiple instructions per cycle that exceed 1473 // issue width. However, we commonly reach the maximum. In this case 1474 // opportunistically bump the cycle to avoid uselessly checking everything in 1475 // the readyQ. Furthermore, a single instruction may produce more than one 1476 // cycle's worth of micro-ops. 1477 if (IssueCount >= SchedModel->getIssueWidth()) { 1478 DEBUG(dbgs() << " *** Max instrs at cycle " << CurrCycle << '\n'); 1479 bumpCycle(); 1480 } 1481 } 1482 1483 /// Release pending ready nodes in to the available queue. This makes them 1484 /// visible to heuristics. 
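/// As a side effect, MinReadyCycle is recomputed so that bumpCycle() can jump
/// directly to the next cycle at which an instruction becomes ready.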
1485 void ConvergingScheduler::SchedBoundary::releasePending() { 1486 // If the available queue is empty, it is safe to reset MinReadyCycle. 1487 if (Available.empty()) 1488 MinReadyCycle = UINT_MAX; 1489 1490 // Check to see if any of the pending instructions are ready to issue. If 1491 // so, add them to the available queue. 1492 for (unsigned i = 0, e = Pending.size(); i != e; ++i) { 1493 SUnit *SU = *(Pending.begin()+i); 1494 unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle; 1495 1496 if (ReadyCycle < MinReadyCycle) 1497 MinReadyCycle = ReadyCycle; 1498 1499 if (ReadyCycle > CurrCycle) 1500 continue; 1501 1502 if (checkHazard(SU)) 1503 continue; 1504 1505 Available.push(SU); 1506 Pending.remove(Pending.begin()+i); 1507 --i; --e; 1508 } 1509 DEBUG(if (!Pending.empty()) Pending.dump()); 1510 CheckPending = false; 1511 } 1512 1513 /// Remove SU from the ready set for this boundary. 1514 void ConvergingScheduler::SchedBoundary::removeReady(SUnit *SU) { 1515 if (Available.isInQueue(SU)) 1516 Available.remove(Available.find(SU)); 1517 else { 1518 assert(Pending.isInQueue(SU) && "bad ready count"); 1519 Pending.remove(Pending.find(SU)); 1520 } 1521 } 1522 1523 /// If this queue only has one ready candidate, return it. As a side effect, 1524 /// defer any nodes that now hit a hazard, and advance the cycle until at least 1525 /// one node is ready. If multiple instructions are ready, return NULL. 1526 SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() { 1527 if (CheckPending) 1528 releasePending(); 1529 1530 if (IssueCount > 0) { 1531 // Defer any ready instrs that now have a hazard. 1532 for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) { 1533 if (checkHazard(*I)) { 1534 Pending.push(*I); 1535 I = Available.remove(I); 1536 continue; 1537 } 1538 ++I; 1539 } 1540 } 1541 for (unsigned i = 0; Available.empty(); ++i) { 1542 assert(i <= (HazardRec->getMaxLookAhead() + MaxMinLatency) && 1543 "permanent hazard"); (void)i; 1544 bumpCycle(); 1545 releasePending(); 1546 } 1547 if (Available.size() == 1) 1548 return *Available.begin(); 1549 return NULL; 1550 } 1551 1552 /// Record the candidate policy for opposite zones with different critical 1553 /// resources. 1554 /// 1555 /// If the CriticalZone is latency limited, don't force a policy for the 1556 /// candidates here. Instead, setLatencyPolicy sets ReduceLatency if needed. 1557 void ConvergingScheduler::balanceZones( 1558 ConvergingScheduler::SchedBoundary &CriticalZone, 1559 ConvergingScheduler::SchedCandidate &CriticalCand, 1560 ConvergingScheduler::SchedBoundary &OppositeZone, 1561 ConvergingScheduler::SchedCandidate &OppositeCand) { 1562 1563 if (!CriticalZone.IsResourceLimited) 1564 return; 1565 assert(SchedModel->hasInstrSchedModel() && "required schedmodel"); 1566 1567 SchedRemainder *Rem = CriticalZone.Rem; 1568 1569 // If the critical zone is overconsuming a resource relative to the 1570 // remainder, try to reduce it. 1571 unsigned RemainingCritCount = 1572 Rem->RemainingCounts[CriticalZone.CritResIdx]; 1573 if ((int)(Rem->getMaxRemainingCount(SchedModel) - RemainingCritCount) 1574 > (int)SchedModel->getLatencyFactor()) { 1575 CriticalCand.Policy.ReduceResIdx = CriticalZone.CritResIdx; 1576 DEBUG(dbgs() << "Balance " << CriticalZone.Available.getName() << " reduce " 1577 << SchedModel->getProcResource(CriticalZone.CritResIdx)->Name 1578 << '\n'); 1579 } 1580 // If the other zone is underconsuming a resource relative to the full zone, 1581 // try to increase it. 
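// ExpectedCount has already been scaled into resource-factor units by
// checkResourceLimits(), so comparing the difference against the latency
// factor below is consistent with the resource counts.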
1582 unsigned OppositeCount =
1583 OppositeZone.ResourceCounts[CriticalZone.CritResIdx];
1584 if ((int)(OppositeZone.ExpectedCount - OppositeCount)
1585 > (int)SchedModel->getLatencyFactor()) {
1586 OppositeCand.Policy.DemandResIdx = CriticalZone.CritResIdx;
1587 DEBUG(dbgs() << "Balance " << OppositeZone.Available.getName() << " demand "
1588 << SchedModel->getProcResource(OppositeZone.CritResIdx)->Name
1589 << '\n');
1590 }
1591 }
1592
1593 /// Determine if the scheduled zones exceed resource limits or the critical
1594 /// path, and set each candidate's ReduceLatency policy accordingly.
1595 void ConvergingScheduler::checkResourceLimits(
1596 ConvergingScheduler::SchedCandidate &TopCand,
1597 ConvergingScheduler::SchedCandidate &BotCand) {
1598
1599 // Set ReduceLatency to true if needed.
1600 Bot.setLatencyPolicy(BotCand.Policy);
1601 Top.setLatencyPolicy(TopCand.Policy);
1602
1603 // Handle resource-limited regions.
1604 if (Top.IsResourceLimited && Bot.IsResourceLimited
1605 && Top.CritResIdx == Bot.CritResIdx) {
1606 // If the scheduled critical resource in both zones is no longer the
1607 // critical remaining resource, attempt to reduce resource height both ways.
1608 if (Top.CritResIdx != Rem.CritResIdx) {
1609 TopCand.Policy.ReduceResIdx = Top.CritResIdx;
1610 BotCand.Policy.ReduceResIdx = Bot.CritResIdx;
1611 DEBUG(dbgs() << "Reduce scheduled "
1612 << SchedModel->getProcResource(Top.CritResIdx)->Name << '\n');
1613 }
1614 return;
1615 }
1616 // Handle latency-limited regions.
1617 if (!Top.IsResourceLimited && !Bot.IsResourceLimited) {
1618 // If the total scheduled expected latency exceeds the region's critical
1619 // path, then reduce latency both ways.
1620 //
1621 // Just because a zone is not resource limited does not mean it is latency
1622 // limited. Unbuffered resources, such as the max micro-op limit, may cause
1623 // CurrCycle to exceed the expected latency.
1624 if ((Top.ExpectedLatency + Bot.ExpectedLatency >= Rem.CriticalPath)
1625 && (Rem.CriticalPath > Top.CurrCycle + Bot.CurrCycle)) {
1626 TopCand.Policy.ReduceLatency = true;
1627 BotCand.Policy.ReduceLatency = true;
1628 DEBUG(dbgs() << "Reduce scheduled latency " << Top.ExpectedLatency
1629 << " + " << Bot.ExpectedLatency << '\n');
1630 }
1631 return;
1632 }
1633 // The critical resource is different in each zone, so request balancing.
1634
1635 // Compute the cost of each zone.
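// The cost computed below folds latency and resource usage into a single
// comparable number: cycles are scaled by the latency factor before being
// compared against the critical resource count. For a hypothetical example
// with ExpectedLatency = 6, CurrCycle = 4 and a latency factor of 2, the zone
// costs max(6, 4) * 2 = 12 units, unless its critical resource count is
// already higher, in which case the resource count dominates.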
1636 Top.ExpectedCount = std::max(Top.ExpectedLatency, Top.CurrCycle);
1637 Top.ExpectedCount = std::max(
1638 Top.getCriticalCount(),
1639 Top.ExpectedCount * SchedModel->getLatencyFactor());
1640 Bot.ExpectedCount = std::max(Bot.ExpectedLatency, Bot.CurrCycle);
1641 Bot.ExpectedCount = std::max(
1642 Bot.getCriticalCount(),
1643 Bot.ExpectedCount * SchedModel->getLatencyFactor());
1644
1645 balanceZones(Top, TopCand, Bot, BotCand);
1646 balanceZones(Bot, BotCand, Top, TopCand);
1647 }
1648
1649 void ConvergingScheduler::SchedCandidate::
1650 initResourceDelta(const ScheduleDAGMI *DAG,
1651 const TargetSchedModel *SchedModel) {
1652 if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
1653 return;
1654
1655 const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1656 for (TargetSchedModel::ProcResIter
1657 PI = SchedModel->getWriteProcResBegin(SC),
1658 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1659 if (PI->ProcResourceIdx == Policy.ReduceResIdx)
1660 ResDelta.CritResources += PI->Cycles;
1661 if (PI->ProcResourceIdx == Policy.DemandResIdx)
1662 ResDelta.DemandedResources += PI->Cycles;
1663 }
1664 }
1665
1666 /// Return true if this heuristic determines order.
1667 static bool tryLess(int TryVal, int CandVal,
1668 ConvergingScheduler::SchedCandidate &TryCand,
1669 ConvergingScheduler::SchedCandidate &Cand,
1670 ConvergingScheduler::CandReason Reason) {
1671 if (TryVal < CandVal) {
1672 TryCand.Reason = Reason;
1673 return true;
1674 }
1675 if (TryVal > CandVal) {
1676 if (Cand.Reason > Reason)
1677 Cand.Reason = Reason;
1678 return true;
1679 }
1680 return false;
1681 }
1682
1683 static bool tryGreater(int TryVal, int CandVal,
1684 ConvergingScheduler::SchedCandidate &TryCand,
1685 ConvergingScheduler::SchedCandidate &Cand,
1686 ConvergingScheduler::CandReason Reason) {
1687 if (TryVal > CandVal) {
1688 TryCand.Reason = Reason;
1689 return true;
1690 }
1691 if (TryVal < CandVal) {
1692 if (Cand.Reason > Reason)
1693 Cand.Reason = Reason;
1694 return true;
1695 }
1696 return false;
1697 }
1698
1699 static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
1700 return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
1701 }
1702
1703 /// Minimize physical register live ranges. Regalloc wants them adjacent to
1704 /// their physreg def/use.
1705 ///
1706 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
1707 /// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
1708 /// with the operation that produces or consumes the physreg. We'll do this when
1709 /// regalloc has support for parallel copies.
1710 static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
1711 const MachineInstr *MI = SU->getInstr();
1712 if (!MI->isCopy())
1713 return 0;
1714
1715 unsigned ScheduledOper = isTop ? 1 : 0;
1716 unsigned UnscheduledOper = isTop ? 0 : 1;
1717 // If we have already scheduled the physreg producer/consumer, immediately
1718 // schedule the copy.
1719 if (TargetRegisterInfo::isPhysicalRegister(
1720 MI->getOperand(ScheduledOper).getReg()))
1721 return 1;
1722 // If the physreg is at the boundary, defer it. Otherwise schedule it
1723 // immediately to free the dependent. We can hoist the copy later.
1724 bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
1725 if (TargetRegisterInfo::isPhysicalRegister(
1726 MI->getOperand(UnscheduledOper).getReg()))
1727 return AtBoundary ? -1 : 1;
1728 return 0;
1729 }
1730
1731 /// Apply a set of heuristics to a new candidate. Heuristics are currently
1732 /// hierarchical.
This may be more efficient than a graduated cost model because 1733 /// we don't need to evaluate all aspects of the model for each node in the 1734 /// queue. But it's really done to make the heuristics easier to debug and 1735 /// statistically analyze. 1736 /// 1737 /// \param Cand provides the policy and current best candidate. 1738 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized. 1739 /// \param Zone describes the scheduled zone that we are extending. 1740 /// \param RPTracker describes reg pressure within the scheduled zone. 1741 /// \param TempTracker is a scratch pressure tracker to reuse in queries. 1742 void ConvergingScheduler::tryCandidate(SchedCandidate &Cand, 1743 SchedCandidate &TryCand, 1744 SchedBoundary &Zone, 1745 const RegPressureTracker &RPTracker, 1746 RegPressureTracker &TempTracker) { 1747 1748 // Always initialize TryCand's RPDelta. 1749 TempTracker.getMaxPressureDelta(TryCand.SU->getInstr(), TryCand.RPDelta, 1750 DAG->getRegionCriticalPSets(), 1751 DAG->getRegPressure().MaxSetPressure); 1752 1753 // Initialize the candidate if needed. 1754 if (!Cand.isValid()) { 1755 TryCand.Reason = NodeOrder; 1756 return; 1757 } 1758 1759 if (tryGreater(biasPhysRegCopy(TryCand.SU, Zone.isTop()), 1760 biasPhysRegCopy(Cand.SU, Zone.isTop()), 1761 TryCand, Cand, PhysRegCopy)) 1762 return; 1763 1764 // Avoid exceeding the target's limit. 1765 if (tryLess(TryCand.RPDelta.Excess.UnitIncrease, 1766 Cand.RPDelta.Excess.UnitIncrease, TryCand, Cand, SingleExcess)) 1767 return; 1768 if (Cand.Reason == SingleExcess) 1769 Cand.Reason = MultiPressure; 1770 1771 // Avoid increasing the max critical pressure in the scheduled region. 1772 if (tryLess(TryCand.RPDelta.CriticalMax.UnitIncrease, 1773 Cand.RPDelta.CriticalMax.UnitIncrease, 1774 TryCand, Cand, SingleCritical)) 1775 return; 1776 if (Cand.Reason == SingleCritical) 1777 Cand.Reason = MultiPressure; 1778 1779 // Keep clustered nodes together to encourage downstream peephole 1780 // optimizations which may reduce resource requirements. 1781 // 1782 // This is a best effort to set things up for a post-RA pass. Optimizations 1783 // like generating loads of multiple registers should ideally be done within 1784 // the scheduler pass by combining the loads during DAG postprocessing. 1785 const SUnit *NextClusterSU = 1786 Zone.isTop() ? DAG->getNextClusterSucc() : DAG->getNextClusterPred(); 1787 if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU, 1788 TryCand, Cand, Cluster)) 1789 return; 1790 // Currently, weak edges are for clustering, so we hard-code that reason. 1791 // However, deferring the current TryCand will not change Cand's reason. 1792 CandReason OrigReason = Cand.Reason; 1793 if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()), 1794 getWeakLeft(Cand.SU, Zone.isTop()), 1795 TryCand, Cand, Cluster)) { 1796 Cand.Reason = OrigReason; 1797 return; 1798 } 1799 // Avoid critical resource consumption and balance the schedule. 1800 TryCand.initResourceDelta(DAG, SchedModel); 1801 if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources, 1802 TryCand, Cand, ResourceReduce)) 1803 return; 1804 if (tryGreater(TryCand.ResDelta.DemandedResources, 1805 Cand.ResDelta.DemandedResources, 1806 TryCand, Cand, ResourceDemand)) 1807 return; 1808 1809 // Avoid serializing long latency dependence chains. 
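// The two directions below are mirror images: when extending the top zone the
// interesting quantity is a candidate's depth (distance from its scheduled
// predecessors), while the bottom zone looks at height (distance from its
// successors). For a hypothetical top-zone example, if the incumbent sits at
// depth 6 with a latency factor of 1 while the zone's ExpectedCount is only 4,
// the incumbent is already stretching the zone, so a shallower TryCand wins on
// TopDepthReduce; failing that, the candidate with the greater height (longer
// remaining critical path) wins on TopPathReduce.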
1810 if (Cand.Policy.ReduceLatency) { 1811 if (Zone.isTop()) { 1812 if (Cand.SU->getDepth() * SchedModel->getLatencyFactor() 1813 > Zone.ExpectedCount) { 1814 if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(), 1815 TryCand, Cand, TopDepthReduce)) 1816 return; 1817 } 1818 if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(), 1819 TryCand, Cand, TopPathReduce)) 1820 return; 1821 } 1822 else { 1823 if (Cand.SU->getHeight() * SchedModel->getLatencyFactor() 1824 > Zone.ExpectedCount) { 1825 if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(), 1826 TryCand, Cand, BotHeightReduce)) 1827 return; 1828 } 1829 if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(), 1830 TryCand, Cand, BotPathReduce)) 1831 return; 1832 } 1833 } 1834 1835 // Avoid increasing the max pressure of the entire region. 1836 if (tryLess(TryCand.RPDelta.CurrentMax.UnitIncrease, 1837 Cand.RPDelta.CurrentMax.UnitIncrease, TryCand, Cand, SingleMax)) 1838 return; 1839 if (Cand.Reason == SingleMax) 1840 Cand.Reason = MultiPressure; 1841 1842 // Prefer immediate defs/users of the last scheduled instruction. This is a 1843 // nice pressure avoidance strategy that also conserves the processor's 1844 // register renaming resources and keeps the machine code readable. 1845 if (tryGreater(Zone.NextSUs.count(TryCand.SU), Zone.NextSUs.count(Cand.SU), 1846 TryCand, Cand, NextDefUse)) 1847 return; 1848 1849 // Fall through to original instruction order. 1850 if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum) 1851 || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) { 1852 TryCand.Reason = NodeOrder; 1853 } 1854 } 1855 1856 /// pickNodeFromQueue helper that returns true if the LHS reg pressure effect is 1857 /// more desirable than RHS from scheduling standpoint. 1858 static bool compareRPDelta(const RegPressureDelta &LHS, 1859 const RegPressureDelta &RHS) { 1860 // Compare each component of pressure in decreasing order of importance 1861 // without checking if any are valid. Invalid PressureElements are assumed to 1862 // have UnitIncrease==0, so are neutral. 1863 1864 // Avoid increasing the max critical pressure in the scheduled region. 1865 if (LHS.Excess.UnitIncrease != RHS.Excess.UnitIncrease) { 1866 DEBUG(dbgs() << "RP excess top - bot: " 1867 << (LHS.Excess.UnitIncrease - RHS.Excess.UnitIncrease) << '\n'); 1868 return LHS.Excess.UnitIncrease < RHS.Excess.UnitIncrease; 1869 } 1870 // Avoid increasing the max critical pressure in the scheduled region. 1871 if (LHS.CriticalMax.UnitIncrease != RHS.CriticalMax.UnitIncrease) { 1872 DEBUG(dbgs() << "RP critical top - bot: " 1873 << (LHS.CriticalMax.UnitIncrease - RHS.CriticalMax.UnitIncrease) 1874 << '\n'); 1875 return LHS.CriticalMax.UnitIncrease < RHS.CriticalMax.UnitIncrease; 1876 } 1877 // Avoid increasing the max pressure of the entire region. 
1878 if (LHS.CurrentMax.UnitIncrease != RHS.CurrentMax.UnitIncrease) { 1879 DEBUG(dbgs() << "RP current top - bot: " 1880 << (LHS.CurrentMax.UnitIncrease - RHS.CurrentMax.UnitIncrease) 1881 << '\n'); 1882 return LHS.CurrentMax.UnitIncrease < RHS.CurrentMax.UnitIncrease; 1883 } 1884 return false; 1885 } 1886 1887 #ifndef NDEBUG 1888 const char *ConvergingScheduler::getReasonStr( 1889 ConvergingScheduler::CandReason Reason) { 1890 switch (Reason) { 1891 case NoCand: return "NOCAND "; 1892 case PhysRegCopy: return "PREG-COPY"; 1893 case SingleExcess: return "REG-EXCESS"; 1894 case SingleCritical: return "REG-CRIT "; 1895 case Cluster: return "CLUSTER "; 1896 case SingleMax: return "REG-MAX "; 1897 case MultiPressure: return "REG-MULTI "; 1898 case ResourceReduce: return "RES-REDUCE"; 1899 case ResourceDemand: return "RES-DEMAND"; 1900 case TopDepthReduce: return "TOP-DEPTH "; 1901 case TopPathReduce: return "TOP-PATH "; 1902 case BotHeightReduce:return "BOT-HEIGHT"; 1903 case BotPathReduce: return "BOT-PATH "; 1904 case NextDefUse: return "DEF-USE "; 1905 case NodeOrder: return "ORDER "; 1906 }; 1907 llvm_unreachable("Unknown reason!"); 1908 } 1909 1910 void ConvergingScheduler::traceCandidate(const SchedCandidate &Cand) { 1911 PressureElement P; 1912 unsigned ResIdx = 0; 1913 unsigned Latency = 0; 1914 switch (Cand.Reason) { 1915 default: 1916 break; 1917 case SingleExcess: 1918 P = Cand.RPDelta.Excess; 1919 break; 1920 case SingleCritical: 1921 P = Cand.RPDelta.CriticalMax; 1922 break; 1923 case SingleMax: 1924 P = Cand.RPDelta.CurrentMax; 1925 break; 1926 case ResourceReduce: 1927 ResIdx = Cand.Policy.ReduceResIdx; 1928 break; 1929 case ResourceDemand: 1930 ResIdx = Cand.Policy.DemandResIdx; 1931 break; 1932 case TopDepthReduce: 1933 Latency = Cand.SU->getDepth(); 1934 break; 1935 case TopPathReduce: 1936 Latency = Cand.SU->getHeight(); 1937 break; 1938 case BotHeightReduce: 1939 Latency = Cand.SU->getHeight(); 1940 break; 1941 case BotPathReduce: 1942 Latency = Cand.SU->getDepth(); 1943 break; 1944 } 1945 dbgs() << " SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason); 1946 if (P.isValid()) 1947 dbgs() << " " << TRI->getRegPressureSetName(P.PSetID) 1948 << ":" << P.UnitIncrease << " "; 1949 else 1950 dbgs() << " "; 1951 if (ResIdx) 1952 dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " "; 1953 else 1954 dbgs() << " "; 1955 if (Latency) 1956 dbgs() << " " << Latency << " cycles "; 1957 else 1958 dbgs() << " "; 1959 dbgs() << '\n'; 1960 } 1961 #endif 1962 1963 /// Pick the best candidate from the top queue. 1964 /// 1965 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during 1966 /// DAG building. To adjust for the current scheduling location we need to 1967 /// maintain the number of vreg uses remaining to be top-scheduled. 1968 void ConvergingScheduler::pickNodeFromQueue(SchedBoundary &Zone, 1969 const RegPressureTracker &RPTracker, 1970 SchedCandidate &Cand) { 1971 ReadyQueue &Q = Zone.Available; 1972 1973 DEBUG(Q.dump()); 1974 1975 // getMaxPressureDelta temporarily modifies the tracker. 1976 RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker); 1977 1978 for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) { 1979 1980 SchedCandidate TryCand(Cand.Policy); 1981 TryCand.SU = *I; 1982 tryCandidate(Cand, TryCand, Zone, RPTracker, TempTracker); 1983 if (TryCand.Reason != NoCand) { 1984 // Initialize resource delta if needed in case future heuristics query it. 
1985 if (TryCand.ResDelta == SchedResourceDelta()) 1986 TryCand.initResourceDelta(DAG, SchedModel); 1987 Cand.setBest(TryCand); 1988 DEBUG(traceCandidate(Cand)); 1989 } 1990 } 1991 } 1992 1993 static void tracePick(const ConvergingScheduler::SchedCandidate &Cand, 1994 bool IsTop) { 1995 DEBUG(dbgs() << "Pick " << (IsTop ? "Top" : "Bot") 1996 << " SU(" << Cand.SU->NodeNum << ") " 1997 << ConvergingScheduler::getReasonStr(Cand.Reason) << '\n'); 1998 } 1999 2000 /// Pick the best candidate node from either the top or bottom queue. 2001 SUnit *ConvergingScheduler::pickNodeBidirectional(bool &IsTopNode) { 2002 // Schedule as far as possible in the direction of no choice. This is most 2003 // efficient, but also provides the best heuristics for CriticalPSets. 2004 if (SUnit *SU = Bot.pickOnlyChoice()) { 2005 IsTopNode = false; 2006 return SU; 2007 } 2008 if (SUnit *SU = Top.pickOnlyChoice()) { 2009 IsTopNode = true; 2010 return SU; 2011 } 2012 CandPolicy NoPolicy; 2013 SchedCandidate BotCand(NoPolicy); 2014 SchedCandidate TopCand(NoPolicy); 2015 checkResourceLimits(TopCand, BotCand); 2016 2017 // Prefer bottom scheduling when heuristics are silent. 2018 pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand); 2019 assert(BotCand.Reason != NoCand && "failed to find the first candidate"); 2020 2021 // If either Q has a single candidate that provides the least increase in 2022 // Excess pressure, we can immediately schedule from that Q. 2023 // 2024 // RegionCriticalPSets summarizes the pressure within the scheduled region and 2025 // affects picking from either Q. If scheduling in one direction must 2026 // increase pressure for one of the excess PSets, then schedule in that 2027 // direction first to provide more freedom in the other direction. 2028 if (BotCand.Reason == SingleExcess || BotCand.Reason == SingleCritical) { 2029 IsTopNode = false; 2030 tracePick(BotCand, IsTopNode); 2031 return BotCand.SU; 2032 } 2033 // Check if the top Q has a better candidate. 2034 pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand); 2035 assert(TopCand.Reason != NoCand && "failed to find the first candidate"); 2036 2037 // If either Q has a single candidate that minimizes pressure above the 2038 // original region's pressure pick it. 2039 if (TopCand.Reason <= SingleMax || BotCand.Reason <= SingleMax) { 2040 if (TopCand.Reason < BotCand.Reason) { 2041 IsTopNode = true; 2042 tracePick(TopCand, IsTopNode); 2043 return TopCand.SU; 2044 } 2045 IsTopNode = false; 2046 tracePick(BotCand, IsTopNode); 2047 return BotCand.SU; 2048 } 2049 // Check for a salient pressure difference and pick the best from either side. 2050 if (compareRPDelta(TopCand.RPDelta, BotCand.RPDelta)) { 2051 IsTopNode = true; 2052 tracePick(TopCand, IsTopNode); 2053 return TopCand.SU; 2054 } 2055 // Otherwise prefer the bottom candidate, in node order if all else failed. 2056 if (TopCand.Reason < BotCand.Reason) { 2057 IsTopNode = true; 2058 tracePick(TopCand, IsTopNode); 2059 return TopCand.SU; 2060 } 2061 IsTopNode = false; 2062 tracePick(BotCand, IsTopNode); 2063 return BotCand.SU; 2064 } 2065 2066 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy. 
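// Illustrative sketch (standalone, simplified; not the actual ScheduleDAGMI
// driver): a MachineSchedStrategy such as ConvergingScheduler is used by
// repeatedly asking it to pick a node, placing that instruction at the chosen
// boundary, and then reporting the placement back through schedNode(). The
// ToyUnit / ToyStrategy names are hypothetical.
#if 0
#include <vector>
struct ToyUnit { bool Scheduled; ToyUnit() : Scheduled(false) {} };

// A deliberately trivial strategy: take ready units in stack order, bottom-up.
struct ToyStrategy {
  std::vector<ToyUnit*> Ready;
  ToyUnit *pickNode(bool &IsTopNode) {
    IsTopNode = false;
    while (!Ready.empty()) {
      ToyUnit *SU = Ready.back();
      Ready.pop_back();
      if (!SU->Scheduled)
        return SU;
    }
    return 0;
  }
  void schedNode(ToyUnit *SU, bool /*IsTopNode*/) { SU->Scheduled = true; }
};

// The driver loop: pick, place, report, until the strategy runs dry.
inline void driveSchedule(ToyStrategy &S) {
  bool IsTopNode = false;
  while (ToyUnit *SU = S.pickNode(IsTopNode)) {
    // ...the real scheduler moves the MachineInstr to the top or bottom
    // boundary of the region here...
    S.schedNode(SU, IsTopNode);
  }
}
#endif // illustrative sketch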
2067 SUnit *ConvergingScheduler::pickNode(bool &IsTopNode) {
2068 if (DAG->top() == DAG->bottom()) {
2069 assert(Top.Available.empty() && Top.Pending.empty() &&
2070 Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
2071 return NULL;
2072 }
2073 SUnit *SU;
2074 do {
2075 if (ForceTopDown) {
2076 SU = Top.pickOnlyChoice();
2077 if (!SU) {
2078 CandPolicy NoPolicy;
2079 SchedCandidate TopCand(NoPolicy);
2080 pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2081 assert(TopCand.Reason != NoCand && "failed to find the first candidate");
2082 SU = TopCand.SU;
2083 }
2084 IsTopNode = true;
2085 }
2086 else if (ForceBottomUp) {
2087 SU = Bot.pickOnlyChoice();
2088 if (!SU) {
2089 CandPolicy NoPolicy;
2090 SchedCandidate BotCand(NoPolicy);
2091 pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
2092 assert(BotCand.Reason != NoCand && "failed to find the first candidate");
2093 SU = BotCand.SU;
2094 }
2095 IsTopNode = false;
2096 }
2097 else {
2098 SU = pickNodeBidirectional(IsTopNode);
2099 }
2100 } while (SU->isScheduled);
2101
2102 if (SU->isTopReady())
2103 Top.removeReady(SU);
2104 if (SU->isBottomReady())
2105 Bot.removeReady(SU);
2106
2107 DEBUG(dbgs() << "Scheduling " << *SU->getInstr());
2108 return SU;
2109 }
2110
2111 void ConvergingScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
2112
2113 MachineBasicBlock::iterator InsertPos = SU->getInstr();
2114 if (!isTop)
2115 ++InsertPos;
2116 SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;
2117
2118 // Find already scheduled copies with a single physreg dependence and move
2119 // them just above the scheduled instruction.
2120 for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end();
2121 I != E; ++I) {
2122 if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg()))
2123 continue;
2124 SUnit *DepSU = I->getSUnit();
2125 if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
2126 continue;
2127 MachineInstr *Copy = DepSU->getInstr();
2128 if (!Copy->isCopy())
2129 continue;
2130 DEBUG(dbgs() << " Rescheduling physreg copy ";
2131 I->getSUnit()->dump(DAG));
2132 DAG->moveInstruction(Copy, InsertPos);
2133 }
2134 }
2135
2136 /// Update the scheduler's state after scheduling a node. This is the same node
2137 /// that was just returned by pickNode(). However, ScheduleDAGMI needs to update
2138 /// its state based on the current cycle before MachineSchedStrategy does.
2139 ///
2140 /// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
2141 /// them here. See comments in biasPhysRegCopy.
2142 void ConvergingScheduler::schedNode(SUnit *SU, bool IsTopNode) {
2143 if (IsTopNode) {
2144 SU->TopReadyCycle = Top.CurrCycle;
2145 Top.bumpNode(SU);
2146 if (SU->hasPhysRegUses)
2147 reschedulePhysRegCopies(SU, true);
2148 }
2149 else {
2150 SU->BotReadyCycle = Bot.CurrCycle;
2151 Bot.bumpNode(SU);
2152 if (SU->hasPhysRegDefs)
2153 reschedulePhysRegCopies(SU, false);
2154 }
2155 }
2156
2157 /// Create the standard converging machine scheduler. This will be used as the
2158 /// default scheduler if the target does not set a default.
2159 static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
2160 assert((!ForceTopDown || !ForceBottomUp) &&
2161 "-misched-topdown incompatible with -misched-bottomup");
2162 ScheduleDAGMI *DAG = new ScheduleDAGMI(C, new ConvergingScheduler());
2163 // Register DAG post-processors.
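// DAG post-processors ("mutations") run after the dependence graph is built
// but before any node is picked, and are the hook used below for clustering
// loads and fusing macro-op pairs. A target wanting its own adjustment would
// follow the same pattern. The sketch below is a hedged illustration only;
// MyTargetCluster and its body are hypothetical, not an existing mutation.
#if 0
struct MyTargetCluster : public ScheduleDAGMutation {
  virtual void apply(ScheduleDAGMI *DAG) {
    // Walk DAG->SUnits and add weak cluster edges between pairs the target
    // wants kept adjacent, in the spirit of LoadClusterMutation below.
  }
};
// ...then, in a factory such as createConvergingSched():
//   DAG->addMutation(new MyTargetCluster());
#endif // illustrative sketch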
2164 if (EnableLoadCluster)
2165 DAG->addMutation(new LoadClusterMutation(DAG->TII, DAG->TRI));
2166 if (EnableMacroFusion)
2167 DAG->addMutation(new MacroFusion(DAG->TII));
2168 return DAG;
2169 }
2170 static MachineSchedRegistry
2171 ConvergingSchedRegistry("converge", "Standard converging scheduler.",
2172 createConvergingSched);
2173
2174 //===----------------------------------------------------------------------===//
2175 // ILP Scheduler. Currently for experimental analysis of heuristics.
2176 //===----------------------------------------------------------------------===//
2177
2178 namespace {
2179 /// \brief Order nodes by the ILP metric.
2180 struct ILPOrder {
2181 const SchedDFSResult *DFSResult;
2182 const BitVector *ScheduledTrees;
2183 bool MaximizeILP;
2184
2185 ILPOrder(bool MaxILP): DFSResult(0), ScheduledTrees(0), MaximizeILP(MaxILP) {}
2186
2187 /// \brief Apply a less-than relation on node priority.
2188 ///
2189 /// (Return true if A comes after B in the Q.)
2190 bool operator()(const SUnit *A, const SUnit *B) const {
2191 unsigned SchedTreeA = DFSResult->getSubtreeID(A);
2192 unsigned SchedTreeB = DFSResult->getSubtreeID(B);
2193 if (SchedTreeA != SchedTreeB) {
2194 // Unscheduled trees have lower priority.
2195 if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
2196 return ScheduledTrees->test(SchedTreeB);
2197
2198 // Trees with shallower connections have lower priority.
2199 if (DFSResult->getSubtreeLevel(SchedTreeA)
2200 != DFSResult->getSubtreeLevel(SchedTreeB)) {
2201 return DFSResult->getSubtreeLevel(SchedTreeA)
2202 < DFSResult->getSubtreeLevel(SchedTreeB);
2203 }
2204 }
2205 if (MaximizeILP)
2206 return DFSResult->getILP(A) < DFSResult->getILP(B);
2207 else
2208 return DFSResult->getILP(A) > DFSResult->getILP(B);
2209 }
2210 };
2211
2212 /// \brief Schedule based on the ILP metric.
2213 class ILPScheduler : public MachineSchedStrategy {
2214 /// In case all subtrees are eventually connected to a common root through
2215 /// data dependence (e.g. reduction), place an upper limit on their size.
2216 ///
2217 /// FIXME: A subtree limit is generally good, but in the situation commented
2218 /// above, where multiple similar subtrees feed a common root, we should
2219 /// only split at a point where the resulting subtrees will be balanced.
2220 /// (a motivating test case must be found).
2221 static const unsigned SubtreeLimit = 16;
2222
2223 ScheduleDAGMI *DAG;
2224 ILPOrder Cmp;
2225
2226 std::vector<SUnit*> ReadyQ;
2227 public:
2228 ILPScheduler(bool MaximizeILP): DAG(0), Cmp(MaximizeILP) {}
2229
2230 virtual void initialize(ScheduleDAGMI *dag) {
2231 DAG = dag;
2232 DAG->computeDFSResult();
2233 Cmp.DFSResult = DAG->getDFSResult();
2234 Cmp.ScheduledTrees = &DAG->getScheduledTrees();
2235 ReadyQ.clear();
2236 }
2237
2238 virtual void registerRoots() {
2239 // Restore the heap in ReadyQ with the updated DFS results.
2240 std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2241 }
2242
2243 /// Implement MachineSchedStrategy interface.
2244 /// -----------------------------------------
2245
2246 /// Callback to select the highest priority node from the ready Q.
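// Illustrative sketch (standalone, not LLVM code): ReadyQ above is a plain
// std::vector kept in heap order with the <algorithm> heap primitives, so the
// node that Cmp ranks highest sits at the front of the heap and pop_heap moves
// it to the back for O(log n) extraction. ToyNode and ToyCmp below are
// hypothetical names that order by a single priority field.
#if 0
#include <algorithm>
#include <vector>

struct ToyNode { unsigned Priority; };

struct ToyCmp {
  // Like ILPOrder, a strict-weak "less than": the greatest element sits at
  // the front of the heap.
  bool operator()(const ToyNode *A, const ToyNode *B) const {
    return A->Priority < B->Priority;
  }
};

inline void pushReady(std::vector<ToyNode*> &ReadyQ, ToyNode *N, ToyCmp Cmp) {
  ReadyQ.push_back(N);
  std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); // restore the heap
}

inline ToyNode *popBest(std::vector<ToyNode*> &ReadyQ, ToyCmp Cmp) {
  if (ReadyQ.empty())
    return 0;
  std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); // best element -> back()
  ToyNode *Best = ReadyQ.back();
  ReadyQ.pop_back();
  return Best;
}
#endif // illustrative sketch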
2247 virtual SUnit *pickNode(bool &IsTopNode) { 2248 if (ReadyQ.empty()) return NULL; 2249 std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 2250 SUnit *SU = ReadyQ.back(); 2251 ReadyQ.pop_back(); 2252 IsTopNode = false; 2253 DEBUG(dbgs() << "*** Scheduling " << "SU(" << SU->NodeNum << "): " 2254 << *SU->getInstr() 2255 << " ILP: " << DAG->getDFSResult()->getILP(SU) 2256 << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @" 2257 << DAG->getDFSResult()->getSubtreeLevel( 2258 DAG->getDFSResult()->getSubtreeID(SU)) << '\n'); 2259 return SU; 2260 } 2261 2262 /// \brief Scheduler callback to notify that a new subtree is scheduled. 2263 virtual void scheduleTree(unsigned SubtreeID) { 2264 std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 2265 } 2266 2267 /// Callback after a node is scheduled. Mark a newly scheduled tree, notify 2268 /// DFSResults, and resort the priority Q. 2269 virtual void schedNode(SUnit *SU, bool IsTopNode) { 2270 assert(!IsTopNode && "SchedDFSResult needs bottom-up"); 2271 } 2272 2273 virtual void releaseTopNode(SUnit *) { /*only called for top roots*/ } 2274 2275 virtual void releaseBottomNode(SUnit *SU) { 2276 ReadyQ.push_back(SU); 2277 std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 2278 } 2279 }; 2280 } // namespace 2281 2282 static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) { 2283 return new ScheduleDAGMI(C, new ILPScheduler(true)); 2284 } 2285 static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) { 2286 return new ScheduleDAGMI(C, new ILPScheduler(false)); 2287 } 2288 static MachineSchedRegistry ILPMaxRegistry( 2289 "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler); 2290 static MachineSchedRegistry ILPMinRegistry( 2291 "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler); 2292 2293 //===----------------------------------------------------------------------===// 2294 // Machine Instruction Shuffler for Correctness Testing 2295 //===----------------------------------------------------------------------===// 2296 2297 #ifndef NDEBUG 2298 namespace { 2299 /// Apply a less-than relation on the node order, which corresponds to the 2300 /// instruction order prior to scheduling. IsReverse implements greater-than. 2301 template<bool IsReverse> 2302 struct SUnitOrder { 2303 bool operator()(SUnit *A, SUnit *B) const { 2304 if (IsReverse) 2305 return A->NodeNum > B->NodeNum; 2306 else 2307 return A->NodeNum < B->NodeNum; 2308 } 2309 }; 2310 2311 /// Reorder instructions as much as possible. 2312 class InstructionShuffler : public MachineSchedStrategy { 2313 bool IsAlternating; 2314 bool IsTopDown; 2315 2316 // Using a less-than relation (SUnitOrder<false>) for the TopQ priority 2317 // gives nodes with a higher number higher priority causing the latest 2318 // instructions to be scheduled first. 2319 PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> > 2320 TopQ; 2321 // When scheduling bottom-up, use greater-than as the queue priority. 2322 PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> > 2323 BottomQ; 2324 public: 2325 InstructionShuffler(bool alternate, bool topdown) 2326 : IsAlternating(alternate), IsTopDown(topdown) {} 2327 2328 virtual void initialize(ScheduleDAGMI *) { 2329 TopQ.clear(); 2330 BottomQ.clear(); 2331 } 2332 2333 /// Implement MachineSchedStrategy interface. 
2334 /// ----------------------------------------- 2335 2336 virtual SUnit *pickNode(bool &IsTopNode) { 2337 SUnit *SU; 2338 if (IsTopDown) { 2339 do { 2340 if (TopQ.empty()) return NULL; 2341 SU = TopQ.top(); 2342 TopQ.pop(); 2343 } while (SU->isScheduled); 2344 IsTopNode = true; 2345 } 2346 else { 2347 do { 2348 if (BottomQ.empty()) return NULL; 2349 SU = BottomQ.top(); 2350 BottomQ.pop(); 2351 } while (SU->isScheduled); 2352 IsTopNode = false; 2353 } 2354 if (IsAlternating) 2355 IsTopDown = !IsTopDown; 2356 return SU; 2357 } 2358 2359 virtual void schedNode(SUnit *SU, bool IsTopNode) {} 2360 2361 virtual void releaseTopNode(SUnit *SU) { 2362 TopQ.push(SU); 2363 } 2364 virtual void releaseBottomNode(SUnit *SU) { 2365 BottomQ.push(SU); 2366 } 2367 }; 2368 } // namespace 2369 2370 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) { 2371 bool Alternate = !ForceTopDown && !ForceBottomUp; 2372 bool TopDown = !ForceBottomUp; 2373 assert((TopDown || !ForceTopDown) && 2374 "-misched-topdown incompatible with -misched-bottomup"); 2375 return new ScheduleDAGMI(C, new InstructionShuffler(Alternate, TopDown)); 2376 } 2377 static MachineSchedRegistry ShufflerRegistry( 2378 "shuffle", "Shuffle machine instructions alternating directions", 2379 createInstructionShuffler); 2380 #endif // !NDEBUG 2381 2382 //===----------------------------------------------------------------------===// 2383 // GraphWriter support for ScheduleDAGMI. 2384 //===----------------------------------------------------------------------===// 2385 2386 #ifndef NDEBUG 2387 namespace llvm { 2388 2389 template<> struct GraphTraits< 2390 ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {}; 2391 2392 template<> 2393 struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits { 2394 2395 DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {} 2396 2397 static std::string getGraphName(const ScheduleDAG *G) { 2398 return G->MF.getName(); 2399 } 2400 2401 static bool renderGraphFromBottomUp() { 2402 return true; 2403 } 2404 2405 static bool isNodeHidden(const SUnit *Node) { 2406 return (Node->NumPreds > 10 || Node->NumSuccs > 10); 2407 } 2408 2409 static bool hasNodeAddressLabel(const SUnit *Node, 2410 const ScheduleDAG *Graph) { 2411 return false; 2412 } 2413 2414 /// If you want to override the dot attributes printed for a particular 2415 /// edge, override this method. 
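// For example, an artificial (weak/cluster) edge rendered by the method below
// shows up in the emitted .dot text roughly as (node names hypothetical):
//
//   SU0 -> SU7 [color=cyan,style=dashed];
//
// control dependences come out blue and dashed, and ordinary data edges keep
// the default solid styling.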
2416 static std::string getEdgeAttributes(const SUnit *Node, 2417 SUnitIterator EI, 2418 const ScheduleDAG *Graph) { 2419 if (EI.isArtificialDep()) 2420 return "color=cyan,style=dashed"; 2421 if (EI.isCtrlDep()) 2422 return "color=blue,style=dashed"; 2423 return ""; 2424 } 2425 2426 static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) { 2427 std::string Str; 2428 raw_string_ostream SS(Str); 2429 SS << "SU(" << SU->NodeNum << ')'; 2430 return SS.str(); 2431 } 2432 static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) { 2433 return G->getGraphNodeLabel(SU); 2434 } 2435 2436 static std::string getNodeAttributes(const SUnit *N, 2437 const ScheduleDAG *Graph) { 2438 std::string Str("shape=Mrecord"); 2439 const SchedDFSResult *DFS = 2440 static_cast<const ScheduleDAGMI*>(Graph)->getDFSResult(); 2441 if (DFS) { 2442 Str += ",style=filled,fillcolor=\"#"; 2443 Str += DOT::getColorString(DFS->getSubtreeID(N)); 2444 Str += '"'; 2445 } 2446 return Str; 2447 } 2448 }; 2449 } // namespace llvm 2450 #endif // NDEBUG 2451 2452 /// viewGraph - Pop up a ghostview window with the reachable parts of the DAG 2453 /// rendered using 'dot'. 2454 /// 2455 void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) { 2456 #ifndef NDEBUG 2457 ViewGraph(this, Name, false, Title); 2458 #else 2459 errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on " 2460 << "systems with Graphviz or gv!\n"; 2461 #endif // NDEBUG 2462 } 2463 2464 /// Out-of-line implementation with no arguments is handy for gdb. 2465 void ScheduleDAGMI::viewGraph() { 2466 viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName()); 2467 } 2468
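// Typical debugger usage (illustrative): from a breakpoint inside one of
// ScheduleDAGMI's methods, the current region can be rendered with, e.g.,
//
//   (gdb) call this->viewGraph()
//
// which is what the no-argument overload above exists for; in release builds
// the stub only prints the "debug builds" notice.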