//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "misched"

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include <queue>

using namespace llvm;

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
}

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

// FIXME: remove this flag after initial testing. It should always be a good
// thing.
static cl::opt<bool> EnableCopyConstrain("misched-vcopy", cl::Hidden,
    cl::desc("Constrain vreg copies."), cl::init(true));

static cl::opt<bool> EnableLoadCluster("misched-cluster", cl::Hidden,
  cl::desc("Enable load clustering."), cl::init(true));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
    MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedContext,
                         public MachineFunctionPass {
public:
  MachineScheduler();

  virtual void getAnalysisUsage(AnalysisUsage &AU) const;

  virtual void releaseMemory() {}

  virtual bool runOnMachineFunction(MachineFunction&);

  virtual void print(raw_ostream &O, const Module* = 0) const;

  static char ID; // Class identification, replacement for typeinfo
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "misched",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "misched",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineFunctionPass(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return 0;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C);


/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator End) {
  for(; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AliasAnalysis>();

  LIS = &getAnalysis<LiveIntervals>();
  const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();

  if (VerifyScheduling) {
    DEBUG(LIS->print(dbgs()));
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor == useDefaultMachineSched) {
    // Get the default scheduler set by the target.
    Ctor = MachineSchedRegistry::getDefault();
    if (!Ctor) {
      Ctor = createConvergingSched;
      MachineSchedRegistry::setDefault(Ctor);
    }
  }
  // Instantiate the selected scheduler.
  OwningPtr<ScheduleDAGInstrs> Scheduler(Ctor(this));

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler->startBlock(MBB);

    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    unsigned RemainingInstrs = MBB->size();
    for(MachineBasicBlock::iterator RegionEnd = MBB->end();
        RegionEnd != MBB->begin(); RegionEnd = Scheduler->begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end()
          || TII->isSchedulingBoundary(llvm::prior(RegionEnd), MBB, *MF)) {
        --RegionEnd;
        // Count the boundary instruction.
        --RemainingInstrs;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      MachineBasicBlock::iterator I = RegionEnd;
      for(;I != MBB->begin(); --I, --RemainingInstrs) {
        if (TII->isSchedulingBoundary(llvm::prior(I), MBB, *MF))
          break;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler->enterRegion(MBB, I, RegionEnd, RemainingInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == llvm::prior(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler->exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n From: " << *I << " To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " Remaining: " << RemainingInstrs << "\n");

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler->schedule();

      // Close the current region.
      Scheduler->exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler->begin();
    }
    assert(RemainingInstrs == 0 && "Instruction count mismatch!");
    Scheduler->finishBlock();
  }
  Scheduler->finalizeSchedule();
  DEBUG(LIS->print(dbgs()));
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

void MachineScheduler::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ReadyQueue::dump() {
  dbgs() << " " << Name << ": ";
  for (unsigned i = 0, e = Queue.size(); i < e; ++i)
    dbgs() << Queue[i]->NodeNum << " ";
  dbgs() << "\n";
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMI::~ScheduleDAGMI() {
  delete DFSResult;
  DeleteContainerPointers(Mutations);
  delete SchedImpl;
}

bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
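///
/// Weak edges (including cluster edges) only decrement WeakPredsLeft and never
/// make the successor ready; a cluster edge additionally records the successor
/// as NextClusterSucc for the scheduling strategy.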
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(MachineInstr *MI,
                                    MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals
  LIS->handleMove(MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned endcount)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, endcount);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd =
    (RegionEnd == bb->end()) ?
    RegionEnd : llvm::next(RegionEnd);
}

// Set up the register pressure trackers for the top scheduled and bottom
// scheduled regions.
void ScheduleDAGMI::initRegPressure() {
  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.getPressure().dump(TRI));

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    BotRPTracker.recede();

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = TRI->getRegPressureSetLimit(i);
    DEBUG(dbgs() << TRI->getRegPressureSetName(i)
          << "Limit " << Limit
          << " Actual " << RegionPressure[i] << "\n");
    if (RegionPressure[i] > Limit)
      RegionCriticalPSets.push_back(PressureElement(i, 0));
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].PSetID) << " ";
        dbgs() << "\n");
}

// FIXME: When the pressure tracker deals in pressure differences then we won't
// iterate over all RegionCriticalPSets[i].
void ScheduleDAGMI::
updateScheduledPressure(const std::vector<unsigned> &NewMaxPressure) {
  for (unsigned i = 0, e = RegionCriticalPSets.size(); i < e; ++i) {
    unsigned ID = RegionCriticalPSets[i].PSetID;
    int &MaxUnits = RegionCriticalPSets[i].UnitIncrease;
    if ((int)NewMaxPressure[ID] > MaxUnits)
      MaxUnits = NewMaxPressure[ID];
  }
  DEBUG(
    for (unsigned i = 0, e = NewMaxPressure.size(); i < e; ++i) {
      unsigned Limit = TRI->getRegPressureSetLimit(i);
      if (NewMaxPressure[i] > Limit) {
        dbgs() << " " << TRI->getRegPressureSetName(i) << ": "
               << NewMaxPressure[i] << " > " << Limit << "\n";
      }
    });
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMI then it will want to override this virtual method in order to
/// update any specialized state.
void ScheduleDAGMI::schedule() {
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Build the DAG and setup three register pressure trackers.
void ScheduleDAGMI::buildDAGWithRegPressure() {
  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
    Mutations[i]->apply(this);
  }
}

void ScheduleDAGMI::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

void ScheduleDAGMI::findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                                          SmallVectorImpl<SUnit*> &BotRoots) {
  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    SUnit *SU = &(*I);
    assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU->biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!I->NumPredsLeft)
      TopRoots.push_back(SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!I->NumSuccsLeft)
      BotRoots.push_back(SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = NULL;
  NextClusterPred = NULL;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
    SchedImpl->releaseTopNode(*I);
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  TopRPTracker.setPos(CurrentTop);

  CurrentBottom = RegionEnd;
}

/// Move an instruction and update register pressure.
void ScheduleDAGMI::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    // Update top scheduled pressure.
    TopRPTracker.advance();
    assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
    updateScheduledPressure(TopRPTracker.getPressure().MaxSetPressure);
  }
  else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
    }
    // Update bottom scheduled pressure.
    BotRPTracker.recede();
    assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
    updateScheduledPressure(BotRPTracker.getPressure().MaxSetPressure);
  }
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;

  if (DFSResult) {
    unsigned SubtreeID = DFSResult->getSubtreeID(SU);
    if (!ScheduledTrees.test(SubtreeID)) {
      ScheduledTrees.set(SubtreeID);
      DFSResult->scheduleTree(SubtreeID);
      SchedImpl->scheduleTree(SubtreeID);
    }
  }

  // Notify the scheduling strategy after updating the DAG.
  SchedImpl->schedNode(SU, IsTopNode);
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == llvm::prior(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = NULL;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// LoadClusterMutation - DAG post-processing to cluster loads.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between neighboring
/// loads.
class LoadClusterMutation : public ScheduleDAGMutation {
  struct LoadInfo {
    SUnit *SU;
    unsigned BaseReg;
    unsigned Offset;
    LoadInfo(SUnit *su, unsigned reg, unsigned ofs)
      : SU(su), BaseReg(reg), Offset(ofs) {}
  };
  static bool LoadInfoLess(const LoadClusterMutation::LoadInfo &LHS,
                           const LoadClusterMutation::LoadInfo &RHS);

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
public:
  LoadClusterMutation(const TargetInstrInfo *tii,
                      const TargetRegisterInfo *tri)
    : TII(tii), TRI(tri) {}

  virtual void apply(ScheduleDAGMI *DAG);
protected:
  void clusterNeighboringLoads(ArrayRef<SUnit*> Loads, ScheduleDAGMI *DAG);
};
} // anonymous

bool LoadClusterMutation::LoadInfoLess(
  const LoadClusterMutation::LoadInfo &LHS,
  const LoadClusterMutation::LoadInfo &RHS) {
  if (LHS.BaseReg != RHS.BaseReg)
    return LHS.BaseReg < RHS.BaseReg;
  return LHS.Offset < RHS.Offset;
}

void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
                                                  ScheduleDAGMI *DAG) {
  SmallVector<LoadClusterMutation::LoadInfo,32> LoadRecords;
  for (unsigned Idx = 0, End = Loads.size(); Idx != End; ++Idx) {
    SUnit *SU = Loads[Idx];
    unsigned BaseReg;
    unsigned Offset;
    if (TII->getLdStBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
      LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset));
  }
  if (LoadRecords.size() < 2)
    return;
  std::sort(LoadRecords.begin(), LoadRecords.end(), LoadInfoLess);
  unsigned ClusterLength = 1;
  for (unsigned Idx = 0, End = LoadRecords.size(); Idx < (End - 1); ++Idx) {
    if (LoadRecords[Idx].BaseReg != LoadRecords[Idx+1].BaseReg) {
      ClusterLength = 1;
      continue;
    }

    SUnit *SUa = LoadRecords[Idx].SU;
    SUnit *SUb = LoadRecords[Idx+1].SU;
    if (TII->shouldClusterLoads(SUa->getInstr(), SUb->getInstr(), ClusterLength)
        && DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {

      DEBUG(dbgs() << "Cluster loads SU(" << SUa->NodeNum << ") - SU("
            << SUb->NodeNum << ")\n");
      // Copy successor edges from SUa to SUb. Interleaving computation
      // dependent on SUa can prevent load combining due to register reuse.
      // Predecessor edges do not need to be copied from SUb to SUa since nearby
      // loads should have effectively the same inputs.
      for (SUnit::const_succ_iterator
             SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
        if (SI->getSUnit() == SUb)
          continue;
        DEBUG(dbgs() << " Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
        DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
      }
      ++ClusterLength;
    }
    else
      ClusterLength = 1;
  }
}

/// \brief Callback from DAG postProcessing to create cluster edges for loads.
void LoadClusterMutation::apply(ScheduleDAGMI *DAG) {
  // Map DAG NodeNum to store chain ID.
  DenseMap<unsigned, unsigned> StoreChainIDs;
  // Map each store chain to a set of dependent loads.
  SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->mayLoad())
      continue;
    unsigned ChainPredID = DAG->SUnits.size();
    for (SUnit::const_pred_iterator
           PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
      if (PI->isCtrl()) {
        ChainPredID = PI->getSUnit()->NodeNum;
        break;
      }
    }
    // Check if this chain-like pred has been seen
    // before. ChainPredID==MaxNodeID for loads at the top of the schedule.
    unsigned NumChains = StoreChainDependents.size();
    std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
      StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
    if (Result.second)
      StoreChainDependents.resize(NumChains + 1);
    StoreChainDependents[Result.first->second].push_back(SU);
  }
  // Iterate over the store chains.
  for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
    clusterNeighboringLoads(StoreChainDependents[Idx], DAG);
}

//===----------------------------------------------------------------------===//
// MacroFusion - DAG post-processing to encourage fusion of macro ops.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between instructions
/// that may be fused by the processor into a single operation.
class MacroFusion : public ScheduleDAGMutation {
  const TargetInstrInfo *TII;
public:
  MacroFusion(const TargetInstrInfo *tii): TII(tii) {}

  virtual void apply(ScheduleDAGMI *DAG);
};
} // anonymous

/// \brief Callback from DAG postProcessing to create cluster edges to encourage
/// fused operations.
void MacroFusion::apply(ScheduleDAGMI *DAG) {
  // For now, assume targets can only fuse with the branch.
  MachineInstr *Branch = DAG->ExitSU.getInstr();
  if (!Branch)
    return;

  for (unsigned Idx = DAG->SUnits.size(); Idx > 0;) {
    SUnit *SU = &DAG->SUnits[--Idx];
    if (!TII->shouldScheduleAdjacent(SU->getInstr(), Branch))
      continue;

    // Create a single weak edge from SU to ExitSU. The only effect is to cause
    // bottom-up scheduling to heavily prioritize the clustered SU. There is no
    // need to copy predecessor edges from ExitSU to SU, since top-down
    // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
    // of SU, we could create an artificial edge from the deepest root, but it
    // hasn't been needed yet.
    bool Success = DAG->addEdge(&DAG->ExitSU, SDep(SU, SDep::Cluster));
    (void)Success;
    assert(Success && "No DAG nodes should be reachable from ExitSU");

    DEBUG(dbgs() << "Macro Fuse SU(" << SU->NodeNum << ")\n");
    break;
  }
}

//===----------------------------------------------------------------------===//
// CopyConstrain - DAG post-processing to encourage copy elimination.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create weak edges from all uses of a copy to
/// the one use that defines the copy's source vreg, most likely an induction
/// variable increment.
class CopyConstrain : public ScheduleDAGMutation {
  // Transient state.
  SlotIndex RegionBeginIdx;
  // RegionEndIdx is the slot index of the last non-debug instruction in the
  // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
  SlotIndex RegionEndIdx;
public:
  CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}

  virtual void apply(ScheduleDAGMI *DAG);

protected:
  void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMI *DAG);
};
} // anonymous

/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
/// I0:     = dst
/// I1: src = ...
/// I2:     = dst
/// I3: dst = src (copy)
/// (create pred->succ edges I0->I1, I2->I1)
///
/// 2) Local copy:
/// I0: dst = src (copy)
/// I1:     = dst
/// I2: src = ...
/// I3:     = dst
/// (create pred->succ edges I1->I2, I3->I2)
///
/// Although the MachineScheduler is currently constrained to single blocks,
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMI *DAG) {
  LiveIntervals *LIS = DAG->getLIS();
  MachineInstr *Copy = CopySU->getInstr();

  // Check for pure vreg copies.
  unsigned SrcReg = Copy->getOperand(1).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
    return;

  unsigned DstReg = Copy->getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(DstReg))
    return;

  // Check if either the dest or source is local. If it's live across a back
  // edge, it's not local. Note that if both vregs are live across the back
  // edge, we cannot successfully constrain the copy without cyclic scheduling.
  unsigned LocalReg = DstReg;
  unsigned GlobalReg = SrcReg;
  LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
  if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
    LocalReg = SrcReg;
    GlobalReg = DstReg;
    LocalLI = &LIS->getInterval(LocalReg);
    if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
      return;
  }
  LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);

  // Find the global segment after the start of the local LI.
  LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
  // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
  // local live range. We could create edges from other global uses to the local
  // start, but the coalescer should have already eliminated these cases, so
  // don't bother dealing with it.
  if (GlobalSegment == GlobalLI->end())
    return;

  // If GlobalSegment is killed at the LocalLI->start, the call to find()
  // returned the next global segment. But if GlobalSegment overlaps with
  // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
  // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
  if (GlobalSegment->contains(LocalLI->beginIndex()))
    ++GlobalSegment;

  if (GlobalSegment == GlobalLI->end())
    return;

  // Check if GlobalLI contains a hole in the vicinity of LocalLI.
  if (GlobalSegment != GlobalLI->begin()) {
    // Two address defs have no hole.
    if (SlotIndex::isSameInstr(llvm::prior(GlobalSegment)->end,
                               GlobalSegment->start)) {
      return;
    }
    // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
    // it would be a disconnected component in the live range.
    assert(llvm::prior(GlobalSegment)->start < LocalLI->beginIndex() &&
           "Disconnected LRG within the scheduling region.");
  }
  MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
  if (!GlobalDef)
    return;

  SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
  if (!GlobalSU)
    return;

  // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
  // constraining the uses of the last local def to precede GlobalDef.
  SmallVector<SUnit*,8> LocalUses;
  const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
  MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
  SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
  for (SUnit::const_succ_iterator
         I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
       I != E; ++I) {
    if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
      continue;
    if (I->getSUnit() == GlobalSU)
      continue;
    if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
      return;
    LocalUses.push_back(I->getSUnit());
  }
  // Open the top of the GlobalLI hole by constraining any earlier global uses
  // to precede the start of LocalLI.
  SmallVector<SUnit*,8> GlobalUses;
  MachineInstr *FirstLocalDef =
    LIS->getInstructionFromIndex(LocalLI->beginIndex());
  SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
  for (SUnit::const_pred_iterator
         I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
    if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
      continue;
    if (I->getSUnit() == FirstLocalSU)
      continue;
    if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
      return;
    GlobalUses.push_back(I->getSUnit());
  }
  DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
  // Add the weak edges.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << " Local use SU(" << (*I)->NodeNum << ") -> SU("
          << GlobalSU->NodeNum << ")\n");
    DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
  }
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << " Global use SU(" << (*I)->NodeNum << ") -> SU("
          << FirstLocalSU->NodeNum << ")\n");
    DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
  }
}

/// \brief Callback from DAG postProcessing to create weak edges to encourage
/// copy elimination.
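///
/// The region's begin and end slot indexes are computed once here, skipping
/// debug values, before constrainLocalCopy is invoked on each COPY in the
/// region.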
void CopyConstrain::apply(ScheduleDAGMI *DAG) {
  MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
  if (FirstPos == DAG->end())
    return;
  RegionBeginIdx = DAG->getLIS()->getInstructionIndex(&*FirstPos);
  RegionEndIdx = DAG->getLIS()->getInstructionIndex(
    &*priorNonDebug(DAG->end(), DAG->begin()));

  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->isCopy())
      continue;

    constrainLocalCopy(SU, DAG);
  }
}

//===----------------------------------------------------------------------===//
// ConvergingScheduler - Implementation of the standard MachineSchedStrategy.
//===----------------------------------------------------------------------===//

namespace {
/// ConvergingScheduler shrinks the unscheduled zone using heuristics to balance
/// the schedule.
class ConvergingScheduler : public MachineSchedStrategy {
public:
  /// Represent the type of SchedCandidate found within a single queue.
  /// pickNodeBidirectional depends on these listed by decreasing priority.
  enum CandReason {
    NoCand, PhysRegCopy, SingleExcess, SingleCritical, Cluster, Weak,
    ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce,
    TopDepthReduce, TopPathReduce, SingleMax, MultiPressure, NextDefUse,
    NodeOrder};

#ifndef NDEBUG
  static const char *getReasonStr(ConvergingScheduler::CandReason Reason);
#endif

  /// Policy for scheduling the next instruction in the candidate's zone.
  struct CandPolicy {
    bool ReduceLatency;
    unsigned ReduceResIdx;
    unsigned DemandResIdx;

    CandPolicy(): ReduceLatency(false), ReduceResIdx(0), DemandResIdx(0) {}
  };

  /// Status of an instruction's critical resource consumption.
  struct SchedResourceDelta {
    // Count critical resources in the scheduled region required by SU.
    unsigned CritResources;

    // Count critical resources from another region consumed by SU.
    unsigned DemandedResources;

    SchedResourceDelta(): CritResources(0), DemandedResources(0) {}

    bool operator==(const SchedResourceDelta &RHS) const {
      return CritResources == RHS.CritResources
        && DemandedResources == RHS.DemandedResources;
    }
    bool operator!=(const SchedResourceDelta &RHS) const {
      return !operator==(RHS);
    }
  };

  /// Store the state used by ConvergingScheduler heuristics, required for the
  /// lifetime of one invocation of pickNode().
  struct SchedCandidate {
    CandPolicy Policy;

    // The best SUnit candidate.
    SUnit *SU;

    // The reason for this candidate.
    CandReason Reason;

    // Register pressure values for the best candidate.
    RegPressureDelta RPDelta;

    // Critical resource consumption of the best candidate.
    SchedResourceDelta ResDelta;

    SchedCandidate(const CandPolicy &policy)
      : Policy(policy), SU(NULL), Reason(NoCand) {}

    bool isValid() const { return SU; }

    // Copy the status of another candidate without changing policy.
    void setBest(SchedCandidate &Best) {
      assert(Best.Reason != NoCand && "uninitialized Sched candidate");
      SU = Best.SU;
      Reason = Best.Reason;
      RPDelta = Best.RPDelta;
      ResDelta = Best.ResDelta;
    }

    void initResourceDelta(const ScheduleDAGMI *DAG,
                           const TargetSchedModel *SchedModel);
  };

  /// Summarize the unscheduled region.
  struct SchedRemainder {
    // Critical path through the DAG in expected latency.
    unsigned CriticalPath;

    // Unscheduled resources
    SmallVector<unsigned, 16> RemainingCounts;
    // Critical resource for the unscheduled zone.
    unsigned CritResIdx;
    // Number of micro-ops left to schedule.
    unsigned RemainingMicroOps;

    void reset() {
      CriticalPath = 0;
      RemainingCounts.clear();
      CritResIdx = 0;
      RemainingMicroOps = 0;
    }

    SchedRemainder() { reset(); }

    void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);

    unsigned getMaxRemainingCount(const TargetSchedModel *SchedModel) const {
      if (!SchedModel->hasInstrSchedModel())
        return 0;

      return std::max(
        RemainingMicroOps * SchedModel->getMicroOpFactor(),
        RemainingCounts[CritResIdx]);
    }
  };

  /// Each Scheduling boundary is associated with ready queues. It tracks the
  /// current cycle in the direction of movement, and maintains the state
  /// of "hazards" and other interlocks at the current cycle.
  struct SchedBoundary {
    ScheduleDAGMI *DAG;
    const TargetSchedModel *SchedModel;
    SchedRemainder *Rem;

    ReadyQueue Available;
    ReadyQueue Pending;
    bool CheckPending;

    // For heuristics, keep a list of the nodes that immediately depend on the
    // most recently scheduled node.
    SmallPtrSet<const SUnit*, 8> NextSUs;

    ScheduleHazardRecognizer *HazardRec;

    unsigned CurrCycle;
    unsigned IssueCount;

    /// MinReadyCycle - Cycle of the soonest available instruction.
    unsigned MinReadyCycle;

    // The expected latency of the critical path in this scheduled zone.
    unsigned ExpectedLatency;

    // Resources used in the scheduled zone beyond this boundary.
    SmallVector<unsigned, 16> ResourceCounts;

    // Cache the critical resources ID in this scheduled zone.
    unsigned CritResIdx;

    // Is the scheduled region resource limited vs. latency limited.
    bool IsResourceLimited;

    unsigned ExpectedCount;

#ifndef NDEBUG
    // Remember the greatest min operand latency.
    unsigned MaxMinLatency;
#endif

    void reset() {
      // A new HazardRec is created for each DAG and owned by SchedBoundary.
      delete HazardRec;

      Available.clear();
      Pending.clear();
      CheckPending = false;
      NextSUs.clear();
      HazardRec = 0;
      CurrCycle = 0;
      IssueCount = 0;
      MinReadyCycle = UINT_MAX;
      ExpectedLatency = 0;
      ResourceCounts.resize(1);
      assert(!ResourceCounts[0] && "nonzero count for bad resource");
      CritResIdx = 0;
      IsResourceLimited = false;
      ExpectedCount = 0;
#ifndef NDEBUG
      MaxMinLatency = 0;
#endif
      // Reserve a zero-count for invalid CritResIdx.
      ResourceCounts.resize(1);
    }

    /// Pending queues extend the ready queues with the same ID and the
    /// PendingFlag set.
    SchedBoundary(unsigned ID, const Twine &Name):
      DAG(0), SchedModel(0), Rem(0), Available(ID, Name+".A"),
      Pending(ID << ConvergingScheduler::LogMaxQID, Name+".P"),
      HazardRec(0) {
      reset();
    }

    ~SchedBoundary() { delete HazardRec; }

    void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel,
              SchedRemainder *rem);

    bool isTop() const {
      return Available.getID() == ConvergingScheduler::TopQID;
    }

    unsigned getUnscheduledLatency(SUnit *SU) const {
      if (isTop())
        return SU->getHeight();
      return SU->getDepth() + SU->Latency;
    }

    unsigned getCriticalCount() const {
      return ResourceCounts[CritResIdx];
    }

    bool checkHazard(SUnit *SU);

    void setLatencyPolicy(CandPolicy &Policy);

    void releaseNode(SUnit *SU, unsigned ReadyCycle);

    void bumpCycle();

    void countResource(unsigned PIdx, unsigned Cycles);

    void bumpNode(SUnit *SU);

    void releasePending();

    void removeReady(SUnit *SU);

    SUnit *pickOnlyChoice();
  };

private:
  ScheduleDAGMI *DAG;
  const TargetSchedModel *SchedModel;
  const TargetRegisterInfo *TRI;

  // State of the top and bottom scheduled instruction boundaries.
  SchedRemainder Rem;
  SchedBoundary Top;
  SchedBoundary Bot;

public:
  /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
  enum {
    TopQID = 1,
    BotQID = 2,
    LogMaxQID = 2
  };

  ConvergingScheduler():
    DAG(0), SchedModel(0), TRI(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}

  virtual void initialize(ScheduleDAGMI *dag);

  virtual SUnit *pickNode(bool &IsTopNode);

  virtual void schedNode(SUnit *SU, bool IsTopNode);

  virtual void releaseTopNode(SUnit *SU);

  virtual void releaseBottomNode(SUnit *SU);

  virtual void registerRoots();

protected:
  void balanceZones(
    ConvergingScheduler::SchedBoundary &CriticalZone,
    ConvergingScheduler::SchedCandidate &CriticalCand,
    ConvergingScheduler::SchedBoundary &OppositeZone,
    ConvergingScheduler::SchedCandidate &OppositeCand);

  void checkResourceLimits(ConvergingScheduler::SchedCandidate &TopCand,
                           ConvergingScheduler::SchedCandidate &BotCand);

  void tryCandidate(SchedCandidate &Cand,
                    SchedCandidate &TryCand,
                    SchedBoundary &Zone,
                    const RegPressureTracker &RPTracker,
                    RegPressureTracker &TempTracker);

  SUnit *pickNodeBidirectional(bool &IsTopNode);

  void pickNodeFromQueue(SchedBoundary &Zone,
                         const RegPressureTracker &RPTracker,
                         SchedCandidate &Candidate);

  void reschedulePhysRegCopies(SUnit *SU, bool isTop);

#ifndef NDEBUG
  void traceCandidate(const SchedCandidate &Cand);
#endif
};
} // namespace

void ConvergingScheduler::SchedRemainder::
init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
  reset();
  if (!SchedModel->hasInstrSchedModel())
    return;
  RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
  for (std::vector<SUnit>::iterator
         I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
    RemainingMicroOps += SchedModel->getNumMicroOps(I->getInstr(), SC);
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI !=
           PE; ++PI) {
      unsigned PIdx = PI->ProcResourceIdx;
      unsigned Factor = SchedModel->getResourceFactor(PIdx);
      RemainingCounts[PIdx] += (Factor * PI->Cycles);
    }
  }
  for (unsigned PIdx = 0, PEnd = SchedModel->getNumProcResourceKinds();
       PIdx != PEnd; ++PIdx) {
    if ((int)(RemainingCounts[PIdx] - RemainingCounts[CritResIdx])
        >= (int)SchedModel->getLatencyFactor()) {
      CritResIdx = PIdx;
    }
  }
}

void ConvergingScheduler::SchedBoundary::
init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
  reset();
  DAG = dag;
  SchedModel = smodel;
  Rem = rem;
  if (SchedModel->hasInstrSchedModel())
    ResourceCounts.resize(SchedModel->getNumProcResourceKinds());
}

void ConvergingScheduler::initialize(ScheduleDAGMI *dag) {
  DAG = dag;
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  Bot.init(DAG, SchedModel, &Rem);

  // Initialize resource counts.

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
  // are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  const TargetMachine &TM = DAG->MF.getTarget();
  Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
  Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);

  assert((!ForceTopDown || !ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
}

void ConvergingScheduler::releaseTopNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle;
    unsigned MinLatency = I->getMinLatency();
#ifndef NDEBUG
    Top.MaxMinLatency = std::max(MinLatency, Top.MaxMinLatency);
#endif
    if (SU->TopReadyCycle < PredReadyCycle + MinLatency)
      SU->TopReadyCycle = PredReadyCycle + MinLatency;
  }
  Top.releaseNode(SU, SU->TopReadyCycle);
}

void ConvergingScheduler::releaseBottomNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  assert(SU->getInstr() && "Scheduled SUnit must have instr");

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isWeak())
      continue;
    unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
    unsigned MinLatency = I->getMinLatency();
#ifndef NDEBUG
    Bot.MaxMinLatency = std::max(MinLatency, Bot.MaxMinLatency);
#endif
    if (SU->BotReadyCycle < SuccReadyCycle + MinLatency)
      SU->BotReadyCycle = SuccReadyCycle + MinLatency;
  }
  Bot.releaseNode(SU, SU->BotReadyCycle);
}

void ConvergingScheduler::registerRoots() {
  Rem.CriticalPath = DAG->ExitSU.getDepth();
  // Some roots may not feed into ExitSU. Check all of them in case.
  for (std::vector<SUnit*>::const_iterator
         I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
    if ((*I)->getDepth() > Rem.CriticalPath)
      Rem.CriticalPath = (*I)->getDepth();
  }
  DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n');
}

/// Does this SU have a hazard within the current instruction group.
///
/// The scheduler supports two modes of hazard recognition.
/// The first is the ScheduleHazardRecognizer API. It is a fully general hazard
/// recognizer that supports highly complicated in-order reservation tables
/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
///
/// The second is a streamlined mechanism that checks for hazards based on
/// simple counters that the scheduler itself maintains. It explicitly checks
/// for instruction dispatch limitations, including the number of micro-ops that
/// can dispatch per cycle.
///
/// TODO: Also check whether the SU must start a new group.
bool ConvergingScheduler::SchedBoundary::checkHazard(SUnit *SU) {
  if (HazardRec->isEnabled())
    return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard;

  unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
  if ((IssueCount > 0) && (IssueCount + uops > SchedModel->getIssueWidth())) {
    DEBUG(dbgs() << " SU(" << SU->NodeNum << ") uops="
          << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
    return true;
  }
  return false;
}

/// Compute the remaining latency to determine whether ILP should be increased.
void ConvergingScheduler::SchedBoundary::setLatencyPolicy(CandPolicy &Policy) {
  // FIXME: compile time. In all, we visit four queues here; we should only
  // need to visit the one that was last popped if we cache the result.
  unsigned RemLatency = 0;
  for (ReadyQueue::iterator I = Available.begin(), E = Available.end();
       I != E; ++I) {
    unsigned L = getUnscheduledLatency(*I);
    DEBUG(dbgs() << " " << Available.getName()
          << " RemLatency SU(" << (*I)->NodeNum << ") " << L << '\n');
    if (L > RemLatency)
      RemLatency = L;
  }
  for (ReadyQueue::iterator I = Pending.begin(), E = Pending.end();
       I != E; ++I) {
    unsigned L = getUnscheduledLatency(*I);
    if (L > RemLatency)
      RemLatency = L;
  }
  unsigned CriticalPathLimit = Rem->CriticalPath + SchedModel->getILPWindow();
  DEBUG(dbgs() << " " << Available.getName()
        << " ExpectedLatency " << ExpectedLatency
        << " CP Limit " << CriticalPathLimit << '\n');
  if (RemLatency + ExpectedLatency >= CriticalPathLimit
      && RemLatency > Rem->getMaxRemainingCount(SchedModel)) {
    Policy.ReduceLatency = true;
    DEBUG(dbgs() << " Increase ILP: " << Available.getName() << '\n');
  }
}

void ConvergingScheduler::SchedBoundary::releaseNode(SUnit *SU,
                                                     unsigned ReadyCycle) {

  if (ReadyCycle < MinReadyCycle)
    MinReadyCycle = ReadyCycle;

  // Check for interlocks first. For the purpose of other heuristics, an
  // instruction that cannot issue appears as if it's not in the ReadyQueue.
  if (ReadyCycle > CurrCycle || checkHazard(SU))
    Pending.push(SU);
  else
    Available.push(SU);

  // Record this node as an immediate dependent of the scheduled node.
  NextSUs.insert(SU);
}

/// Move the boundary of scheduled code by one cycle.
void ConvergingScheduler::SchedBoundary::bumpCycle() {
  unsigned Width = SchedModel->getIssueWidth();
  IssueCount = (IssueCount <= Width) ? 0 : IssueCount - Width;

  unsigned NextCycle = CurrCycle + 1;
  assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
  if (MinReadyCycle > NextCycle) {
    IssueCount = 0;
    NextCycle = MinReadyCycle;
  }

  if (!HazardRec->isEnabled()) {
    // Bypass HazardRec virtual calls.
1593 CurrCycle = NextCycle; 1594 } 1595 else { 1596 // Bypass getHazardType calls in case of long latency. 1597 for (; CurrCycle != NextCycle; ++CurrCycle) { 1598 if (isTop()) 1599 HazardRec->AdvanceCycle(); 1600 else 1601 HazardRec->RecedeCycle(); 1602 } 1603 } 1604 CheckPending = true; 1605 IsResourceLimited = getCriticalCount() > std::max(ExpectedLatency, CurrCycle); 1606 1607 DEBUG(dbgs() << " " << Available.getName() 1608 << " Cycle: " << CurrCycle << '\n'); 1609 } 1610 1611 /// Add the given processor resource to this scheduled zone. 1612 void ConvergingScheduler::SchedBoundary::countResource(unsigned PIdx, 1613 unsigned Cycles) { 1614 unsigned Factor = SchedModel->getResourceFactor(PIdx); 1615 DEBUG(dbgs() << " " << SchedModel->getProcResource(PIdx)->Name 1616 << " +(" << Cycles << "x" << Factor 1617 << ") / " << SchedModel->getLatencyFactor() << '\n'); 1618 1619 unsigned Count = Factor * Cycles; 1620 ResourceCounts[PIdx] += Count; 1621 assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted"); 1622 Rem->RemainingCounts[PIdx] -= Count; 1623 1624 // Check if this resource exceeds the current critical resource by a full 1625 // cycle. If so, it becomes the critical resource. 1626 if ((int)(ResourceCounts[PIdx] - ResourceCounts[CritResIdx]) 1627 >= (int)SchedModel->getLatencyFactor()) { 1628 CritResIdx = PIdx; 1629 DEBUG(dbgs() << " *** Critical resource " 1630 << SchedModel->getProcResource(PIdx)->Name << " x" 1631 << ResourceCounts[PIdx] << '\n'); 1632 } 1633 } 1634 1635 /// Move the boundary of scheduled code by one SUnit. 1636 void ConvergingScheduler::SchedBoundary::bumpNode(SUnit *SU) { 1637 // Update the reservation table. 1638 if (HazardRec->isEnabled()) { 1639 if (!isTop() && SU->isCall) { 1640 // Calls are scheduled with their preceding instructions. For bottom-up 1641 // scheduling, clear the pipeline state before emitting. 1642 HazardRec->Reset(); 1643 } 1644 HazardRec->EmitInstruction(SU); 1645 } 1646 // Update resource counts and critical resource. 1647 if (SchedModel->hasInstrSchedModel()) { 1648 const MCSchedClassDesc *SC = DAG->getSchedClass(SU); 1649 Rem->RemainingMicroOps -= SchedModel->getNumMicroOps(SU->getInstr(), SC); 1650 for (TargetSchedModel::ProcResIter 1651 PI = SchedModel->getWriteProcResBegin(SC), 1652 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) { 1653 countResource(PI->ProcResourceIdx, PI->Cycles); 1654 } 1655 } 1656 if (isTop()) { 1657 if (SU->getDepth() > ExpectedLatency) 1658 ExpectedLatency = SU->getDepth(); 1659 } 1660 else { 1661 if (SU->getHeight() > ExpectedLatency) 1662 ExpectedLatency = SU->getHeight(); 1663 } 1664 1665 IsResourceLimited = getCriticalCount() > std::max(ExpectedLatency, CurrCycle); 1666 1667 // Check the instruction group dispatch limit. 1668 // TODO: Check if this SU must end a dispatch group. 1669 IssueCount += SchedModel->getNumMicroOps(SU->getInstr()); 1670 1671 // checkHazard prevents scheduling multiple instructions per cycle that exceed 1672 // issue width. However, we commonly reach the maximum. In this case 1673 // opportunistically bump the cycle to avoid uselessly checking everything in 1674 // the readyQ. Furthermore, a single instruction may produce more than one 1675 // cycle's worth of micro-ops. 1676 if (IssueCount >= SchedModel->getIssueWidth()) { 1677 DEBUG(dbgs() << " *** Max instrs at cycle " << CurrCycle << '\n'); 1678 bumpCycle(); 1679 } 1680 } 1681 1682 /// Release pending ready nodes in to the available queue. This makes them 1683 /// visible to heuristics. 
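/// (A node sits in Pending when its ready cycle is still in the future or it
/// would trigger a hazard in the current cycle; see releaseNode and checkHazard.)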
1684 void ConvergingScheduler::SchedBoundary::releasePending() { 1685 // If the available queue is empty, it is safe to reset MinReadyCycle. 1686 if (Available.empty()) 1687 MinReadyCycle = UINT_MAX; 1688 1689 // Check to see if any of the pending instructions are ready to issue. If 1690 // so, add them to the available queue. 1691 for (unsigned i = 0, e = Pending.size(); i != e; ++i) { 1692 SUnit *SU = *(Pending.begin()+i); 1693 unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle; 1694 1695 if (ReadyCycle < MinReadyCycle) 1696 MinReadyCycle = ReadyCycle; 1697 1698 if (ReadyCycle > CurrCycle) 1699 continue; 1700 1701 if (checkHazard(SU)) 1702 continue; 1703 1704 Available.push(SU); 1705 Pending.remove(Pending.begin()+i); 1706 --i; --e; 1707 } 1708 DEBUG(if (!Pending.empty()) Pending.dump()); 1709 CheckPending = false; 1710 } 1711 1712 /// Remove SU from the ready set for this boundary. 1713 void ConvergingScheduler::SchedBoundary::removeReady(SUnit *SU) { 1714 if (Available.isInQueue(SU)) 1715 Available.remove(Available.find(SU)); 1716 else { 1717 assert(Pending.isInQueue(SU) && "bad ready count"); 1718 Pending.remove(Pending.find(SU)); 1719 } 1720 } 1721 1722 /// If this queue only has one ready candidate, return it. As a side effect, 1723 /// defer any nodes that now hit a hazard, and advance the cycle until at least 1724 /// one node is ready. If multiple instructions are ready, return NULL. 1725 SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() { 1726 if (CheckPending) 1727 releasePending(); 1728 1729 if (IssueCount > 0) { 1730 // Defer any ready instrs that now have a hazard. 1731 for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) { 1732 if (checkHazard(*I)) { 1733 Pending.push(*I); 1734 I = Available.remove(I); 1735 continue; 1736 } 1737 ++I; 1738 } 1739 } 1740 for (unsigned i = 0; Available.empty(); ++i) { 1741 assert(i <= (HazardRec->getMaxLookAhead() + MaxMinLatency) && 1742 "permanent hazard"); (void)i; 1743 bumpCycle(); 1744 releasePending(); 1745 } 1746 if (Available.size() == 1) 1747 return *Available.begin(); 1748 return NULL; 1749 } 1750 1751 /// Record the candidate policy for opposite zones with different critical 1752 /// resources. 1753 /// 1754 /// If the CriticalZone is latency limited, don't force a policy for the 1755 /// candidates here. Instead, setLatencyPolicy sets ReduceLatency if needed. 1756 void ConvergingScheduler::balanceZones( 1757 ConvergingScheduler::SchedBoundary &CriticalZone, 1758 ConvergingScheduler::SchedCandidate &CriticalCand, 1759 ConvergingScheduler::SchedBoundary &OppositeZone, 1760 ConvergingScheduler::SchedCandidate &OppositeCand) { 1761 1762 if (!CriticalZone.IsResourceLimited) 1763 return; 1764 assert(SchedModel->hasInstrSchedModel() && "required schedmodel"); 1765 1766 SchedRemainder *Rem = CriticalZone.Rem; 1767 1768 // If the critical zone is overconsuming a resource relative to the 1769 // remainder, try to reduce it. 1770 unsigned RemainingCritCount = 1771 Rem->RemainingCounts[CriticalZone.CritResIdx]; 1772 if ((int)(Rem->getMaxRemainingCount(SchedModel) - RemainingCritCount) 1773 > (int)SchedModel->getLatencyFactor()) { 1774 CriticalCand.Policy.ReduceResIdx = CriticalZone.CritResIdx; 1775 DEBUG(dbgs() << " Balance " << CriticalZone.Available.getName() 1776 << " reduce " 1777 << SchedModel->getProcResource(CriticalZone.CritResIdx)->Name 1778 << '\n'); 1779 } 1780 // If the other zone is underconsuming a resource relative to the full zone, 1781 // try to increase it. 
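// Illustrative numbers only: with a latency factor of 2, an opposite-zone
// expected count of 12, and only 8 units of the critical resource consumed
// there, 12 - 8 = 4 > 2, so the opposite zone is asked to demand that resource.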
1782 unsigned OppositeCount =
1783 OppositeZone.ResourceCounts[CriticalZone.CritResIdx];
1784 if ((int)(OppositeZone.ExpectedCount - OppositeCount)
1785 > (int)SchedModel->getLatencyFactor()) {
1786 OppositeCand.Policy.DemandResIdx = CriticalZone.CritResIdx;
1787 DEBUG(dbgs() << " Balance " << OppositeZone.Available.getName()
1788 << " demand "
1789 << SchedModel->getProcResource(OppositeZone.CritResIdx)->Name
1790 << '\n');
1791 }
1792 }
1793
1794 /// Determine if the scheduled zones exceed resource limits or the critical path
1795 /// and set each candidate's resource-reduction and latency policies accordingly.
1796 void ConvergingScheduler::checkResourceLimits(
1797 ConvergingScheduler::SchedCandidate &TopCand,
1798 ConvergingScheduler::SchedCandidate &BotCand) {
1799
1800 // Set ReduceLatency to true if needed.
1801 Bot.setLatencyPolicy(BotCand.Policy);
1802 Top.setLatencyPolicy(TopCand.Policy);
1803
1804 // Handle resource-limited regions.
1805 if (Top.IsResourceLimited && Bot.IsResourceLimited
1806 && Top.CritResIdx == Bot.CritResIdx) {
1807 // If the scheduled critical resource in both zones is no longer the
1808 // critical remaining resource, attempt to reduce resource height both ways.
1809 if (Top.CritResIdx != Rem.CritResIdx) {
1810 TopCand.Policy.ReduceResIdx = Top.CritResIdx;
1811 BotCand.Policy.ReduceResIdx = Bot.CritResIdx;
1812 DEBUG(dbgs() << " Reduce scheduled "
1813 << SchedModel->getProcResource(Top.CritResIdx)->Name << '\n');
1814 }
1815 return;
1816 }
1817 // Handle latency-limited regions.
1818 if (!Top.IsResourceLimited && !Bot.IsResourceLimited) {
1819 // If the total scheduled expected latency exceeds the region's critical
1820 // path then reduce latency both ways.
1821 //
1822 // Just because a zone is not resource limited does not mean it is latency
1823 // limited. Unbuffered resources, such as the max micro-op count, may cause
1824 // CurrCycle to exceed expected latency.
1825 if ((Top.ExpectedLatency + Bot.ExpectedLatency >= Rem.CriticalPath)
1826 && (Rem.CriticalPath > Top.CurrCycle + Bot.CurrCycle)) {
1827 TopCand.Policy.ReduceLatency = true;
1828 BotCand.Policy.ReduceLatency = true;
1829 DEBUG(dbgs() << " Reduce scheduled latency " << Top.ExpectedLatency
1830 << " + " << Bot.ExpectedLatency << '\n');
1831 }
1832 return;
1833 }
1834 // The critical resource is different in each zone, so request balancing.
1835
1836 // Compute the cost of each zone.
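// The cost is expressed in scaled resource units: take the larger of expected
// latency and the current cycle, scale it by the latency factor so it is
// comparable with resource counts, then take the max with the critical
// resource count.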
1837 Top.ExpectedCount = std::max(Top.ExpectedLatency, Top.CurrCycle); 1838 Top.ExpectedCount = std::max( 1839 Top.getCriticalCount(), 1840 Top.ExpectedCount * SchedModel->getLatencyFactor()); 1841 Bot.ExpectedCount = std::max(Bot.ExpectedLatency, Bot.CurrCycle); 1842 Bot.ExpectedCount = std::max( 1843 Bot.getCriticalCount(), 1844 Bot.ExpectedCount * SchedModel->getLatencyFactor()); 1845 1846 balanceZones(Top, TopCand, Bot, BotCand); 1847 balanceZones(Bot, BotCand, Top, TopCand); 1848 } 1849 1850 void ConvergingScheduler::SchedCandidate:: 1851 initResourceDelta(const ScheduleDAGMI *DAG, 1852 const TargetSchedModel *SchedModel) { 1853 if (!Policy.ReduceResIdx && !Policy.DemandResIdx) 1854 return; 1855 1856 const MCSchedClassDesc *SC = DAG->getSchedClass(SU); 1857 for (TargetSchedModel::ProcResIter 1858 PI = SchedModel->getWriteProcResBegin(SC), 1859 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) { 1860 if (PI->ProcResourceIdx == Policy.ReduceResIdx) 1861 ResDelta.CritResources += PI->Cycles; 1862 if (PI->ProcResourceIdx == Policy.DemandResIdx) 1863 ResDelta.DemandedResources += PI->Cycles; 1864 } 1865 } 1866 1867 /// Return true if this heuristic determines order. 1868 static bool tryLess(int TryVal, int CandVal, 1869 ConvergingScheduler::SchedCandidate &TryCand, 1870 ConvergingScheduler::SchedCandidate &Cand, 1871 ConvergingScheduler::CandReason Reason) { 1872 if (TryVal < CandVal) { 1873 TryCand.Reason = Reason; 1874 return true; 1875 } 1876 if (TryVal > CandVal) { 1877 if (Cand.Reason > Reason) 1878 Cand.Reason = Reason; 1879 return true; 1880 } 1881 return false; 1882 } 1883 1884 static bool tryGreater(int TryVal, int CandVal, 1885 ConvergingScheduler::SchedCandidate &TryCand, 1886 ConvergingScheduler::SchedCandidate &Cand, 1887 ConvergingScheduler::CandReason Reason) { 1888 if (TryVal > CandVal) { 1889 TryCand.Reason = Reason; 1890 return true; 1891 } 1892 if (TryVal < CandVal) { 1893 if (Cand.Reason > Reason) 1894 Cand.Reason = Reason; 1895 return true; 1896 } 1897 return false; 1898 } 1899 1900 static unsigned getWeakLeft(const SUnit *SU, bool isTop) { 1901 return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft; 1902 } 1903 1904 /// Minimize physical register live ranges. Regalloc wants them adjacent to 1905 /// their physreg def/use. 1906 /// 1907 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf 1908 /// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled 1909 /// with the operation that produces or consumes the physreg. We'll do this when 1910 /// regalloc has support for parallel copies. 1911 static int biasPhysRegCopy(const SUnit *SU, bool isTop) { 1912 const MachineInstr *MI = SU->getInstr(); 1913 if (!MI->isCopy()) 1914 return 0; 1915 1916 unsigned ScheduledOper = isTop ? 1 : 0; 1917 unsigned UnscheduledOper = isTop ? 0 : 1; 1918 // If we have already scheduled the physreg produce/consumer, immediately 1919 // schedule the copy. 1920 if (TargetRegisterInfo::isPhysicalRegister( 1921 MI->getOperand(ScheduledOper).getReg())) 1922 return 1; 1923 // If the physreg is at the boundary, defer it. Otherwise schedule it 1924 // immediately to free the dependent. We can hoist the copy later. 1925 bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft; 1926 if (TargetRegisterInfo::isPhysicalRegister( 1927 MI->getOperand(UnscheduledOper).getReg())) 1928 return AtBoundary ? -1 : 1; 1929 return 0; 1930 } 1931 1932 /// Apply a set of heursitics to a new candidate. Heuristics are currently 1933 /// hierarchical. 
This may be more efficient than a graduated cost model because
1934 /// we don't need to evaluate all aspects of the model for each node in the
1935 /// queue. But it's really done to make the heuristics easier to debug and
1936 /// statistically analyze.
1937 ///
1938 /// \param Cand provides the policy and current best candidate.
1939 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
1940 /// \param Zone describes the scheduled zone that we are extending.
1941 /// \param RPTracker describes reg pressure within the scheduled zone.
1942 /// \param TempTracker is a scratch pressure tracker to reuse in queries.
1943 void ConvergingScheduler::tryCandidate(SchedCandidate &Cand,
1944 SchedCandidate &TryCand,
1945 SchedBoundary &Zone,
1946 const RegPressureTracker &RPTracker,
1947 RegPressureTracker &TempTracker) {
1948
1949 // Always initialize TryCand's RPDelta.
1950 TempTracker.getMaxPressureDelta(TryCand.SU->getInstr(), TryCand.RPDelta,
1951 DAG->getRegionCriticalPSets(),
1952 DAG->getRegPressure().MaxSetPressure);
1953
1954 // Initialize the candidate if needed.
1955 if (!Cand.isValid()) {
1956 TryCand.Reason = NodeOrder;
1957 return;
1958 }
1959
1960 if (tryGreater(biasPhysRegCopy(TryCand.SU, Zone.isTop()),
1961 biasPhysRegCopy(Cand.SU, Zone.isTop()),
1962 TryCand, Cand, PhysRegCopy))
1963 return;
1964
1965 // Avoid exceeding the target's limit.
1966 if (tryLess(TryCand.RPDelta.Excess.UnitIncrease,
1967 Cand.RPDelta.Excess.UnitIncrease, TryCand, Cand, SingleExcess))
1968 return;
1969 if (Cand.Reason == SingleExcess)
1970 Cand.Reason = MultiPressure;
1971
1972 // Avoid increasing the max critical pressure in the scheduled region.
1973 if (tryLess(TryCand.RPDelta.CriticalMax.UnitIncrease,
1974 Cand.RPDelta.CriticalMax.UnitIncrease,
1975 TryCand, Cand, SingleCritical))
1976 return;
1977 if (Cand.Reason == SingleCritical)
1978 Cand.Reason = MultiPressure;
1979
1980 // Keep clustered nodes together to encourage downstream peephole
1981 // optimizations which may reduce resource requirements.
1982 //
1983 // This is a best effort to set things up for a post-RA pass. Optimizations
1984 // like generating loads of multiple registers should ideally be done within
1985 // the scheduler pass by combining the loads during DAG postprocessing.
1986 const SUnit *NextClusterSU =
1987 Zone.isTop() ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
1988 if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU,
1989 TryCand, Cand, Cluster))
1990 return;
1991
1992 // Weak edges are for clustering and other constraints.
1993 //
1994 // Deferring TryCand here does not change Cand's reason. This is good in the
1995 // sense that a bad candidate shouldn't affect a previous candidate's
1996 // goodness, but bad in that it is asymmetric and depends on queue order.
1997 CandReason OrigReason = Cand.Reason;
1998 if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()),
1999 getWeakLeft(Cand.SU, Zone.isTop()),
2000 TryCand, Cand, Weak)) {
2001 Cand.Reason = OrigReason;
2002 return;
2003 }
2004 // Avoid critical resource consumption and balance the schedule.
2005 TryCand.initResourceDelta(DAG, SchedModel);
2006 if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
2007 TryCand, Cand, ResourceReduce))
2008 return;
2009 if (tryGreater(TryCand.ResDelta.DemandedResources,
2010 Cand.ResDelta.DemandedResources,
2011 TryCand, Cand, ResourceDemand))
2012 return;
2013
2014 // Avoid serializing long latency dependence chains.
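// When the policy asks for latency reduction: in the top zone, if the current
// best candidate's scaled depth already exceeds the zone's expected count,
// prefer the node with less depth; otherwise prefer the node with greater
// height (the longer remaining path). The bottom zone mirrors this with height
// and depth swapped.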
2015 if (Cand.Policy.ReduceLatency) { 2016 if (Zone.isTop()) { 2017 if (Cand.SU->getDepth() * SchedModel->getLatencyFactor() 2018 > Zone.ExpectedCount) { 2019 if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(), 2020 TryCand, Cand, TopDepthReduce)) 2021 return; 2022 } 2023 if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(), 2024 TryCand, Cand, TopPathReduce)) 2025 return; 2026 } 2027 else { 2028 if (Cand.SU->getHeight() * SchedModel->getLatencyFactor() 2029 > Zone.ExpectedCount) { 2030 if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(), 2031 TryCand, Cand, BotHeightReduce)) 2032 return; 2033 } 2034 if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(), 2035 TryCand, Cand, BotPathReduce)) 2036 return; 2037 } 2038 } 2039 2040 // Avoid increasing the max pressure of the entire region. 2041 if (tryLess(TryCand.RPDelta.CurrentMax.UnitIncrease, 2042 Cand.RPDelta.CurrentMax.UnitIncrease, TryCand, Cand, SingleMax)) 2043 return; 2044 if (Cand.Reason == SingleMax) 2045 Cand.Reason = MultiPressure; 2046 2047 // Prefer immediate defs/users of the last scheduled instruction. This is a 2048 // nice pressure avoidance strategy that also conserves the processor's 2049 // register renaming resources and keeps the machine code readable. 2050 if (tryGreater(Zone.NextSUs.count(TryCand.SU), Zone.NextSUs.count(Cand.SU), 2051 TryCand, Cand, NextDefUse)) 2052 return; 2053 2054 // Fall through to original instruction order. 2055 if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum) 2056 || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) { 2057 TryCand.Reason = NodeOrder; 2058 } 2059 } 2060 2061 /// pickNodeFromQueue helper that returns true if the LHS reg pressure effect is 2062 /// more desirable than RHS from scheduling standpoint. 2063 static bool compareRPDelta(const RegPressureDelta &LHS, 2064 const RegPressureDelta &RHS) { 2065 // Compare each component of pressure in decreasing order of importance 2066 // without checking if any are valid. Invalid PressureElements are assumed to 2067 // have UnitIncrease==0, so are neutral. 2068 2069 // Avoid increasing the max critical pressure in the scheduled region. 2070 if (LHS.Excess.UnitIncrease != RHS.Excess.UnitIncrease) { 2071 DEBUG(dbgs() << " RP excess top - bot: " 2072 << (LHS.Excess.UnitIncrease - RHS.Excess.UnitIncrease) << '\n'); 2073 return LHS.Excess.UnitIncrease < RHS.Excess.UnitIncrease; 2074 } 2075 // Avoid increasing the max critical pressure in the scheduled region. 2076 if (LHS.CriticalMax.UnitIncrease != RHS.CriticalMax.UnitIncrease) { 2077 DEBUG(dbgs() << " RP critical top - bot: " 2078 << (LHS.CriticalMax.UnitIncrease - RHS.CriticalMax.UnitIncrease) 2079 << '\n'); 2080 return LHS.CriticalMax.UnitIncrease < RHS.CriticalMax.UnitIncrease; 2081 } 2082 // Avoid increasing the max pressure of the entire region. 
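// (This ordering mirrors the SingleExcess > SingleCritical > SingleMax
// precedence used by tryCandidate.)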
2083 if (LHS.CurrentMax.UnitIncrease != RHS.CurrentMax.UnitIncrease) { 2084 DEBUG(dbgs() << " RP current top - bot: " 2085 << (LHS.CurrentMax.UnitIncrease - RHS.CurrentMax.UnitIncrease) 2086 << '\n'); 2087 return LHS.CurrentMax.UnitIncrease < RHS.CurrentMax.UnitIncrease; 2088 } 2089 return false; 2090 } 2091 2092 #ifndef NDEBUG 2093 const char *ConvergingScheduler::getReasonStr( 2094 ConvergingScheduler::CandReason Reason) { 2095 switch (Reason) { 2096 case NoCand: return "NOCAND "; 2097 case PhysRegCopy: return "PREG-COPY"; 2098 case SingleExcess: return "REG-EXCESS"; 2099 case SingleCritical: return "REG-CRIT "; 2100 case Cluster: return "CLUSTER "; 2101 case Weak: return "WEAK "; 2102 case SingleMax: return "REG-MAX "; 2103 case MultiPressure: return "REG-MULTI "; 2104 case ResourceReduce: return "RES-REDUCE"; 2105 case ResourceDemand: return "RES-DEMAND"; 2106 case TopDepthReduce: return "TOP-DEPTH "; 2107 case TopPathReduce: return "TOP-PATH "; 2108 case BotHeightReduce:return "BOT-HEIGHT"; 2109 case BotPathReduce: return "BOT-PATH "; 2110 case NextDefUse: return "DEF-USE "; 2111 case NodeOrder: return "ORDER "; 2112 }; 2113 llvm_unreachable("Unknown reason!"); 2114 } 2115 2116 void ConvergingScheduler::traceCandidate(const SchedCandidate &Cand) { 2117 PressureElement P; 2118 unsigned ResIdx = 0; 2119 unsigned Latency = 0; 2120 switch (Cand.Reason) { 2121 default: 2122 break; 2123 case SingleExcess: 2124 P = Cand.RPDelta.Excess; 2125 break; 2126 case SingleCritical: 2127 P = Cand.RPDelta.CriticalMax; 2128 break; 2129 case SingleMax: 2130 P = Cand.RPDelta.CurrentMax; 2131 break; 2132 case ResourceReduce: 2133 ResIdx = Cand.Policy.ReduceResIdx; 2134 break; 2135 case ResourceDemand: 2136 ResIdx = Cand.Policy.DemandResIdx; 2137 break; 2138 case TopDepthReduce: 2139 Latency = Cand.SU->getDepth(); 2140 break; 2141 case TopPathReduce: 2142 Latency = Cand.SU->getHeight(); 2143 break; 2144 case BotHeightReduce: 2145 Latency = Cand.SU->getHeight(); 2146 break; 2147 case BotPathReduce: 2148 Latency = Cand.SU->getDepth(); 2149 break; 2150 } 2151 dbgs() << " SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason); 2152 if (P.isValid()) 2153 dbgs() << " " << TRI->getRegPressureSetName(P.PSetID) 2154 << ":" << P.UnitIncrease << " "; 2155 else 2156 dbgs() << " "; 2157 if (ResIdx) 2158 dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " "; 2159 else 2160 dbgs() << " "; 2161 if (Latency) 2162 dbgs() << " " << Latency << " cycles "; 2163 else 2164 dbgs() << " "; 2165 dbgs() << '\n'; 2166 } 2167 #endif 2168 2169 /// Pick the best candidate from the top queue. 2170 /// 2171 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during 2172 /// DAG building. To adjust for the current scheduling location we need to 2173 /// maintain the number of vreg uses remaining to be top-scheduled. 2174 void ConvergingScheduler::pickNodeFromQueue(SchedBoundary &Zone, 2175 const RegPressureTracker &RPTracker, 2176 SchedCandidate &Cand) { 2177 ReadyQueue &Q = Zone.Available; 2178 2179 DEBUG(Q.dump()); 2180 2181 // getMaxPressureDelta temporarily modifies the tracker. 2182 RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker); 2183 2184 for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) { 2185 2186 SchedCandidate TryCand(Cand.Policy); 2187 TryCand.SU = *I; 2188 tryCandidate(Cand, TryCand, Zone, RPTracker, TempTracker); 2189 if (TryCand.Reason != NoCand) { 2190 // Initialize resource delta if needed in case future heuristics query it. 
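// (initResourceDelta is a no-op unless the policy names a resource, so this
// extra call is cheap.)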
2191 if (TryCand.ResDelta == SchedResourceDelta())
2192 TryCand.initResourceDelta(DAG, SchedModel);
2193 Cand.setBest(TryCand);
2194 DEBUG(traceCandidate(Cand));
2195 }
2196 }
2197 }
2198
2199 static void tracePick(const ConvergingScheduler::SchedCandidate &Cand,
2200 bool IsTop) {
2201 DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
2202 << ConvergingScheduler::getReasonStr(Cand.Reason) << '\n');
2203 }
2204
2205 /// Pick the best candidate node from either the top or bottom queue.
2206 SUnit *ConvergingScheduler::pickNodeBidirectional(bool &IsTopNode) {
2207 // Schedule as far as possible in the direction of no choice. This is most
2208 // efficient, but also provides the best heuristics for CriticalPSets.
2209 if (SUnit *SU = Bot.pickOnlyChoice()) {
2210 IsTopNode = false;
2211 DEBUG(dbgs() << "Pick Bot NOCAND\n");
2212 return SU;
2213 }
2214 if (SUnit *SU = Top.pickOnlyChoice()) {
2215 IsTopNode = true;
2216 DEBUG(dbgs() << "Pick Top NOCAND\n");
2217 return SU;
2218 }
2219 CandPolicy NoPolicy;
2220 SchedCandidate BotCand(NoPolicy);
2221 SchedCandidate TopCand(NoPolicy);
2222 checkResourceLimits(TopCand, BotCand);
2223
2224 // Prefer bottom scheduling when heuristics are silent.
2225 pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
2226 assert(BotCand.Reason != NoCand && "failed to find the first candidate");
2227
2228 // If either Q has a single candidate that provides the least increase in
2229 // Excess pressure, we can immediately schedule from that Q.
2230 //
2231 // RegionCriticalPSets summarizes the pressure within the scheduled region and
2232 // affects picking from either Q. If scheduling in one direction must
2233 // increase pressure for one of the excess PSets, then schedule in that
2234 // direction first to provide more freedom in the other direction.
2235 if (BotCand.Reason == SingleExcess || BotCand.Reason == SingleCritical) {
2236 IsTopNode = false;
2237 tracePick(BotCand, IsTopNode);
2238 return BotCand.SU;
2239 }
2240 // Check if the top Q has a better candidate.
2241 pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2242 assert(TopCand.Reason != NoCand && "failed to find the first candidate");
2243
2244 // If either Q has a single candidate that minimizes pressure above the
2245 // original region's pressure, pick it.
2246 if (TopCand.Reason <= SingleMax || BotCand.Reason <= SingleMax) {
2247 if (TopCand.Reason < BotCand.Reason) {
2248 IsTopNode = true;
2249 tracePick(TopCand, IsTopNode);
2250 return TopCand.SU;
2251 }
2252 IsTopNode = false;
2253 tracePick(BotCand, IsTopNode);
2254 return BotCand.SU;
2255 }
2256 // Check for a salient pressure difference and pick the best from either side.
2257 if (compareRPDelta(TopCand.RPDelta, BotCand.RPDelta)) {
2258 IsTopNode = true;
2259 tracePick(TopCand, IsTopNode);
2260 return TopCand.SU;
2261 }
2262 // Otherwise prefer the bottom candidate, in node order if all else failed.
2263 if (TopCand.Reason < BotCand.Reason) {
2264 IsTopNode = true;
2265 tracePick(TopCand, IsTopNode);
2266 return TopCand.SU;
2267 }
2268 IsTopNode = false;
2269 tracePick(BotCand, IsTopNode);
2270 return BotCand.SU;
2271 }
2272
2273 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
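/// Honors -misched-topdown and -misched-bottomup when one of them is forced;
/// otherwise picks in whichever direction pickNodeBidirectional prefers.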
2274 SUnit *ConvergingScheduler::pickNode(bool &IsTopNode) { 2275 if (DAG->top() == DAG->bottom()) { 2276 assert(Top.Available.empty() && Top.Pending.empty() && 2277 Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage"); 2278 return NULL; 2279 } 2280 SUnit *SU; 2281 do { 2282 if (ForceTopDown) { 2283 SU = Top.pickOnlyChoice(); 2284 if (!SU) { 2285 CandPolicy NoPolicy; 2286 SchedCandidate TopCand(NoPolicy); 2287 pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand); 2288 assert(TopCand.Reason != NoCand && "failed to find the first candidate"); 2289 SU = TopCand.SU; 2290 } 2291 IsTopNode = true; 2292 } 2293 else if (ForceBottomUp) { 2294 SU = Bot.pickOnlyChoice(); 2295 if (!SU) { 2296 CandPolicy NoPolicy; 2297 SchedCandidate BotCand(NoPolicy); 2298 pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand); 2299 assert(BotCand.Reason != NoCand && "failed to find the first candidate"); 2300 SU = BotCand.SU; 2301 } 2302 IsTopNode = false; 2303 } 2304 else { 2305 SU = pickNodeBidirectional(IsTopNode); 2306 } 2307 } while (SU->isScheduled); 2308 2309 if (SU->isTopReady()) 2310 Top.removeReady(SU); 2311 if (SU->isBottomReady()) 2312 Bot.removeReady(SU); 2313 2314 DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr()); 2315 return SU; 2316 } 2317 2318 void ConvergingScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) { 2319 2320 MachineBasicBlock::iterator InsertPos = SU->getInstr(); 2321 if (!isTop) 2322 ++InsertPos; 2323 SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs; 2324 2325 // Find already scheduled copies with a single physreg dependence and move 2326 // them just above the scheduled instruction. 2327 for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end(); 2328 I != E; ++I) { 2329 if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg())) 2330 continue; 2331 SUnit *DepSU = I->getSUnit(); 2332 if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1) 2333 continue; 2334 MachineInstr *Copy = DepSU->getInstr(); 2335 if (!Copy->isCopy()) 2336 continue; 2337 DEBUG(dbgs() << " Rescheduling physreg copy "; 2338 I->getSUnit()->dump(DAG)); 2339 DAG->moveInstruction(Copy, InsertPos); 2340 } 2341 } 2342 2343 /// Update the scheduler's state after scheduling a node. This is the same node 2344 /// that was just returned by pickNode(). However, ScheduleDAGMI needs to update 2345 /// it's state based on the current cycle before MachineSchedStrategy does. 2346 /// 2347 /// FIXME: Eventually, we may bundle physreg copies rather than rescheduling 2348 /// them here. See comments in biasPhysRegCopy. 2349 void ConvergingScheduler::schedNode(SUnit *SU, bool IsTopNode) { 2350 if (IsTopNode) { 2351 SU->TopReadyCycle = Top.CurrCycle; 2352 Top.bumpNode(SU); 2353 if (SU->hasPhysRegUses) 2354 reschedulePhysRegCopies(SU, true); 2355 } 2356 else { 2357 SU->BotReadyCycle = Bot.CurrCycle; 2358 Bot.bumpNode(SU); 2359 if (SU->hasPhysRegDefs) 2360 reschedulePhysRegCopies(SU, false); 2361 } 2362 } 2363 2364 /// Create the standard converging machine scheduler. This will be used as the 2365 /// default scheduler if the target does not set a default. 2366 static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) { 2367 assert((!ForceTopDown || !ForceBottomUp) && 2368 "-misched-topdown incompatible with -misched-bottomup"); 2369 ScheduleDAGMI *DAG = new ScheduleDAGMI(C, new ConvergingScheduler()); 2370 // Register DAG post-processors. 
2371 // 2372 // FIXME: extend the mutation API to allow earlier mutations to instantiate 2373 // data and pass it to later mutations. Have a single mutation that gathers 2374 // the interesting nodes in one pass. 2375 if (EnableCopyConstrain) 2376 DAG->addMutation(new CopyConstrain(DAG->TII, DAG->TRI)); 2377 if (EnableLoadCluster) 2378 DAG->addMutation(new LoadClusterMutation(DAG->TII, DAG->TRI)); 2379 if (EnableMacroFusion) 2380 DAG->addMutation(new MacroFusion(DAG->TII)); 2381 return DAG; 2382 } 2383 static MachineSchedRegistry 2384 ConvergingSchedRegistry("converge", "Standard converging scheduler.", 2385 createConvergingSched); 2386 2387 //===----------------------------------------------------------------------===// 2388 // ILP Scheduler. Currently for experimental analysis of heuristics. 2389 //===----------------------------------------------------------------------===// 2390 2391 namespace { 2392 /// \brief Order nodes by the ILP metric. 2393 struct ILPOrder { 2394 const SchedDFSResult *DFSResult; 2395 const BitVector *ScheduledTrees; 2396 bool MaximizeILP; 2397 2398 ILPOrder(bool MaxILP): DFSResult(0), ScheduledTrees(0), MaximizeILP(MaxILP) {} 2399 2400 /// \brief Apply a less-than relation on node priority. 2401 /// 2402 /// (Return true if A comes after B in the Q.) 2403 bool operator()(const SUnit *A, const SUnit *B) const { 2404 unsigned SchedTreeA = DFSResult->getSubtreeID(A); 2405 unsigned SchedTreeB = DFSResult->getSubtreeID(B); 2406 if (SchedTreeA != SchedTreeB) { 2407 // Unscheduled trees have lower priority. 2408 if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB)) 2409 return ScheduledTrees->test(SchedTreeB); 2410 2411 // Trees with shallower connections have have lower priority. 2412 if (DFSResult->getSubtreeLevel(SchedTreeA) 2413 != DFSResult->getSubtreeLevel(SchedTreeB)) { 2414 return DFSResult->getSubtreeLevel(SchedTreeA) 2415 < DFSResult->getSubtreeLevel(SchedTreeB); 2416 } 2417 } 2418 if (MaximizeILP) 2419 return DFSResult->getILP(A) < DFSResult->getILP(B); 2420 else 2421 return DFSResult->getILP(A) > DFSResult->getILP(B); 2422 } 2423 }; 2424 2425 /// \brief Schedule based on the ILP metric. 2426 class ILPScheduler : public MachineSchedStrategy { 2427 /// In case all subtrees are eventually connected to a common root through 2428 /// data dependence (e.g. reduction), place an upper limit on their size. 2429 /// 2430 /// FIXME: A subtree limit is generally good, but in the situation commented 2431 /// above, where multiple similar subtrees feed a common root, we should 2432 /// only split at a point where the resulting subtrees will be balanced. 2433 /// (a motivating test case must be found). 2434 static const unsigned SubtreeLimit = 16; 2435 2436 ScheduleDAGMI *DAG; 2437 ILPOrder Cmp; 2438 2439 std::vector<SUnit*> ReadyQ; 2440 public: 2441 ILPScheduler(bool MaximizeILP): DAG(0), Cmp(MaximizeILP) {} 2442 2443 virtual void initialize(ScheduleDAGMI *dag) { 2444 DAG = dag; 2445 DAG->computeDFSResult(); 2446 Cmp.DFSResult = DAG->getDFSResult(); 2447 Cmp.ScheduledTrees = &DAG->getScheduledTrees(); 2448 ReadyQ.clear(); 2449 } 2450 2451 virtual void registerRoots() { 2452 // Restore the heap in ReadyQ with the updated DFS results. 2453 std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 2454 } 2455 2456 /// Implement MachineSchedStrategy interface. 2457 /// ----------------------------------------- 2458 2459 /// Callback to select the highest priority node from the ready Q. 
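/// ReadyQ is kept as a max-heap ordered by ILPOrder, so after pop_heap the
/// best node is at the back of the vector.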
2460 virtual SUnit *pickNode(bool &IsTopNode) { 2461 if (ReadyQ.empty()) return NULL; 2462 std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 2463 SUnit *SU = ReadyQ.back(); 2464 ReadyQ.pop_back(); 2465 IsTopNode = false; 2466 DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") " 2467 << " ILP: " << DAG->getDFSResult()->getILP(SU) 2468 << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @" 2469 << DAG->getDFSResult()->getSubtreeLevel( 2470 DAG->getDFSResult()->getSubtreeID(SU)) << '\n' 2471 << "Scheduling " << *SU->getInstr()); 2472 return SU; 2473 } 2474 2475 /// \brief Scheduler callback to notify that a new subtree is scheduled. 2476 virtual void scheduleTree(unsigned SubtreeID) { 2477 std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 2478 } 2479 2480 /// Callback after a node is scheduled. Mark a newly scheduled tree, notify 2481 /// DFSResults, and resort the priority Q. 2482 virtual void schedNode(SUnit *SU, bool IsTopNode) { 2483 assert(!IsTopNode && "SchedDFSResult needs bottom-up"); 2484 } 2485 2486 virtual void releaseTopNode(SUnit *) { /*only called for top roots*/ } 2487 2488 virtual void releaseBottomNode(SUnit *SU) { 2489 ReadyQ.push_back(SU); 2490 std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 2491 } 2492 }; 2493 } // namespace 2494 2495 static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) { 2496 return new ScheduleDAGMI(C, new ILPScheduler(true)); 2497 } 2498 static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) { 2499 return new ScheduleDAGMI(C, new ILPScheduler(false)); 2500 } 2501 static MachineSchedRegistry ILPMaxRegistry( 2502 "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler); 2503 static MachineSchedRegistry ILPMinRegistry( 2504 "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler); 2505 2506 //===----------------------------------------------------------------------===// 2507 // Machine Instruction Shuffler for Correctness Testing 2508 //===----------------------------------------------------------------------===// 2509 2510 #ifndef NDEBUG 2511 namespace { 2512 /// Apply a less-than relation on the node order, which corresponds to the 2513 /// instruction order prior to scheduling. IsReverse implements greater-than. 2514 template<bool IsReverse> 2515 struct SUnitOrder { 2516 bool operator()(SUnit *A, SUnit *B) const { 2517 if (IsReverse) 2518 return A->NodeNum > B->NodeNum; 2519 else 2520 return A->NodeNum < B->NodeNum; 2521 } 2522 }; 2523 2524 /// Reorder instructions as much as possible. 2525 class InstructionShuffler : public MachineSchedStrategy { 2526 bool IsAlternating; 2527 bool IsTopDown; 2528 2529 // Using a less-than relation (SUnitOrder<false>) for the TopQ priority 2530 // gives nodes with a higher number higher priority causing the latest 2531 // instructions to be scheduled first. 2532 PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> > 2533 TopQ; 2534 // When scheduling bottom-up, use greater-than as the queue priority. 2535 PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> > 2536 BottomQ; 2537 public: 2538 InstructionShuffler(bool alternate, bool topdown) 2539 : IsAlternating(alternate), IsTopDown(topdown) {} 2540 2541 virtual void initialize(ScheduleDAGMI *) { 2542 TopQ.clear(); 2543 BottomQ.clear(); 2544 } 2545 2546 /// Implement MachineSchedStrategy interface. 
2547 /// ----------------------------------------- 2548 2549 virtual SUnit *pickNode(bool &IsTopNode) { 2550 SUnit *SU; 2551 if (IsTopDown) { 2552 do { 2553 if (TopQ.empty()) return NULL; 2554 SU = TopQ.top(); 2555 TopQ.pop(); 2556 } while (SU->isScheduled); 2557 IsTopNode = true; 2558 } 2559 else { 2560 do { 2561 if (BottomQ.empty()) return NULL; 2562 SU = BottomQ.top(); 2563 BottomQ.pop(); 2564 } while (SU->isScheduled); 2565 IsTopNode = false; 2566 } 2567 if (IsAlternating) 2568 IsTopDown = !IsTopDown; 2569 return SU; 2570 } 2571 2572 virtual void schedNode(SUnit *SU, bool IsTopNode) {} 2573 2574 virtual void releaseTopNode(SUnit *SU) { 2575 TopQ.push(SU); 2576 } 2577 virtual void releaseBottomNode(SUnit *SU) { 2578 BottomQ.push(SU); 2579 } 2580 }; 2581 } // namespace 2582 2583 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) { 2584 bool Alternate = !ForceTopDown && !ForceBottomUp; 2585 bool TopDown = !ForceBottomUp; 2586 assert((TopDown || !ForceTopDown) && 2587 "-misched-topdown incompatible with -misched-bottomup"); 2588 return new ScheduleDAGMI(C, new InstructionShuffler(Alternate, TopDown)); 2589 } 2590 static MachineSchedRegistry ShufflerRegistry( 2591 "shuffle", "Shuffle machine instructions alternating directions", 2592 createInstructionShuffler); 2593 #endif // !NDEBUG 2594 2595 //===----------------------------------------------------------------------===// 2596 // GraphWriter support for ScheduleDAGMI. 2597 //===----------------------------------------------------------------------===// 2598 2599 #ifndef NDEBUG 2600 namespace llvm { 2601 2602 template<> struct GraphTraits< 2603 ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {}; 2604 2605 template<> 2606 struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits { 2607 2608 DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {} 2609 2610 static std::string getGraphName(const ScheduleDAG *G) { 2611 return G->MF.getName(); 2612 } 2613 2614 static bool renderGraphFromBottomUp() { 2615 return true; 2616 } 2617 2618 static bool isNodeHidden(const SUnit *Node) { 2619 return (Node->NumPreds > 10 || Node->NumSuccs > 10); 2620 } 2621 2622 static bool hasNodeAddressLabel(const SUnit *Node, 2623 const ScheduleDAG *Graph) { 2624 return false; 2625 } 2626 2627 /// If you want to override the dot attributes printed for a particular 2628 /// edge, override this method. 
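/// Here artificial edges are drawn dashed cyan and control (non-data) edges
/// dashed blue; data edges keep the default style.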
2629 static std::string getEdgeAttributes(const SUnit *Node, 2630 SUnitIterator EI, 2631 const ScheduleDAG *Graph) { 2632 if (EI.isArtificialDep()) 2633 return "color=cyan,style=dashed"; 2634 if (EI.isCtrlDep()) 2635 return "color=blue,style=dashed"; 2636 return ""; 2637 } 2638 2639 static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) { 2640 std::string Str; 2641 raw_string_ostream SS(Str); 2642 SS << "SU(" << SU->NodeNum << ')'; 2643 return SS.str(); 2644 } 2645 static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) { 2646 return G->getGraphNodeLabel(SU); 2647 } 2648 2649 static std::string getNodeAttributes(const SUnit *N, 2650 const ScheduleDAG *Graph) { 2651 std::string Str("shape=Mrecord"); 2652 const SchedDFSResult *DFS = 2653 static_cast<const ScheduleDAGMI*>(Graph)->getDFSResult(); 2654 if (DFS) { 2655 Str += ",style=filled,fillcolor=\"#"; 2656 Str += DOT::getColorString(DFS->getSubtreeID(N)); 2657 Str += '"'; 2658 } 2659 return Str; 2660 } 2661 }; 2662 } // namespace llvm 2663 #endif // NDEBUG 2664 2665 /// viewGraph - Pop up a ghostview window with the reachable parts of the DAG 2666 /// rendered using 'dot'. 2667 /// 2668 void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) { 2669 #ifndef NDEBUG 2670 ViewGraph(this, Name, false, Title); 2671 #else 2672 errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on " 2673 << "systems with Graphviz or gv!\n"; 2674 #endif // NDEBUG 2675 } 2676 2677 /// Out-of-line implementation with no arguments is handy for gdb. 2678 void ScheduleDAGMI::viewGraph() { 2679 viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName()); 2680 } 2681