//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "misched"

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include <queue>

using namespace llvm;

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
}

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

static cl::opt<bool> EnableLoadCluster("misched-cluster", cl::Hidden,
  cl::desc("Enable load clustering."), cl::init(true));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
  MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// MachineScheduler runs after coalescing and before register allocation.
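/// It derives from MachineSchedContext so the analyses gathered in
/// runOnMachineFunction (MLI, MDT, AA, LIS, RegClassInfo) can be handed to the
/// ScheduleDAGInstrs instance produced by the selected scheduler factory.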
class MachineScheduler : public MachineSchedContext,
                         public MachineFunctionPass {
public:
  MachineScheduler();

  virtual void getAnalysisUsage(AnalysisUsage &AU) const;

  virtual void releaseMemory() {}

  virtual bool runOnMachineFunction(MachineFunction&);

  virtual void print(raw_ostream &O, const Module* = 0) const;

  static char ID; // Class identification, replacement for typeinfo
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "misched",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "misched",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineFunctionPass(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return 0;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C);


/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator End) {
  for(; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AliasAnalysis>();

  LIS = &getAnalysis<LiveIntervals>();
  const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();

  if (VerifyScheduling) {
    DEBUG(LIS->print(dbgs()));
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor == useDefaultMachineSched) {
    // Get the default scheduler set by the target.
    Ctor = MachineSchedRegistry::getDefault();
    if (!Ctor) {
      Ctor = createConvergingSched;
      MachineSchedRegistry::setDefault(Ctor);
    }
  }
  // Instantiate the selected scheduler.
  OwningPtr<ScheduleDAGInstrs> Scheduler(Ctor(this));

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler->startBlock(MBB);

    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    unsigned RemainingInstrs = MBB->size();
    for(MachineBasicBlock::iterator RegionEnd = MBB->end();
        RegionEnd != MBB->begin(); RegionEnd = Scheduler->begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end()
          || TII->isSchedulingBoundary(llvm::prior(RegionEnd), MBB, *MF)) {
        --RegionEnd;
        // Count the boundary instruction.
        --RemainingInstrs;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
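      // On exit, 'I' is either MBB->begin() or the instruction just below the
      // previous boundary; it becomes RegionBegin when the region is entered
      // below.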
      MachineBasicBlock::iterator I = RegionEnd;
      for(;I != MBB->begin(); --I, --RemainingInstrs) {
        if (TII->isSchedulingBoundary(llvm::prior(I), MBB, *MF))
          break;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler->enterRegion(MBB, I, RegionEnd, RemainingInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == llvm::prior(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler->exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n From: " << *I << " To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " Remaining: " << RemainingInstrs << "\n");

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler->schedule();

      // Close the current region.
      Scheduler->exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler->begin();
    }
    assert(RemainingInstrs == 0 && "Instruction count mismatch!");
    Scheduler->finishBlock();
  }
  Scheduler->finalizeSchedule();
  DEBUG(LIS->print(dbgs()));
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

void MachineScheduler::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ReadyQueue::dump() {
  dbgs() << " " << Name << ": ";
  for (unsigned i = 0, e = Queue.size(); i < e; ++i)
    dbgs() << Queue[i]->NodeNum << " ";
  dbgs() << "\n";
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMI::~ScheduleDAGMI() {
  delete DFSResult;
  DeleteContainerPointers(Mutations);
  delete SchedImpl;
}

bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
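/// Weak edges only decrement WeakPredsLeft and never release the successor;
/// a cluster edge additionally records the successor as NextClusterSucc so the
/// scheduling strategy can prefer scheduling it next.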
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(MachineInstr *MI,
                                    MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals
  LIS->handleMove(MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned endcount)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, endcount);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd =
    (RegionEnd == bb->end()) ?
      RegionEnd : llvm::next(RegionEnd);
}

// Set up the register pressure trackers for the top scheduled and bottom
// scheduled regions.
void ScheduleDAGMI::initRegPressure() {
  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.getPressure().dump(TRI));

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    BotRPTracker.recede();

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = TRI->getRegPressureSetLimit(i);
    DEBUG(dbgs() << TRI->getRegPressureSetName(i)
          << "Limit " << Limit
          << " Actual " << RegionPressure[i] << "\n");
    if (RegionPressure[i] > Limit)
      RegionCriticalPSets.push_back(PressureElement(i, 0));
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].PSetID) << " ";
        dbgs() << "\n");
}

// FIXME: When the pressure tracker deals in pressure differences then we won't
// iterate over all RegionCriticalPSets[i].
void ScheduleDAGMI::
updateScheduledPressure(const std::vector<unsigned> &NewMaxPressure) {
  for (unsigned i = 0, e = RegionCriticalPSets.size(); i < e; ++i) {
    unsigned ID = RegionCriticalPSets[i].PSetID;
    int &MaxUnits = RegionCriticalPSets[i].UnitIncrease;
    if ((int)NewMaxPressure[ID] > MaxUnits)
      MaxUnits = NewMaxPressure[ID];
  }
  DEBUG(
    for (unsigned i = 0, e = NewMaxPressure.size(); i < e; ++i) {
      unsigned Limit = TRI->getRegPressureSetLimit(i);
      if (NewMaxPressure[i] > Limit) {
        dbgs() << " " << TRI->getRegPressureSetName(i) << ": "
               << NewMaxPressure[i] << " > " << Limit << "\n";
      }
    });
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMI then it will want to override this virtual method in order to
/// update any specialized state.
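///
/// The driver sequence is: build the DAG with region register pressure, apply
/// the registered DAG mutations, initialize the strategy and ready queues, then
/// repeatedly pick a node, move its instruction, and update pressure and the
/// queues until the top and bottom zones meet.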
void ScheduleDAGMI::schedule() {
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Build the DAG and setup three register pressure trackers.
void ScheduleDAGMI::buildDAGWithRegPressure() {
  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
    Mutations[i]->apply(this);
  }
}

void ScheduleDAGMI::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomUp=*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

void ScheduleDAGMI::findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                                          SmallVectorImpl<SUnit*> &BotRoots) {
  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    SUnit *SU = &(*I);
    assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU->biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!I->NumPredsLeft)
      TopRoots.push_back(SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!I->NumSuccsLeft)
      BotRoots.push_back(SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = NULL;
  NextClusterPred = NULL;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
    SchedImpl->releaseTopNode(*I);
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  TopRPTracker.setPos(CurrentTop);

  CurrentBottom = RegionEnd;
}

/// Move an instruction and update register pressure.
void ScheduleDAGMI::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    // Update top scheduled pressure.
    TopRPTracker.advance();
    assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
    updateScheduledPressure(TopRPTracker.getPressure().MaxSetPressure);
  }
  else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
    }
    // Update bottom scheduled pressure.
    BotRPTracker.recede();
    assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
    updateScheduledPressure(BotRPTracker.getPressure().MaxSetPressure);
  }
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;

  if (DFSResult) {
    unsigned SubtreeID = DFSResult->getSubtreeID(SU);
    if (!ScheduledTrees.test(SubtreeID)) {
      ScheduledTrees.set(SubtreeID);
      DFSResult->scheduleTree(SubtreeID);
      SchedImpl->scheduleTree(SubtreeID);
    }
  }

  // Notify the scheduling strategy after updating the DAG.
  SchedImpl->schedNode(SU, IsTopNode);
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == llvm::prior(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = NULL;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// LoadClusterMutation - DAG post-processing to cluster loads.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between neighboring
/// loads.
class LoadClusterMutation : public ScheduleDAGMutation {
  struct LoadInfo {
    SUnit *SU;
    unsigned BaseReg;
    unsigned Offset;
    LoadInfo(SUnit *su, unsigned reg, unsigned ofs)
      : SU(su), BaseReg(reg), Offset(ofs) {}
  };
  static bool LoadInfoLess(const LoadClusterMutation::LoadInfo &LHS,
                           const LoadClusterMutation::LoadInfo &RHS);

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
public:
  LoadClusterMutation(const TargetInstrInfo *tii,
                      const TargetRegisterInfo *tri)
    : TII(tii), TRI(tri) {}

  virtual void apply(ScheduleDAGMI *DAG);
protected:
  void clusterNeighboringLoads(ArrayRef<SUnit*> Loads, ScheduleDAGMI *DAG);
};
} // anonymous

bool LoadClusterMutation::LoadInfoLess(
  const LoadClusterMutation::LoadInfo &LHS,
  const LoadClusterMutation::LoadInfo &RHS) {
  if (LHS.BaseReg != RHS.BaseReg)
    return LHS.BaseReg < RHS.BaseReg;
  return LHS.Offset < RHS.Offset;
}

void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
                                                  ScheduleDAGMI *DAG) {
  SmallVector<LoadClusterMutation::LoadInfo,32> LoadRecords;
  for (unsigned Idx = 0, End = Loads.size(); Idx != End; ++Idx) {
    SUnit *SU = Loads[Idx];
    unsigned BaseReg;
    unsigned Offset;
    if (TII->getLdStBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
      LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset));
  }
  if (LoadRecords.size() < 2)
    return;
  std::sort(LoadRecords.begin(), LoadRecords.end(), LoadInfoLess);
  unsigned ClusterLength = 1;
  for (unsigned Idx = 0, End = LoadRecords.size(); Idx < (End - 1); ++Idx) {
    if (LoadRecords[Idx].BaseReg != LoadRecords[Idx+1].BaseReg) {
      ClusterLength = 1;
      continue;
    }

    SUnit *SUa = LoadRecords[Idx].SU;
    SUnit *SUb = LoadRecords[Idx+1].SU;
    if (TII->shouldClusterLoads(SUa->getInstr(), SUb->getInstr(), ClusterLength)
        && DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {

      DEBUG(dbgs() << "Cluster loads SU(" << SUa->NodeNum << ") - SU("
            << SUb->NodeNum << ")\n");
      // Copy successor edges from SUa to SUb. Interleaving computation
      // dependent on SUa can prevent load combining due to register reuse.
      // Predecessor edges do not need to be copied from SUb to SUa since nearby
      // loads should have effectively the same inputs.
      for (SUnit::const_succ_iterator
             SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
        if (SI->getSUnit() == SUb)
          continue;
        DEBUG(dbgs() << " Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
        DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
      }
      ++ClusterLength;
    }
    else
      ClusterLength = 1;
  }
}

/// \brief Callback from DAG postProcessing to create cluster edges for loads.
void LoadClusterMutation::apply(ScheduleDAGMI *DAG) {
  // Map DAG NodeNum to store chain ID.
  DenseMap<unsigned, unsigned> StoreChainIDs;
  // Map each store chain to a set of dependent loads.
  SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->mayLoad())
      continue;
    unsigned ChainPredID = DAG->SUnits.size();
    for (SUnit::const_pred_iterator
           PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
      if (PI->isCtrl()) {
        ChainPredID = PI->getSUnit()->NodeNum;
        break;
      }
    }
    // Check if this chain-like pred has been seen
    // before. ChainPredID==MaxNodeID for loads at the top of the schedule.
    unsigned NumChains = StoreChainDependents.size();
    std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
      StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
    if (Result.second)
      StoreChainDependents.resize(NumChains + 1);
    StoreChainDependents[Result.first->second].push_back(SU);
  }
  // Iterate over the store chains.
  for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
    clusterNeighboringLoads(StoreChainDependents[Idx], DAG);
}

//===----------------------------------------------------------------------===//
// MacroFusion - DAG post-processing to encourage fusion of macro ops.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between instructions
/// that may be fused by the processor into a single operation.
class MacroFusion : public ScheduleDAGMutation {
  const TargetInstrInfo *TII;
public:
  MacroFusion(const TargetInstrInfo *tii): TII(tii) {}

  virtual void apply(ScheduleDAGMI *DAG);
};
} // anonymous

/// \brief Callback from DAG postProcessing to create cluster edges to encourage
/// fused operations.
void MacroFusion::apply(ScheduleDAGMI *DAG) {
  // For now, assume targets can only fuse with the branch.
  MachineInstr *Branch = DAG->ExitSU.getInstr();
  if (!Branch)
    return;

  for (unsigned Idx = DAG->SUnits.size(); Idx > 0;) {
    SUnit *SU = &DAG->SUnits[--Idx];
    if (!TII->shouldScheduleAdjacent(SU->getInstr(), Branch))
      continue;

    // Create a single weak edge from SU to ExitSU. The only effect is to cause
    // bottom-up scheduling to heavily prioritize the clustered SU. There is no
    // need to copy predecessor edges from ExitSU to SU, since top-down
    // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
    // of SU, we could create an artificial edge from the deepest root, but it
    // hasn't been needed yet.
    bool Success = DAG->addEdge(&DAG->ExitSU, SDep(SU, SDep::Cluster));
    (void)Success;
    assert(Success && "No DAG nodes should be reachable from ExitSU");

    DEBUG(dbgs() << "Macro Fuse SU(" << SU->NodeNum << ")\n");
    break;
  }
}

//===----------------------------------------------------------------------===//
// CopyConstrain - DAG post-processing to encourage copy elimination.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create weak edges from all uses of a copy to
/// the one use that defines the copy's source vreg, most likely an induction
/// variable increment.
class CopyConstrain : public ScheduleDAGMutation {
  // Transient state.
  SlotIndex RegionBeginIdx;
  // RegionEndIdx is the slot index of the last non-debug instruction in the
  // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
  SlotIndex RegionEndIdx;
public:
  CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}

  virtual void apply(ScheduleDAGMI *DAG);

protected:
  void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMI *DAG);
};
} // anonymous

/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
///  I0:     = dst
///  I1: src = ...
///  I2:     = dst
///  I3: dst = src (copy)
/// (create pred->succ edges I0->I1, I2->I1)
///
/// 2) Local copy:
///  I0: dst = src (copy)
///  I1:     = dst
///  I2: src = ...
///  I3:     = dst
/// (create pred->succ edges I1->I2, I3->I2)
///
/// Although the MachineScheduler is currently constrained to single blocks,
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMI *DAG) {
  LiveIntervals *LIS = DAG->getLIS();
  MachineInstr *Copy = CopySU->getInstr();

  // Check for pure vreg copies.
  unsigned SrcReg = Copy->getOperand(1).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
    return;

  unsigned DstReg = Copy->getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(DstReg))
    return;

  // Check if either the dest or source is local. If it's live across a back
  // edge, it's not local. Note that if both vregs are live across the back
  // edge, we cannot successfully constrain the copy without cyclic scheduling.
  unsigned LocalReg = DstReg;
  unsigned GlobalReg = SrcReg;
  LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
  if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
    LocalReg = SrcReg;
    GlobalReg = DstReg;
    LocalLI = &LIS->getInterval(LocalReg);
    if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
      return;
  }
  LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);

  // Find the global segment after the start of the local LI.
  LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
  // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
  // local live range. We could create edges from other global uses to the local
  // start, but the coalescer should have already eliminated these cases, so
  // don't bother dealing with it.
  if (GlobalSegment == GlobalLI->end())
    return;

  // If GlobalSegment is killed at the LocalLI->start, the call to find()
  // returned the next global segment. But if GlobalSegment overlaps with
  // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
  // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
  if (GlobalSegment->contains(LocalLI->beginIndex()))
    ++GlobalSegment;

  if (GlobalSegment == GlobalLI->end())
    return;

  // Check if GlobalLI contains a hole in the vicinity of LocalLI.
  if (GlobalSegment != GlobalLI->begin()) {
    // Two address defs have no hole.
    if (SlotIndex::isSameInstr(llvm::prior(GlobalSegment)->end,
                               GlobalSegment->start)) {
      return;
    }
    // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
    // it would be a disconnected component in the live range.
    assert(llvm::prior(GlobalSegment)->start < LocalLI->beginIndex() &&
           "Disconnected LRG within the scheduling region.");
  }
  MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
  if (!GlobalDef)
    return;

  SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
  if (!GlobalSU)
    return;

  // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
  // constraining the uses of the last local def to precede GlobalDef.
  SmallVector<SUnit*,8> LocalUses;
  const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
  MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
  SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
  for (SUnit::const_succ_iterator
         I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
       I != E; ++I) {
    if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
      continue;
    if (I->getSUnit() == GlobalSU)
      continue;
    if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
      return;
    LocalUses.push_back(I->getSUnit());
  }
  // Open the top of the GlobalLI hole by constraining any earlier global uses
  // to precede the start of LocalLI.
  SmallVector<SUnit*,8> GlobalUses;
  MachineInstr *FirstLocalDef =
    LIS->getInstructionFromIndex(LocalLI->beginIndex());
  SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
  for (SUnit::const_pred_iterator
         I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
    if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
      continue;
    if (I->getSUnit() == FirstLocalSU)
      continue;
    if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
      return;
    GlobalUses.push_back(I->getSUnit());
  }
  DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
  // Add the weak edges.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << " Local use SU(" << (*I)->NodeNum << ") -> SU("
          << GlobalSU->NodeNum << ")\n");
    DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
  }
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << " Global use SU(" << (*I)->NodeNum << ") -> SU("
          << FirstLocalSU->NodeNum << ")\n");
    DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
  }
}

/// \brief Callback from DAG postProcessing to create weak edges to encourage
/// copy elimination.
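///
/// apply() computes RegionBeginIdx/RegionEndIdx from the first and last
/// non-debug instructions in the region, then calls constrainLocalCopy on
/// every COPY in the DAG.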
void CopyConstrain::apply(ScheduleDAGMI *DAG) {
  MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
  if (FirstPos == DAG->end())
    return;
  RegionBeginIdx = DAG->getLIS()->getInstructionIndex(&*FirstPos);
  RegionEndIdx = DAG->getLIS()->getInstructionIndex(
    &*priorNonDebug(DAG->end(), DAG->begin()));

  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->isCopy())
      continue;

    constrainLocalCopy(SU, DAG);
  }
}

//===----------------------------------------------------------------------===//
// ConvergingScheduler - Implementation of the generic MachineSchedStrategy.
//===----------------------------------------------------------------------===//

namespace {
/// ConvergingScheduler shrinks the unscheduled zone using heuristics to balance
/// the schedule.
class ConvergingScheduler : public MachineSchedStrategy {
public:
  /// Represent the type of SchedCandidate found within a single queue.
  /// pickNodeBidirectional depends on these listed by decreasing priority.
  enum CandReason {
    NoCand, PhysRegCopy, SingleExcess, SingleCritical, Cluster, Weak,
    ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce,
    TopDepthReduce, TopPathReduce, SingleMax, MultiPressure, NextDefUse,
    NodeOrder};

#ifndef NDEBUG
  static const char *getReasonStr(ConvergingScheduler::CandReason Reason);
#endif

  /// Policy for scheduling the next instruction in the candidate's zone.
  struct CandPolicy {
    bool ReduceLatency;
    unsigned ReduceResIdx;
    unsigned DemandResIdx;

    CandPolicy(): ReduceLatency(false), ReduceResIdx(0), DemandResIdx(0) {}
  };

  /// Status of an instruction's critical resource consumption.
  struct SchedResourceDelta {
    // Count critical resources in the scheduled region required by SU.
    unsigned CritResources;

    // Count critical resources from another region consumed by SU.
    unsigned DemandedResources;

    SchedResourceDelta(): CritResources(0), DemandedResources(0) {}

    bool operator==(const SchedResourceDelta &RHS) const {
      return CritResources == RHS.CritResources
        && DemandedResources == RHS.DemandedResources;
    }
    bool operator!=(const SchedResourceDelta &RHS) const {
      return !operator==(RHS);
    }
  };

  /// Store the state used by ConvergingScheduler heuristics, required for the
  /// lifetime of one invocation of pickNode().
  struct SchedCandidate {
    CandPolicy Policy;

    // The best SUnit candidate.
    SUnit *SU;

    // The reason for this candidate.
    CandReason Reason;

    // Register pressure values for the best candidate.
    RegPressureDelta RPDelta;

    // Critical resource consumption of the best candidate.
    SchedResourceDelta ResDelta;

    SchedCandidate(const CandPolicy &policy)
      : Policy(policy), SU(NULL), Reason(NoCand) {}

    bool isValid() const { return SU; }

    // Copy the status of another candidate without changing policy.
    void setBest(SchedCandidate &Best) {
      assert(Best.Reason != NoCand && "uninitialized Sched candidate");
      SU = Best.SU;
      Reason = Best.Reason;
      RPDelta = Best.RPDelta;
      ResDelta = Best.ResDelta;
    }

    void initResourceDelta(const ScheduleDAGMI *DAG,
                           const TargetSchedModel *SchedModel);
  };

  /// Summarize the unscheduled region.
  struct SchedRemainder {
    // Critical path through the DAG in expected latency.
    unsigned CriticalPath;

    // Scaled count of micro-ops left to schedule.
    unsigned RemIssueCount;

    // Unscheduled resources
    SmallVector<unsigned, 16> RemainingCounts;

    void reset() {
      CriticalPath = 0;
      RemIssueCount = 0;
      RemainingCounts.clear();
    }

    SchedRemainder() { reset(); }

    void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);
  };

  /// Each scheduling boundary is associated with ready queues. It tracks the
  /// current cycle in the direction of movement, and maintains the state
  /// of "hazards" and other interlocks at the current cycle.
  struct SchedBoundary {
    ScheduleDAGMI *DAG;
    const TargetSchedModel *SchedModel;
    SchedRemainder *Rem;

    ReadyQueue Available;
    ReadyQueue Pending;
    bool CheckPending;

    // For heuristics, keep a list of the nodes that immediately depend on the
    // most recently scheduled node.
    SmallPtrSet<const SUnit*, 8> NextSUs;

    ScheduleHazardRecognizer *HazardRec;

    /// Number of cycles it takes to issue the instructions scheduled in this
    /// zone. It is defined as: scheduled-micro-ops / issue-width + stalls.
    /// See getStalls().
    unsigned CurrCycle;

    /// Micro-ops issued in the current cycle
    unsigned CurrMOps;

    /// MinReadyCycle - Cycle of the soonest available instruction.
    unsigned MinReadyCycle;

    // The expected latency of the critical path in this scheduled zone.
    unsigned ExpectedLatency;

    // The latency of dependence chains leading into this zone.
    // For each node scheduled top-down: DLat = max(DLat, N.Depth).
    // For each cycle scheduled: DLat -= 1.
    unsigned DependentLatency;

    /// Count the scheduled (issued) micro-ops that can be retired by
    /// time=CurrCycle assuming the first scheduled instr is retired at time=0.
    unsigned RetiredMOps;

    // Count scheduled resources that have been executed. Resources are
    // considered executed if they become ready in the time that it takes to
    // saturate any resource including the one in question. Counts are scaled
    // for direct comparison with other resources. Counts can be compared with
    // MOps * getMicroOpFactor and Latency * getLatencyFactor.
    SmallVector<unsigned, 16> ExecutedResCounts;

    /// Cache the max count for a single resource.
    unsigned MaxExecutedResCount;

    // Cache the critical resource ID in this scheduled zone.
    unsigned ZoneCritResIdx;

    // Is the scheduled region resource limited vs. latency limited.
    bool IsResourceLimited;

#ifndef NDEBUG
    // Remember the greatest operand latency as an upper bound on the number of
    // times we should retry the pending queue because of a hazard.
    unsigned MaxObservedLatency;
#endif

    void reset() {
      // A new HazardRec is created for each DAG and owned by SchedBoundary.
      delete HazardRec;

      Available.clear();
      Pending.clear();
      CheckPending = false;
      NextSUs.clear();
      HazardRec = 0;
      CurrCycle = 0;
      CurrMOps = 0;
      MinReadyCycle = UINT_MAX;
      ExpectedLatency = 0;
      DependentLatency = 0;
      RetiredMOps = 0;
      MaxExecutedResCount = 0;
      ZoneCritResIdx = 0;
      IsResourceLimited = false;
#ifndef NDEBUG
      MaxObservedLatency = 0;
#endif
      // Reserve a zero-count for invalid CritResIdx.
      ExecutedResCounts.resize(1);
      assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
    }

    /// Pending queues extend the ready queues with the same ID and the
    /// PendingFlag set.
    SchedBoundary(unsigned ID, const Twine &Name):
      DAG(0), SchedModel(0), Rem(0), Available(ID, Name+".A"),
      Pending(ID << ConvergingScheduler::LogMaxQID, Name+".P"),
      HazardRec(0) {
      reset();
    }

    ~SchedBoundary() { delete HazardRec; }

    void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel,
              SchedRemainder *rem);

    bool isTop() const {
      return Available.getID() == ConvergingScheduler::TopQID;
    }

#ifndef NDEBUG
    const char *getResourceName(unsigned PIdx) {
      if (!PIdx)
        return "MOps";
      return SchedModel->getProcResource(PIdx)->Name;
    }
#endif

    /// Get the number of latency cycles "covered" by the scheduled
    /// instructions. This is the larger of the critical path within the zone
    /// and the number of cycles required to issue the instructions.
    unsigned getScheduledLatency() const {
      return std::max(ExpectedLatency, CurrCycle);
    }

    unsigned getUnscheduledLatency(SUnit *SU) const {
      return isTop() ? SU->getHeight() : SU->getDepth();
    }

    unsigned getResourceCount(unsigned ResIdx) const {
      return ExecutedResCounts[ResIdx];
    }

    /// Get the scaled count of scheduled micro-ops and resources, including
    /// executed resources.
    unsigned getCriticalCount() const {
      if (!ZoneCritResIdx)
        return RetiredMOps * SchedModel->getMicroOpFactor();
      return getResourceCount(ZoneCritResIdx);
    }

    /// Get a scaled count for the minimum execution time of the scheduled
    /// micro-ops that are ready to execute by getExecutedCount. Notice the
    /// feedback loop.
    unsigned getExecutedCount() const {
      return std::max(CurrCycle * SchedModel->getLatencyFactor(),
                      MaxExecutedResCount);
    }

    bool checkHazard(SUnit *SU);

    unsigned findMaxLatency(ArrayRef<SUnit*> ReadySUs);

    unsigned getOtherResourceCount(unsigned &OtherCritIdx);

    void setPolicy(CandPolicy &Policy, SchedBoundary &OtherZone);

    void releaseNode(SUnit *SU, unsigned ReadyCycle);

    void bumpCycle(unsigned NextCycle);

    void incExecutedResources(unsigned PIdx, unsigned Count);

    unsigned countResource(unsigned PIdx, unsigned Cycles, unsigned ReadyCycle);

    void bumpNode(SUnit *SU);

    void releasePending();

    void removeReady(SUnit *SU);

    SUnit *pickOnlyChoice();

#ifndef NDEBUG
    void dumpScheduledState();
#endif
  };

private:
  ScheduleDAGMI *DAG;
  const TargetSchedModel *SchedModel;
  const TargetRegisterInfo *TRI;

  // State of the top and bottom scheduled instruction boundaries.
  SchedRemainder Rem;
  SchedBoundary Top;
  SchedBoundary Bot;

public:
  /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
  enum {
    TopQID = 1,
    BotQID = 2,
    LogMaxQID = 2
  };

  ConvergingScheduler():
    DAG(0), SchedModel(0), TRI(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}

  virtual void initialize(ScheduleDAGMI *dag);

  virtual SUnit *pickNode(bool &IsTopNode);

  virtual void schedNode(SUnit *SU, bool IsTopNode);

  virtual void releaseTopNode(SUnit *SU);

  virtual void releaseBottomNode(SUnit *SU);

  virtual void registerRoots();

protected:
  void tryCandidate(SchedCandidate &Cand,
                    SchedCandidate &TryCand,
                    SchedBoundary &Zone,
                    const RegPressureTracker &RPTracker,
                    RegPressureTracker &TempTracker);

  SUnit *pickNodeBidirectional(bool &IsTopNode);

  void pickNodeFromQueue(SchedBoundary &Zone,
                         const RegPressureTracker &RPTracker,
                         SchedCandidate &Candidate);

  void reschedulePhysRegCopies(SUnit *SU, bool isTop);

#ifndef NDEBUG
  void traceCandidate(const SchedCandidate &Cand);
#endif
};
} // namespace

void ConvergingScheduler::SchedRemainder::
init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
  reset();
  if (!SchedModel->hasInstrSchedModel())
    return;
  RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
  for (std::vector<SUnit>::iterator
         I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
    RemIssueCount += SchedModel->getNumMicroOps(I->getInstr(), SC)
      * SchedModel->getMicroOpFactor();
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned PIdx = PI->ProcResourceIdx;
      unsigned Factor = SchedModel->getResourceFactor(PIdx);
      RemainingCounts[PIdx] += (Factor * PI->Cycles);
    }
  }
}

void ConvergingScheduler::SchedBoundary::
init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
  reset();
  DAG = dag;
  SchedModel = smodel;
  Rem = rem;
  if (SchedModel->hasInstrSchedModel())
    ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds());
}

void ConvergingScheduler::initialize(ScheduleDAGMI *dag) {
  DAG = dag;
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  Bot.init(DAG, SchedModel, &Rem);

  // Initialize resource counts.

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
  // are disabled, then these HazardRecs will be disabled.
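  // A recognizer is created per zone and owned by its SchedBoundary; see
  // SchedBoundary::reset() and ~SchedBoundary().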
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  const TargetMachine &TM = DAG->MF.getTarget();
  Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
  Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);

  assert((!ForceTopDown || !ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
}

void ConvergingScheduler::releaseTopNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isWeak())
      continue;
    unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle;
    unsigned Latency = I->getLatency();
#ifndef NDEBUG
    Top.MaxObservedLatency = std::max(Latency, Top.MaxObservedLatency);
#endif
    if (SU->TopReadyCycle < PredReadyCycle + Latency)
      SU->TopReadyCycle = PredReadyCycle + Latency;
  }
  Top.releaseNode(SU, SU->TopReadyCycle);
}

void ConvergingScheduler::releaseBottomNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  assert(SU->getInstr() && "Scheduled SUnit must have instr");

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isWeak())
      continue;
    unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
    unsigned Latency = I->getLatency();
#ifndef NDEBUG
    Bot.MaxObservedLatency = std::max(Latency, Bot.MaxObservedLatency);
#endif
    if (SU->BotReadyCycle < SuccReadyCycle + Latency)
      SU->BotReadyCycle = SuccReadyCycle + Latency;
  }
  Bot.releaseNode(SU, SU->BotReadyCycle);
}

void ConvergingScheduler::registerRoots() {
  Rem.CriticalPath = DAG->ExitSU.getDepth();
  // Some roots may not feed into ExitSU. Check all of them in case.
  for (std::vector<SUnit*>::const_iterator
         I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
    if ((*I)->getDepth() > Rem.CriticalPath)
      Rem.CriticalPath = (*I)->getDepth();
  }
  DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n');
}

/// Does this SU have a hazard within the current instruction group.
///
/// The scheduler supports two modes of hazard recognition. The first is the
/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
/// supports highly complicated in-order reservation tables
/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
///
/// The second is a streamlined mechanism that checks for hazards based on
/// simple counters that the scheduler itself maintains. It explicitly checks
/// for instruction dispatch limitations, including the number of micro-ops that
/// can dispatch per cycle.
///
/// TODO: Also check whether the SU must start a new group.
bool ConvergingScheduler::SchedBoundary::checkHazard(SUnit *SU) {
  if (HazardRec->isEnabled())
    return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard;

  unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
  if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
    DEBUG(dbgs() << " SU(" << SU->NodeNum << ") uops="
          << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
    return true;
  }
  return false;
}

// Find the unscheduled node in ReadySUs with the highest latency.
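// Returns 0 if ReadySUs is empty; the identified SUnit is only reported in
// debug output.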
1565 unsigned ConvergingScheduler::SchedBoundary:: 1566 findMaxLatency(ArrayRef<SUnit*> ReadySUs) { 1567 SUnit *LateSU = 0; 1568 unsigned RemLatency = 0; 1569 for (ArrayRef<SUnit*>::iterator I = ReadySUs.begin(), E = ReadySUs.end(); 1570 I != E; ++I) { 1571 unsigned L = getUnscheduledLatency(*I); 1572 if (L > RemLatency) { 1573 RemLatency = L; 1574 LateSU = *I; 1575 } 1576 } 1577 if (LateSU) { 1578 DEBUG(dbgs() << Available.getName() << " RemLatency SU(" 1579 << LateSU->NodeNum << ") " << RemLatency << "c\n"); 1580 } 1581 return RemLatency; 1582 } 1583 1584 // Count resources in this zone and the remaining unscheduled 1585 // instructions. Return the max count, scaled. Set OtherCritIdx to the critical 1586 // resource index, or zero if the zone is issue limited. 1587 unsigned ConvergingScheduler::SchedBoundary:: 1588 getOtherResourceCount(unsigned &OtherCritIdx) { 1589 if (!SchedModel->hasInstrSchedModel()) 1590 return 0; 1591 1592 unsigned OtherCritCount = Rem->RemIssueCount 1593 + (RetiredMOps * SchedModel->getMicroOpFactor()); 1594 DEBUG(dbgs() << " " << Available.getName() << " + Remain MOps: " 1595 << OtherCritCount / SchedModel->getMicroOpFactor() << '\n'); 1596 OtherCritIdx = 0; 1597 for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds(); 1598 PIdx != PEnd; ++PIdx) { 1599 unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx]; 1600 if (OtherCount > OtherCritCount) { 1601 OtherCritCount = OtherCount; 1602 OtherCritIdx = PIdx; 1603 } 1604 } 1605 if (OtherCritIdx) { 1606 DEBUG(dbgs() << " " << Available.getName() << " + Remain CritRes: " 1607 << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx) 1608 << " " << getResourceName(OtherCritIdx) << "\n"); 1609 } 1610 return OtherCritCount; 1611 } 1612 1613 /// Set the CandPolicy for this zone given the current resources and latencies 1614 /// inside and outside the zone. 1615 void ConvergingScheduler::SchedBoundary::setPolicy(CandPolicy &Policy, 1616 SchedBoundary &OtherZone) { 1617 // Now that potential stalls have been considered, apply preemptive heuristics 1618 // based on the total latency and resources inside and outside this 1619 // zone. 1620 1621 // Compute remaining latency. We need this both to determine whether the 1622 // overall schedule has become latency-limited and whether the instructions 1623 // outside this zone are resource or latency limited. 1624 // 1625 // The "dependent" latency is updated incrementally during scheduling as the 1626 // max height/depth of scheduled nodes minus the cycles since it was 1627 // scheduled: 1628 // DLat = max (N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone 1629 // 1630 // The "independent" latency is the max ready queue depth: 1631 // ILat = max N.depth for N in Available|Pending 1632 // 1633 // RemainingLatency is the greater of independent and dependent latency. 1634 unsigned RemLatency = DependentLatency; 1635 RemLatency = std::max(RemLatency, findMaxLatency(Available.elements())); 1636 RemLatency = std::max(RemLatency, findMaxLatency(Pending.elements())); 1637 1638 // Compute the critical resource outside the zone.
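// The counts returned by getOtherResourceCount() are already scaled by the
// per-resource factors, so they can be compared directly against
// RemLatency * LatencyFactor below.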
1639 unsigned OtherCritIdx; 1640 unsigned OtherCount = OtherZone.getOtherResourceCount(OtherCritIdx); 1641 1642 bool OtherResLimited = false; 1643 if (SchedModel->hasInstrSchedModel()) { 1644 unsigned LFactor = SchedModel->getLatencyFactor(); 1645 OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor; 1646 } 1647 if (!OtherResLimited && (RemLatency + CurrCycle > Rem->CriticalPath)) { 1648 Policy.ReduceLatency |= true; 1649 DEBUG(dbgs() << " " << Available.getName() << " RemainingLatency " 1650 << RemLatency << " + " << CurrCycle << "c > CritPath " 1651 << Rem->CriticalPath << "\n"); 1652 } 1653 // If the same resource is limiting inside and outside the zone, do nothing. 1654 if (IsResourceLimited && OtherResLimited && (ZoneCritResIdx == OtherCritIdx)) 1655 return; 1656 1657 DEBUG( 1658 if (IsResourceLimited) { 1659 dbgs() << " " << Available.getName() << " ResourceLimited: " 1660 << getResourceName(ZoneCritResIdx) << "\n"; 1661 } 1662 if (OtherResLimited) 1663 dbgs() << " RemainingLimit: " << getResourceName(OtherCritIdx); 1664 if (!IsResourceLimited && !OtherResLimited) 1665 dbgs() << " Latency limited both directions.\n"); 1666 1667 if (IsResourceLimited && !Policy.ReduceResIdx) 1668 Policy.ReduceResIdx = ZoneCritResIdx; 1669 1670 if (OtherResLimited) 1671 Policy.DemandResIdx = OtherCritIdx; 1672 } 1673 1674 void ConvergingScheduler::SchedBoundary::releaseNode(SUnit *SU, 1675 unsigned ReadyCycle) { 1676 if (ReadyCycle < MinReadyCycle) 1677 MinReadyCycle = ReadyCycle; 1678 1679 // Check for interlocks first. For the purpose of other heuristics, an 1680 // instruction that cannot issue appears as if it's not in the ReadyQueue. 1681 bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0; 1682 if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU)) 1683 Pending.push(SU); 1684 else 1685 Available.push(SU); 1686 1687 // Record this node as an immediate dependent of the scheduled node. 1688 NextSUs.insert(SU); 1689 } 1690 1691 /// Move the boundary of scheduled code by one cycle. 1692 void ConvergingScheduler::SchedBoundary::bumpCycle(unsigned NextCycle) { 1693 if (SchedModel->getMicroOpBufferSize() == 0) { 1694 assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized"); 1695 if (MinReadyCycle > NextCycle) 1696 NextCycle = MinReadyCycle; 1697 } 1698 // Update the current micro-ops, which will issue in the next cycle. 1699 unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle); 1700 CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps; 1701 1702 // Decrement DependentLatency based on the next cycle. 1703 if ((NextCycle - CurrCycle) > DependentLatency) 1704 DependentLatency = 0; 1705 else 1706 DependentLatency -= (NextCycle - CurrCycle); 1707 1708 if (!HazardRec->isEnabled()) { 1709 // Bypass HazardRec virtual calls. 1710 CurrCycle = NextCycle; 1711 } 1712 else { 1713 // Bypass getHazardType calls in case of long latency. 
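// Advance (top zone) or recede (bottom zone) the recognizer one cycle at a
// time so its internal state stays in sync with CurrCycle.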
1714 for (; CurrCycle != NextCycle; ++CurrCycle) { 1715 if (isTop()) 1716 HazardRec->AdvanceCycle(); 1717 else 1718 HazardRec->RecedeCycle(); 1719 } 1720 } 1721 CheckPending = true; 1722 unsigned LFactor = SchedModel->getLatencyFactor(); 1723 IsResourceLimited = 1724 (int)(getCriticalCount() - (getScheduledLatency() * LFactor)) 1725 > (int)LFactor; 1726 1727 DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n'); 1728 } 1729 1730 void ConvergingScheduler::SchedBoundary::incExecutedResources(unsigned PIdx, 1731 unsigned Count) { 1732 ExecutedResCounts[PIdx] += Count; 1733 if (ExecutedResCounts[PIdx] > MaxExecutedResCount) 1734 MaxExecutedResCount = ExecutedResCounts[PIdx]; 1735 } 1736 1737 /// Add the given processor resource to this scheduled zone. 1738 /// 1739 /// \param Cycles indicates the number of consecutive (non-pipelined) cycles 1740 /// during which this resource is consumed. 1741 /// 1742 /// \return the next cycle at which the instruction may execute without 1743 /// oversubscribing resources. 1744 unsigned ConvergingScheduler::SchedBoundary:: 1745 countResource(unsigned PIdx, unsigned Cycles, unsigned ReadyCycle) { 1746 unsigned Factor = SchedModel->getResourceFactor(PIdx); 1747 unsigned Count = Factor * Cycles; 1748 DEBUG(dbgs() << " " << getResourceName(PIdx) 1749 << " +" << Cycles << "x" << Factor << "u\n"); 1750 1751 // Update Executed resources counts. 1752 incExecutedResources(PIdx, Count); 1753 assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted"); 1754 Rem->RemainingCounts[PIdx] -= Count; 1755 1756 // Check if this resource exceeds the current critical resource by a full 1757 // cycle. If so, it becomes the critical resource. 1758 if (ZoneCritResIdx != PIdx 1759 && ((int)(getResourceCount(PIdx) - getCriticalCount()) 1760 >= (int)SchedModel->getLatencyFactor())) { 1761 ZoneCritResIdx = PIdx; 1762 DEBUG(dbgs() << " *** Critical resource " 1763 << getResourceName(PIdx) << ": " 1764 << getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n"); 1765 } 1766 // TODO: We don't yet model reserved resources. It's not hard though. 1767 return CurrCycle; 1768 } 1769 1770 /// Move the boundary of scheduled code by one SUnit. 1771 void ConvergingScheduler::SchedBoundary::bumpNode(SUnit *SU) { 1772 // Update the reservation table. 1773 if (HazardRec->isEnabled()) { 1774 if (!isTop() && SU->isCall) { 1775 // Calls are scheduled with their preceding instructions. For bottom-up 1776 // scheduling, clear the pipeline state before emitting. 1777 HazardRec->Reset(); 1778 } 1779 HazardRec->EmitInstruction(SU); 1780 } 1781 const MCSchedClassDesc *SC = DAG->getSchedClass(SU); 1782 unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr()); 1783 CurrMOps += IncMOps; 1784 // checkHazard prevents scheduling multiple instructions per cycle that exceed 1785 // issue width. However, we commonly reach the maximum. In this case 1786 // opportunistically bump the cycle to avoid uselessly checking everything in 1787 // the readyQ. Furthermore, a single instruction may produce more than one 1788 // cycle's worth of micro-ops. 1789 // 1790 // TODO: Also check if this SU must end a dispatch group. 1791 unsigned NextCycle = CurrCycle; 1792 if (CurrMOps >= SchedModel->getIssueWidth()) { 1793 ++NextCycle; 1794 DEBUG(dbgs() << " *** Max MOps " << CurrMOps 1795 << " at cycle " << CurrCycle << '\n'); 1796 } 1797 unsigned ReadyCycle = (isTop() ? 
SU->TopReadyCycle : SU->BotReadyCycle); 1798 DEBUG(dbgs() << " Ready @" << ReadyCycle << "c\n"); 1799 1800 switch (SchedModel->getMicroOpBufferSize()) { 1801 case 0: 1802 assert(ReadyCycle <= CurrCycle && "Broken PendingQueue"); 1803 break; 1804 case 1: 1805 if (ReadyCycle > NextCycle) { 1806 NextCycle = ReadyCycle; 1807 DEBUG(dbgs() << " *** Stall until: " << ReadyCycle << "\n"); 1808 } 1809 break; 1810 default: 1811 // We don't currently model the OOO reorder buffer, so consider all 1812 // scheduled MOps to be "retired". 1813 break; 1814 } 1815 RetiredMOps += IncMOps; 1816 1817 // Update resource counts and critical resource. 1818 if (SchedModel->hasInstrSchedModel()) { 1819 unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor(); 1820 assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted"); 1821 Rem->RemIssueCount -= DecRemIssue; 1822 if (ZoneCritResIdx) { 1823 // Scale scheduled micro-ops for comparing with the critical resource. 1824 unsigned ScaledMOps = 1825 RetiredMOps * SchedModel->getMicroOpFactor(); 1826 1827 // If scaled micro-ops are now more than the previous critical resource by 1828 // a full cycle, then micro-ops issue becomes critical. 1829 if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx)) 1830 >= (int)SchedModel->getLatencyFactor()) { 1831 ZoneCritResIdx = 0; 1832 DEBUG(dbgs() << " *** Critical resource NumMicroOps: " 1833 << ScaledMOps / SchedModel->getLatencyFactor() << "c\n"); 1834 } 1835 } 1836 for (TargetSchedModel::ProcResIter 1837 PI = SchedModel->getWriteProcResBegin(SC), 1838 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) { 1839 unsigned RCycle = 1840 countResource(PI->ProcResourceIdx, PI->Cycles, ReadyCycle); 1841 if (RCycle > NextCycle) 1842 NextCycle = RCycle; 1843 } 1844 } 1845 // Update ExpectedLatency and DependentLatency. 1846 unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency; 1847 unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency; 1848 if (SU->getDepth() > TopLatency) { 1849 TopLatency = SU->getDepth(); 1850 DEBUG(dbgs() << " " << Available.getName() 1851 << " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n"); 1852 } 1853 if (SU->getHeight() > BotLatency) { 1854 BotLatency = SU->getHeight(); 1855 DEBUG(dbgs() << " " << Available.getName() 1856 << " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n"); 1857 } 1858 // If we stall for any reason, bump the cycle. 1859 if (NextCycle > CurrCycle) { 1860 bumpCycle(NextCycle); 1861 } 1862 else { 1863 // After updating ZoneCritResIdx and ExpectedLatency, check if we're 1864 // resource limited. If a stall occurred, bumpCycle does this. 1865 unsigned LFactor = SchedModel->getLatencyFactor(); 1866 IsResourceLimited = 1867 (int)(getCriticalCount() - (getScheduledLatency() * LFactor)) 1868 > (int)LFactor; 1869 } 1870 DEBUG(dumpScheduledState()); 1871 } 1872 1873 /// Release pending ready nodes into the available queue. This makes them 1874 /// visible to heuristics. 1875 void ConvergingScheduler::SchedBoundary::releasePending() { 1876 // If the available queue is empty, it is safe to reset MinReadyCycle. 1877 if (Available.empty()) 1878 MinReadyCycle = UINT_MAX; 1879 1880 // Check to see if any of the pending instructions are ready to issue. If 1881 // so, add them to the available queue. 1882 bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0; 1883 for (unsigned i = 0, e = Pending.size(); i != e; ++i) { 1884 SUnit *SU = *(Pending.begin()+i); 1885 unsigned ReadyCycle = isTop() ?
SU->TopReadyCycle : SU->BotReadyCycle; 1886 1887 if (ReadyCycle < MinReadyCycle) 1888 MinReadyCycle = ReadyCycle; 1889 1890 if (!IsBuffered && ReadyCycle > CurrCycle) 1891 continue; 1892 1893 if (checkHazard(SU)) 1894 continue; 1895 1896 Available.push(SU); 1897 Pending.remove(Pending.begin()+i); 1898 --i; --e; 1899 } 1900 DEBUG(if (!Pending.empty()) Pending.dump()); 1901 CheckPending = false; 1902 } 1903 1904 /// Remove SU from the ready set for this boundary. 1905 void ConvergingScheduler::SchedBoundary::removeReady(SUnit *SU) { 1906 if (Available.isInQueue(SU)) 1907 Available.remove(Available.find(SU)); 1908 else { 1909 assert(Pending.isInQueue(SU) && "bad ready count"); 1910 Pending.remove(Pending.find(SU)); 1911 } 1912 } 1913 1914 /// If this queue only has one ready candidate, return it. As a side effect, 1915 /// defer any nodes that now hit a hazard, and advance the cycle until at least 1916 /// one node is ready. If multiple instructions are ready, return NULL. 1917 SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() { 1918 if (CheckPending) 1919 releasePending(); 1920 1921 if (CurrMOps > 0) { 1922 // Defer any ready instrs that now have a hazard. 1923 for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) { 1924 if (checkHazard(*I)) { 1925 Pending.push(*I); 1926 I = Available.remove(I); 1927 continue; 1928 } 1929 ++I; 1930 } 1931 } 1932 for (unsigned i = 0; Available.empty(); ++i) { 1933 assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedLatency) && 1934 "permanent hazard"); (void)i; 1935 bumpCycle(CurrCycle + 1); 1936 releasePending(); 1937 } 1938 if (Available.size() == 1) 1939 return *Available.begin(); 1940 return NULL; 1941 } 1942 1943 #ifndef NDEBUG 1944 // This is useful information to dump after bumpNode. 1945 // Note that the Queue contents are more useful before pickNodeFromQueue. 1946 void ConvergingScheduler::SchedBoundary::dumpScheduledState() { 1947 unsigned ResFactor; 1948 unsigned ResCount; 1949 if (ZoneCritResIdx) { 1950 ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx); 1951 ResCount = getResourceCount(ZoneCritResIdx); 1952 } 1953 else { 1954 ResFactor = SchedModel->getMicroOpFactor(); 1955 ResCount = RetiredMOps * SchedModel->getMicroOpFactor(); 1956 } 1957 unsigned LFactor = SchedModel->getLatencyFactor(); 1958 dbgs() << Available.getName() << " @" << CurrCycle << "c\n" 1959 << " Retired: " << RetiredMOps; 1960 dbgs() << "\n Executed: " << getExecutedCount() / LFactor << "c"; 1961 dbgs() << "\n Critical: " << ResCount / LFactor << "c, " 1962 << ResCount / ResFactor << " " << getResourceName(ZoneCritResIdx) 1963 << "\n ExpectedLatency: " << ExpectedLatency << "c\n" 1964 << (IsResourceLimited ? " - Resource" : " - Latency") 1965 << " limited.\n"; 1966 } 1967 #endif 1968 1969 void ConvergingScheduler::SchedCandidate:: 1970 initResourceDelta(const ScheduleDAGMI *DAG, 1971 const TargetSchedModel *SchedModel) { 1972 if (!Policy.ReduceResIdx && !Policy.DemandResIdx) 1973 return; 1974 1975 const MCSchedClassDesc *SC = DAG->getSchedClass(SU); 1976 for (TargetSchedModel::ProcResIter 1977 PI = SchedModel->getWriteProcResBegin(SC), 1978 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) { 1979 if (PI->ProcResourceIdx == Policy.ReduceResIdx) 1980 ResDelta.CritResources += PI->Cycles; 1981 if (PI->ProcResourceIdx == Policy.DemandResIdx) 1982 ResDelta.DemandedResources += PI->Cycles; 1983 } 1984 } 1985 1986 /// Return true if this heuristic determines order. 
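/// tryLess/tryGreater return true when the comparison is decisive: either
/// TryCand wins and its Reason is set, or it loses and Cand's Reason is capped
/// at this heuristic's Reason. Returning false means the values tied and the
/// next heuristic should be consulted. For example, with Reason == Cluster:
///   tryLess(0, 1, TryCand, Cand, Cluster) -> true, TryCand.Reason = Cluster
///   tryLess(1, 0, TryCand, Cand, Cluster) -> true, Cand.Reason capped at Cluster
///   tryLess(1, 1, TryCand, Cand, Cluster) -> false, fall through to the next check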
1987 static bool tryLess(int TryVal, int CandVal, 1988 ConvergingScheduler::SchedCandidate &TryCand, 1989 ConvergingScheduler::SchedCandidate &Cand, 1990 ConvergingScheduler::CandReason Reason) { 1991 if (TryVal < CandVal) { 1992 TryCand.Reason = Reason; 1993 return true; 1994 } 1995 if (TryVal > CandVal) { 1996 if (Cand.Reason > Reason) 1997 Cand.Reason = Reason; 1998 return true; 1999 } 2000 return false; 2001 } 2002 2003 static bool tryGreater(int TryVal, int CandVal, 2004 ConvergingScheduler::SchedCandidate &TryCand, 2005 ConvergingScheduler::SchedCandidate &Cand, 2006 ConvergingScheduler::CandReason Reason) { 2007 if (TryVal > CandVal) { 2008 TryCand.Reason = Reason; 2009 return true; 2010 } 2011 if (TryVal < CandVal) { 2012 if (Cand.Reason > Reason) 2013 Cand.Reason = Reason; 2014 return true; 2015 } 2016 return false; 2017 } 2018 2019 static unsigned getWeakLeft(const SUnit *SU, bool isTop) { 2020 return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft; 2021 } 2022 2023 /// Minimize physical register live ranges. Regalloc wants them adjacent to 2024 /// their physreg def/use. 2025 /// 2026 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf 2027 /// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled 2028 /// with the operation that produces or consumes the physreg. We'll do this when 2029 /// regalloc has support for parallel copies. 2030 static int biasPhysRegCopy(const SUnit *SU, bool isTop) { 2031 const MachineInstr *MI = SU->getInstr(); 2032 if (!MI->isCopy()) 2033 return 0; 2034 2035 unsigned ScheduledOper = isTop ? 1 : 0; 2036 unsigned UnscheduledOper = isTop ? 0 : 1; 2037 // If we have already scheduled the physreg producer/consumer, immediately 2038 // schedule the copy. 2039 if (TargetRegisterInfo::isPhysicalRegister( 2040 MI->getOperand(ScheduledOper).getReg())) 2041 return 1; 2042 // If the physreg is at the boundary, defer it. Otherwise schedule it 2043 // immediately to free the dependent. We can hoist the copy later. 2044 bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft; 2045 if (TargetRegisterInfo::isPhysicalRegister( 2046 MI->getOperand(UnscheduledOper).getReg())) 2047 return AtBoundary ? -1 : 1; 2048 return 0; 2049 } 2050 2051 /// Apply a set of heuristics to a new candidate. Heuristics are currently 2052 /// hierarchical. This may be more efficient than a graduated cost model because 2053 /// we don't need to evaluate all aspects of the model for each node in the 2054 /// queue. But it's really done to make the heuristics easier to debug and 2055 /// statistically analyze. 2056 /// 2057 /// \param Cand provides the policy and current best candidate. 2058 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized. 2059 /// \param Zone describes the scheduled zone that we are extending. 2060 /// \param RPTracker describes reg pressure within the scheduled zone. 2061 /// \param TempTracker is a scratch pressure tracker to reuse in queries. 2062 void ConvergingScheduler::tryCandidate(SchedCandidate &Cand, 2063 SchedCandidate &TryCand, 2064 SchedBoundary &Zone, 2065 const RegPressureTracker &RPTracker, 2066 RegPressureTracker &TempTracker) { 2067 2068 // Always initialize TryCand's RPDelta. 2069 TempTracker.getMaxPressureDelta(TryCand.SU->getInstr(), TryCand.RPDelta, 2070 DAG->getRegionCriticalPSets(), 2071 DAG->getRegPressure().MaxSetPressure); 2072 2073 // Initialize the candidate if needed.
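// The first node examined becomes the baseline candidate with Reason ==
// NodeOrder; each later node must beat it on one of the heuristics below.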
2074 if (!Cand.isValid()) { 2075 TryCand.Reason = NodeOrder; 2076 return; 2077 } 2078 2079 if (tryGreater(biasPhysRegCopy(TryCand.SU, Zone.isTop()), 2080 biasPhysRegCopy(Cand.SU, Zone.isTop()), 2081 TryCand, Cand, PhysRegCopy)) 2082 return; 2083 2084 // Avoid exceeding the target's limit. 2085 if (tryLess(TryCand.RPDelta.Excess.UnitIncrease, 2086 Cand.RPDelta.Excess.UnitIncrease, TryCand, Cand, SingleExcess)) 2087 return; 2088 if (Cand.Reason == SingleExcess) 2089 Cand.Reason = MultiPressure; 2090 2091 // Avoid increasing the max critical pressure in the scheduled region. 2092 if (tryLess(TryCand.RPDelta.CriticalMax.UnitIncrease, 2093 Cand.RPDelta.CriticalMax.UnitIncrease, 2094 TryCand, Cand, SingleCritical)) 2095 return; 2096 if (Cand.Reason == SingleCritical) 2097 Cand.Reason = MultiPressure; 2098 2099 // Keep clustered nodes together to encourage downstream peephole 2100 // optimizations which may reduce resource requirements. 2101 // 2102 // This is a best effort to set things up for a post-RA pass. Optimizations 2103 // like generating loads of multiple registers should ideally be done within 2104 // the scheduler pass by combining the loads during DAG postprocessing. 2105 const SUnit *NextClusterSU = 2106 Zone.isTop() ? DAG->getNextClusterSucc() : DAG->getNextClusterPred(); 2107 if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU, 2108 TryCand, Cand, Cluster)) 2109 return; 2110 2111 // Weak edges are for clustering and other constraints. 2112 // 2113 // Deferring TryCand here does not change Cand's reason. This is good in the 2114 // sense that a bad candidate shouldn't affect a previous candidate's 2115 // goodness, but bad in that it is asymmetric and depends on queue order. 2116 CandReason OrigReason = Cand.Reason; 2117 if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()), 2118 getWeakLeft(Cand.SU, Zone.isTop()), 2119 TryCand, Cand, Weak)) { 2120 Cand.Reason = OrigReason; 2121 return; 2122 } 2123 // Avoid critical resource consumption and balance the schedule. 2124 TryCand.initResourceDelta(DAG, SchedModel); 2125 if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources, 2126 TryCand, Cand, ResourceReduce)) 2127 return; 2128 if (tryGreater(TryCand.ResDelta.DemandedResources, 2129 Cand.ResDelta.DemandedResources, 2130 TryCand, Cand, ResourceDemand)) 2131 return; 2132 2133 // Avoid serializing long latency dependence chains. 2134 if (Cand.Policy.ReduceLatency) { 2135 if (Zone.isTop()) { 2136 if (Cand.SU->getDepth() > Zone.getScheduledLatency()) { 2137 if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(), 2138 TryCand, Cand, TopDepthReduce)) 2139 return; 2140 } 2141 if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(), 2142 TryCand, Cand, TopPathReduce)) 2143 return; 2144 } 2145 else { 2146 if (Cand.SU->getHeight() > Zone.getScheduledLatency()) { 2147 if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(), 2148 TryCand, Cand, BotHeightReduce)) 2149 return; 2150 } 2151 if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(), 2152 TryCand, Cand, BotPathReduce)) 2153 return; 2154 } 2155 } 2156 2157 // Avoid increasing the max pressure of the entire region. 2158 if (tryLess(TryCand.RPDelta.CurrentMax.UnitIncrease, 2159 Cand.RPDelta.CurrentMax.UnitIncrease, TryCand, Cand, SingleMax)) 2160 return; 2161 if (Cand.Reason == SingleMax) 2162 Cand.Reason = MultiPressure; 2163 2164 // Prefer immediate defs/users of the last scheduled instruction. This is a 2165 // local pressure avoidance strategy that also makes the machine code 2166 // readable.
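// NextSUs was populated by releaseNode() and holds the nodes that most
// recently became ready, i.e. immediate dependents of just-scheduled
// instructions.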
2167 if (tryGreater(Zone.NextSUs.count(TryCand.SU), Zone.NextSUs.count(Cand.SU), 2168 TryCand, Cand, NextDefUse)) 2169 return; 2170 2171 // Fall through to original instruction order. 2172 if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum) 2173 || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) { 2174 TryCand.Reason = NodeOrder; 2175 } 2176 } 2177 2178 /// pickNodeFromQueue helper that returns true if the LHS reg pressure effect is 2179 /// more desirable than RHS from scheduling standpoint. 2180 static bool compareRPDelta(const RegPressureDelta &LHS, 2181 const RegPressureDelta &RHS) { 2182 // Compare each component of pressure in decreasing order of importance 2183 // without checking if any are valid. Invalid PressureElements are assumed to 2184 // have UnitIncrease==0, so are neutral. 2185 2186 // Avoid increasing the max critical pressure in the scheduled region. 2187 if (LHS.Excess.UnitIncrease != RHS.Excess.UnitIncrease) { 2188 DEBUG(dbgs() << " RP excess top - bot: " 2189 << (LHS.Excess.UnitIncrease - RHS.Excess.UnitIncrease) << '\n'); 2190 return LHS.Excess.UnitIncrease < RHS.Excess.UnitIncrease; 2191 } 2192 // Avoid increasing the max critical pressure in the scheduled region. 2193 if (LHS.CriticalMax.UnitIncrease != RHS.CriticalMax.UnitIncrease) { 2194 DEBUG(dbgs() << " RP critical top - bot: " 2195 << (LHS.CriticalMax.UnitIncrease - RHS.CriticalMax.UnitIncrease) 2196 << '\n'); 2197 return LHS.CriticalMax.UnitIncrease < RHS.CriticalMax.UnitIncrease; 2198 } 2199 // Avoid increasing the max pressure of the entire region. 2200 if (LHS.CurrentMax.UnitIncrease != RHS.CurrentMax.UnitIncrease) { 2201 DEBUG(dbgs() << " RP current top - bot: " 2202 << (LHS.CurrentMax.UnitIncrease - RHS.CurrentMax.UnitIncrease) 2203 << '\n'); 2204 return LHS.CurrentMax.UnitIncrease < RHS.CurrentMax.UnitIncrease; 2205 } 2206 return false; 2207 } 2208 2209 #ifndef NDEBUG 2210 const char *ConvergingScheduler::getReasonStr( 2211 ConvergingScheduler::CandReason Reason) { 2212 switch (Reason) { 2213 case NoCand: return "NOCAND "; 2214 case PhysRegCopy: return "PREG-COPY"; 2215 case SingleExcess: return "REG-EXCESS"; 2216 case SingleCritical: return "REG-CRIT "; 2217 case Cluster: return "CLUSTER "; 2218 case Weak: return "WEAK "; 2219 case SingleMax: return "REG-MAX "; 2220 case MultiPressure: return "REG-MULTI "; 2221 case ResourceReduce: return "RES-REDUCE"; 2222 case ResourceDemand: return "RES-DEMAND"; 2223 case TopDepthReduce: return "TOP-DEPTH "; 2224 case TopPathReduce: return "TOP-PATH "; 2225 case BotHeightReduce:return "BOT-HEIGHT"; 2226 case BotPathReduce: return "BOT-PATH "; 2227 case NextDefUse: return "DEF-USE "; 2228 case NodeOrder: return "ORDER "; 2229 }; 2230 llvm_unreachable("Unknown reason!"); 2231 } 2232 2233 void ConvergingScheduler::traceCandidate(const SchedCandidate &Cand) { 2234 PressureElement P; 2235 unsigned ResIdx = 0; 2236 unsigned Latency = 0; 2237 switch (Cand.Reason) { 2238 default: 2239 break; 2240 case SingleExcess: 2241 P = Cand.RPDelta.Excess; 2242 break; 2243 case SingleCritical: 2244 P = Cand.RPDelta.CriticalMax; 2245 break; 2246 case SingleMax: 2247 P = Cand.RPDelta.CurrentMax; 2248 break; 2249 case ResourceReduce: 2250 ResIdx = Cand.Policy.ReduceResIdx; 2251 break; 2252 case ResourceDemand: 2253 ResIdx = Cand.Policy.DemandResIdx; 2254 break; 2255 case TopDepthReduce: 2256 Latency = Cand.SU->getDepth(); 2257 break; 2258 case TopPathReduce: 2259 Latency = Cand.SU->getHeight(); 2260 break; 2261 case BotHeightReduce: 2262 Latency = Cand.SU->getHeight(); 
2263 break; 2264 case BotPathReduce: 2265 Latency = Cand.SU->getDepth(); 2266 break; 2267 } 2268 dbgs() << " SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason); 2269 if (P.isValid()) 2270 dbgs() << " " << TRI->getRegPressureSetName(P.PSetID) 2271 << ":" << P.UnitIncrease << " "; 2272 else 2273 dbgs() << " "; 2274 if (ResIdx) 2275 dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " "; 2276 else 2277 dbgs() << " "; 2278 if (Latency) 2279 dbgs() << " " << Latency << " cycles "; 2280 else 2281 dbgs() << " "; 2282 dbgs() << '\n'; 2283 } 2284 #endif 2285 2286 /// Pick the best candidate from the top queue. 2287 /// 2288 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during 2289 /// DAG building. To adjust for the current scheduling location we need to 2290 /// maintain the number of vreg uses remaining to be top-scheduled. 2291 void ConvergingScheduler::pickNodeFromQueue(SchedBoundary &Zone, 2292 const RegPressureTracker &RPTracker, 2293 SchedCandidate &Cand) { 2294 ReadyQueue &Q = Zone.Available; 2295 2296 DEBUG(Q.dump()); 2297 2298 // getMaxPressureDelta temporarily modifies the tracker. 2299 RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker); 2300 2301 for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) { 2302 2303 SchedCandidate TryCand(Cand.Policy); 2304 TryCand.SU = *I; 2305 tryCandidate(Cand, TryCand, Zone, RPTracker, TempTracker); 2306 if (TryCand.Reason != NoCand) { 2307 // Initialize resource delta if needed in case future heuristics query it. 2308 if (TryCand.ResDelta == SchedResourceDelta()) 2309 TryCand.initResourceDelta(DAG, SchedModel); 2310 Cand.setBest(TryCand); 2311 DEBUG(traceCandidate(Cand)); 2312 } 2313 } 2314 } 2315 2316 static void tracePick(const ConvergingScheduler::SchedCandidate &Cand, 2317 bool IsTop) { 2318 DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ") 2319 << ConvergingScheduler::getReasonStr(Cand.Reason) << '\n'); 2320 } 2321 2322 /// Pick the best candidate node from either the top or bottom queue. 2323 SUnit *ConvergingScheduler::pickNodeBidirectional(bool &IsTopNode) { 2324 // Schedule as far as possible in the direction of no choice. This is most 2325 // efficient, but also provides the best heuristics for CriticalPSets. 2326 if (SUnit *SU = Bot.pickOnlyChoice()) { 2327 IsTopNode = false; 2328 DEBUG(dbgs() << "Pick Bot NOCAND\n"); 2329 return SU; 2330 } 2331 if (SUnit *SU = Top.pickOnlyChoice()) { 2332 IsTopNode = true; 2333 DEBUG(dbgs() << "Pick Top NOCAND\n"); 2334 return SU; 2335 } 2336 CandPolicy NoPolicy; 2337 SchedCandidate BotCand(NoPolicy); 2338 SchedCandidate TopCand(NoPolicy); 2339 Bot.setPolicy(BotCand.Policy, Top); 2340 Top.setPolicy(TopCand.Policy, Bot); 2341 2342 // Prefer bottom scheduling when heuristics are silent. 2343 pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand); 2344 assert(BotCand.Reason != NoCand && "failed to find the first candidate"); 2345 2346 // If either Q has a single candidate that provides the least increase in 2347 // Excess pressure, we can immediately schedule from that Q. 2348 // 2349 // RegionCriticalPSets summarizes the pressure within the scheduled region and 2350 // affects picking from either Q. If scheduling in one direction must 2351 // increase pressure for one of the excess PSets, then schedule in that 2352 // direction first to provide more freedom in the other direction. 
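// SingleExcess and SingleCritical are the highest-priority register pressure
// reasons, so a bottom pick made for either of them is taken immediately
// without consulting the top queue.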
2353 if (BotCand.Reason == SingleExcess || BotCand.Reason == SingleCritical) { 2354 IsTopNode = false; 2355 tracePick(BotCand, IsTopNode); 2356 return BotCand.SU; 2357 } 2358 // Check if the top Q has a better candidate. 2359 pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand); 2360 assert(TopCand.Reason != NoCand && "failed to find the first candidate"); 2361 2362 // If either Q has a single candidate that minimizes pressure above the 2363 // original region's pressure pick it. 2364 if (TopCand.Reason <= SingleMax || BotCand.Reason <= SingleMax) { 2365 if (TopCand.Reason < BotCand.Reason) { 2366 IsTopNode = true; 2367 tracePick(TopCand, IsTopNode); 2368 return TopCand.SU; 2369 } 2370 IsTopNode = false; 2371 tracePick(BotCand, IsTopNode); 2372 return BotCand.SU; 2373 } 2374 // Check for a salient pressure difference and pick the best from either side. 2375 if (compareRPDelta(TopCand.RPDelta, BotCand.RPDelta)) { 2376 IsTopNode = true; 2377 tracePick(TopCand, IsTopNode); 2378 return TopCand.SU; 2379 } 2380 // Otherwise prefer the bottom candidate, in node order if all else failed. 2381 if (TopCand.Reason < BotCand.Reason) { 2382 IsTopNode = true; 2383 tracePick(TopCand, IsTopNode); 2384 return TopCand.SU; 2385 } 2386 IsTopNode = false; 2387 tracePick(BotCand, IsTopNode); 2388 return BotCand.SU; 2389 } 2390 2391 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy. 2392 SUnit *ConvergingScheduler::pickNode(bool &IsTopNode) { 2393 if (DAG->top() == DAG->bottom()) { 2394 assert(Top.Available.empty() && Top.Pending.empty() && 2395 Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage"); 2396 return NULL; 2397 } 2398 SUnit *SU; 2399 do { 2400 if (ForceTopDown) { 2401 SU = Top.pickOnlyChoice(); 2402 if (!SU) { 2403 CandPolicy NoPolicy; 2404 SchedCandidate TopCand(NoPolicy); 2405 pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand); 2406 assert(TopCand.Reason != NoCand && "failed to find the first candidate"); 2407 SU = TopCand.SU; 2408 } 2409 IsTopNode = true; 2410 } 2411 else if (ForceBottomUp) { 2412 SU = Bot.pickOnlyChoice(); 2413 if (!SU) { 2414 CandPolicy NoPolicy; 2415 SchedCandidate BotCand(NoPolicy); 2416 pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand); 2417 assert(BotCand.Reason != NoCand && "failed to find the first candidate"); 2418 SU = BotCand.SU; 2419 } 2420 IsTopNode = false; 2421 } 2422 else { 2423 SU = pickNodeBidirectional(IsTopNode); 2424 } 2425 } while (SU->isScheduled); 2426 2427 if (SU->isTopReady()) 2428 Top.removeReady(SU); 2429 if (SU->isBottomReady()) 2430 Bot.removeReady(SU); 2431 2432 DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr()); 2433 return SU; 2434 } 2435 2436 void ConvergingScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) { 2437 2438 MachineBasicBlock::iterator InsertPos = SU->getInstr(); 2439 if (!isTop) 2440 ++InsertPos; 2441 SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs; 2442 2443 // Find already scheduled copies with a single physreg dependence and move 2444 // them just above the scheduled instruction. 2445 for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end(); 2446 I != E; ++I) { 2447 if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg())) 2448 continue; 2449 SUnit *DepSU = I->getSUnit(); 2450 if (isTop ? 
DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1) 2451 continue; 2452 MachineInstr *Copy = DepSU->getInstr(); 2453 if (!Copy->isCopy()) 2454 continue; 2455 DEBUG(dbgs() << " Rescheduling physreg copy "; 2456 I->getSUnit()->dump(DAG)); 2457 DAG->moveInstruction(Copy, InsertPos); 2458 } 2459 } 2460 2461 /// Update the scheduler's state after scheduling a node. This is the same node 2462 /// that was just returned by pickNode(). However, ScheduleDAGMI needs to update 2463 /// its state based on the current cycle before MachineSchedStrategy does. 2464 /// 2465 /// FIXME: Eventually, we may bundle physreg copies rather than rescheduling 2466 /// them here. See comments in biasPhysRegCopy. 2467 void ConvergingScheduler::schedNode(SUnit *SU, bool IsTopNode) { 2468 if (IsTopNode) { 2469 SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.CurrCycle); 2470 Top.bumpNode(SU); 2471 if (SU->hasPhysRegUses) 2472 reschedulePhysRegCopies(SU, true); 2473 } 2474 else { 2475 SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.CurrCycle); 2476 Bot.bumpNode(SU); 2477 if (SU->hasPhysRegDefs) 2478 reschedulePhysRegCopies(SU, false); 2479 } 2480 } 2481 2482 /// Create the standard converging machine scheduler. This will be used as the 2483 /// default scheduler if the target does not set a default. 2484 static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) { 2485 assert((!ForceTopDown || !ForceBottomUp) && 2486 "-misched-topdown incompatible with -misched-bottomup"); 2487 ScheduleDAGMI *DAG = new ScheduleDAGMI(C, new ConvergingScheduler()); 2488 // Register DAG post-processors. 2489 // 2490 // FIXME: extend the mutation API to allow earlier mutations to instantiate 2491 // data and pass it to later mutations. Have a single mutation that gathers 2492 // the interesting nodes in one pass. 2493 DAG->addMutation(new CopyConstrain(DAG->TII, DAG->TRI)); 2494 if (EnableLoadCluster) 2495 DAG->addMutation(new LoadClusterMutation(DAG->TII, DAG->TRI)); 2496 if (EnableMacroFusion) 2497 DAG->addMutation(new MacroFusion(DAG->TII)); 2498 return DAG; 2499 } 2500 static MachineSchedRegistry 2501 ConvergingSchedRegistry("converge", "Standard converging scheduler.", 2502 createConvergingSched); 2503 2504 //===----------------------------------------------------------------------===// 2505 // ILP Scheduler. Currently for experimental analysis of heuristics. 2506 //===----------------------------------------------------------------------===// 2507 2508 namespace { 2509 /// \brief Order nodes by the ILP metric. 2510 struct ILPOrder { 2511 const SchedDFSResult *DFSResult; 2512 const BitVector *ScheduledTrees; 2513 bool MaximizeILP; 2514 2515 ILPOrder(bool MaxILP): DFSResult(0), ScheduledTrees(0), MaximizeILP(MaxILP) {} 2516 2517 /// \brief Apply a less-than relation on node priority. 2518 /// 2519 /// (Return true if A comes after B in the Q.) 2520 bool operator()(const SUnit *A, const SUnit *B) const { 2521 unsigned SchedTreeA = DFSResult->getSubtreeID(A); 2522 unsigned SchedTreeB = DFSResult->getSubtreeID(B); 2523 if (SchedTreeA != SchedTreeB) { 2524 // Unscheduled trees have lower priority. 2525 if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB)) 2526 return ScheduledTrees->test(SchedTreeB); 2527 2528 // Trees with shallower connections have lower priority.
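// A node in a deeper (higher-level) subtree therefore sorts higher and is
// popped from the ready queue earlier.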
2529 if (DFSResult->getSubtreeLevel(SchedTreeA) 2530 != DFSResult->getSubtreeLevel(SchedTreeB)) { 2531 return DFSResult->getSubtreeLevel(SchedTreeA) 2532 < DFSResult->getSubtreeLevel(SchedTreeB); 2533 } 2534 } 2535 if (MaximizeILP) 2536 return DFSResult->getILP(A) < DFSResult->getILP(B); 2537 else 2538 return DFSResult->getILP(A) > DFSResult->getILP(B); 2539 } 2540 }; 2541 2542 /// \brief Schedule based on the ILP metric. 2543 class ILPScheduler : public MachineSchedStrategy { 2544 /// In case all subtrees are eventually connected to a common root through 2545 /// data dependence (e.g. reduction), place an upper limit on their size. 2546 /// 2547 /// FIXME: A subtree limit is generally good, but in the situation commented 2548 /// above, where multiple similar subtrees feed a common root, we should 2549 /// only split at a point where the resulting subtrees will be balanced. 2550 /// (a motivating test case must be found). 2551 static const unsigned SubtreeLimit = 16; 2552 2553 ScheduleDAGMI *DAG; 2554 ILPOrder Cmp; 2555 2556 std::vector<SUnit*> ReadyQ; 2557 public: 2558 ILPScheduler(bool MaximizeILP): DAG(0), Cmp(MaximizeILP) {} 2559 2560 virtual void initialize(ScheduleDAGMI *dag) { 2561 DAG = dag; 2562 DAG->computeDFSResult(); 2563 Cmp.DFSResult = DAG->getDFSResult(); 2564 Cmp.ScheduledTrees = &DAG->getScheduledTrees(); 2565 ReadyQ.clear(); 2566 } 2567 2568 virtual void registerRoots() { 2569 // Restore the heap in ReadyQ with the updated DFS results. 2570 std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 2571 } 2572 2573 /// Implement MachineSchedStrategy interface. 2574 /// ----------------------------------------- 2575 2576 /// Callback to select the highest priority node from the ready Q. 2577 virtual SUnit *pickNode(bool &IsTopNode) { 2578 if (ReadyQ.empty()) return NULL; 2579 std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 2580 SUnit *SU = ReadyQ.back(); 2581 ReadyQ.pop_back(); 2582 IsTopNode = false; 2583 DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") " 2584 << " ILP: " << DAG->getDFSResult()->getILP(SU) 2585 << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @" 2586 << DAG->getDFSResult()->getSubtreeLevel( 2587 DAG->getDFSResult()->getSubtreeID(SU)) << '\n' 2588 << "Scheduling " << *SU->getInstr()); 2589 return SU; 2590 } 2591 2592 /// \brief Scheduler callback to notify that a new subtree is scheduled. 2593 virtual void scheduleTree(unsigned SubtreeID) { 2594 std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 2595 } 2596 2597 /// Callback after a node is scheduled. Mark a newly scheduled tree, notify 2598 /// DFSResults, and resort the priority Q. 
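/// (The resorting happens in scheduleTree() above; schedNode() itself only
/// asserts that scheduling proceeds bottom-up, as SchedDFSResult requires.)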
2599 virtual void schedNode(SUnit *SU, bool IsTopNode) { 2600 assert(!IsTopNode && "SchedDFSResult needs bottom-up"); 2601 } 2602 2603 virtual void releaseTopNode(SUnit *) { /*only called for top roots*/ } 2604 2605 virtual void releaseBottomNode(SUnit *SU) { 2606 ReadyQ.push_back(SU); 2607 std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 2608 } 2609 }; 2610 } // namespace 2611 2612 static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) { 2613 return new ScheduleDAGMI(C, new ILPScheduler(true)); 2614 } 2615 static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) { 2616 return new ScheduleDAGMI(C, new ILPScheduler(false)); 2617 } 2618 static MachineSchedRegistry ILPMaxRegistry( 2619 "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler); 2620 static MachineSchedRegistry ILPMinRegistry( 2621 "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler); 2622 2623 //===----------------------------------------------------------------------===// 2624 // Machine Instruction Shuffler for Correctness Testing 2625 //===----------------------------------------------------------------------===// 2626 2627 #ifndef NDEBUG 2628 namespace { 2629 /// Apply a less-than relation on the node order, which corresponds to the 2630 /// instruction order prior to scheduling. IsReverse implements greater-than. 2631 template<bool IsReverse> 2632 struct SUnitOrder { 2633 bool operator()(SUnit *A, SUnit *B) const { 2634 if (IsReverse) 2635 return A->NodeNum > B->NodeNum; 2636 else 2637 return A->NodeNum < B->NodeNum; 2638 } 2639 }; 2640 2641 /// Reorder instructions as much as possible. 2642 class InstructionShuffler : public MachineSchedStrategy { 2643 bool IsAlternating; 2644 bool IsTopDown; 2645 2646 // Using a less-than relation (SUnitOrder<false>) for the TopQ priority 2647 // gives nodes with a higher number higher priority causing the latest 2648 // instructions to be scheduled first. 2649 PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> > 2650 TopQ; 2651 // When scheduling bottom-up, use greater-than as the queue priority. 2652 PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> > 2653 BottomQ; 2654 public: 2655 InstructionShuffler(bool alternate, bool topdown) 2656 : IsAlternating(alternate), IsTopDown(topdown) {} 2657 2658 virtual void initialize(ScheduleDAGMI *) { 2659 TopQ.clear(); 2660 BottomQ.clear(); 2661 } 2662 2663 /// Implement MachineSchedStrategy interface. 
2664 /// ----------------------------------------- 2665 2666 virtual SUnit *pickNode(bool &IsTopNode) { 2667 SUnit *SU; 2668 if (IsTopDown) { 2669 do { 2670 if (TopQ.empty()) return NULL; 2671 SU = TopQ.top(); 2672 TopQ.pop(); 2673 } while (SU->isScheduled); 2674 IsTopNode = true; 2675 } 2676 else { 2677 do { 2678 if (BottomQ.empty()) return NULL; 2679 SU = BottomQ.top(); 2680 BottomQ.pop(); 2681 } while (SU->isScheduled); 2682 IsTopNode = false; 2683 } 2684 if (IsAlternating) 2685 IsTopDown = !IsTopDown; 2686 return SU; 2687 } 2688 2689 virtual void schedNode(SUnit *SU, bool IsTopNode) {} 2690 2691 virtual void releaseTopNode(SUnit *SU) { 2692 TopQ.push(SU); 2693 } 2694 virtual void releaseBottomNode(SUnit *SU) { 2695 BottomQ.push(SU); 2696 } 2697 }; 2698 } // namespace 2699 2700 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) { 2701 bool Alternate = !ForceTopDown && !ForceBottomUp; 2702 bool TopDown = !ForceBottomUp; 2703 assert((TopDown || !ForceTopDown) && 2704 "-misched-topdown incompatible with -misched-bottomup"); 2705 return new ScheduleDAGMI(C, new InstructionShuffler(Alternate, TopDown)); 2706 } 2707 static MachineSchedRegistry ShufflerRegistry( 2708 "shuffle", "Shuffle machine instructions alternating directions", 2709 createInstructionShuffler); 2710 #endif // !NDEBUG 2711 2712 //===----------------------------------------------------------------------===// 2713 // GraphWriter support for ScheduleDAGMI. 2714 //===----------------------------------------------------------------------===// 2715 2716 #ifndef NDEBUG 2717 namespace llvm { 2718 2719 template<> struct GraphTraits< 2720 ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {}; 2721 2722 template<> 2723 struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits { 2724 2725 DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {} 2726 2727 static std::string getGraphName(const ScheduleDAG *G) { 2728 return G->MF.getName(); 2729 } 2730 2731 static bool renderGraphFromBottomUp() { 2732 return true; 2733 } 2734 2735 static bool isNodeHidden(const SUnit *Node) { 2736 return (Node->NumPreds > 10 || Node->NumSuccs > 10); 2737 } 2738 2739 static bool hasNodeAddressLabel(const SUnit *Node, 2740 const ScheduleDAG *Graph) { 2741 return false; 2742 } 2743 2744 /// If you want to override the dot attributes printed for a particular 2745 /// edge, override this method. 
2746 static std::string getEdgeAttributes(const SUnit *Node, 2747 SUnitIterator EI, 2748 const ScheduleDAG *Graph) { 2749 if (EI.isArtificialDep()) 2750 return "color=cyan,style=dashed"; 2751 if (EI.isCtrlDep()) 2752 return "color=blue,style=dashed"; 2753 return ""; 2754 } 2755 2756 static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) { 2757 std::string Str; 2758 raw_string_ostream SS(Str); 2759 SS << "SU(" << SU->NodeNum << ')'; 2760 return SS.str(); 2761 } 2762 static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) { 2763 return G->getGraphNodeLabel(SU); 2764 } 2765 2766 static std::string getNodeAttributes(const SUnit *N, 2767 const ScheduleDAG *Graph) { 2768 std::string Str("shape=Mrecord"); 2769 const SchedDFSResult *DFS = 2770 static_cast<const ScheduleDAGMI*>(Graph)->getDFSResult(); 2771 if (DFS) { 2772 Str += ",style=filled,fillcolor=\"#"; 2773 Str += DOT::getColorString(DFS->getSubtreeID(N)); 2774 Str += '"'; 2775 } 2776 return Str; 2777 } 2778 }; 2779 } // namespace llvm 2780 #endif // NDEBUG 2781 2782 /// viewGraph - Pop up a ghostview window with the reachable parts of the DAG 2783 /// rendered using 'dot'. 2784 /// 2785 void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) { 2786 #ifndef NDEBUG 2787 ViewGraph(this, Name, false, Title); 2788 #else 2789 errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on " 2790 << "systems with Graphviz or gv!\n"; 2791 #endif // NDEBUG 2792 } 2793 2794 /// Out-of-line implementation with no arguments is handy for gdb. 2795 void ScheduleDAGMI::viewGraph() { 2796 viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName()); 2797 } 2798