1 //===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // MachineScheduler schedules machine instructions after phi elimination. It 11 // preserves LiveIntervals so it can be invoked before register allocation. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #define DEBUG_TYPE "misched" 16 17 #include "llvm/CodeGen/LiveIntervalAnalysis.h" 18 #include "llvm/CodeGen/MachineScheduler.h" 19 #include "llvm/CodeGen/Passes.h" 20 #include "llvm/CodeGen/RegisterClassInfo.h" 21 #include "llvm/CodeGen/RegisterPressure.h" 22 #include "llvm/CodeGen/ScheduleDAGInstrs.h" 23 #include "llvm/CodeGen/ScheduleHazardRecognizer.h" 24 #include "llvm/Target/TargetInstrInfo.h" 25 #include "llvm/MC/MCInstrItineraries.h" 26 #include "llvm/Analysis/AliasAnalysis.h" 27 #include "llvm/Support/CommandLine.h" 28 #include "llvm/Support/Debug.h" 29 #include "llvm/Support/ErrorHandling.h" 30 #include "llvm/Support/raw_ostream.h" 31 #include "llvm/ADT/OwningPtr.h" 32 #include "llvm/ADT/PriorityQueue.h" 33 34 #include <queue> 35 36 using namespace llvm; 37 38 static cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden, 39 cl::desc("Force top-down list scheduling")); 40 static cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden, 41 cl::desc("Force bottom-up list scheduling")); 42 43 #ifndef NDEBUG 44 static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden, 45 cl::desc("Pop up a window to show MISched dags after they are processed")); 46 47 static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden, 48 cl::desc("Stop scheduling after N instructions"), cl::init(~0U)); 49 #else 50 static bool ViewMISchedDAGs = false; 51 #endif // NDEBUG 52 
53 //===----------------------------------------------------------------------===// 54 // Machine Instruction Scheduling Pass and Registry 55 //===----------------------------------------------------------------------===// 56 57 MachineSchedContext::MachineSchedContext(): 58 MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) { 59 RegClassInfo = new RegisterClassInfo(); 60 } 61 62 MachineSchedContext::~MachineSchedContext() { 63 delete RegClassInfo; 64 } 65 66 namespace { 67 /// MachineScheduler runs after coalescing and before register allocation. 68 class MachineScheduler : public MachineSchedContext, 69 public MachineFunctionPass { 70 public: 71 MachineScheduler(); 72 73 virtual void getAnalysisUsage(AnalysisUsage &AU) const; 74 75 virtual void releaseMemory() {} 76 77 virtual bool runOnMachineFunction(MachineFunction&); 78 79 virtual void print(raw_ostream &O, const Module* = 0) const; 80 81 static char ID; // Class identification, replacement for typeinfo 82 }; 83 } // namespace 84 85 char MachineScheduler::ID = 0; 86 87 char &llvm::MachineSchedulerID = MachineScheduler::ID; 88 89 INITIALIZE_PASS_BEGIN(MachineScheduler, "misched", 90 "Machine Instruction Scheduler", false, false) 91 INITIALIZE_AG_DEPENDENCY(AliasAnalysis) 92 INITIALIZE_PASS_DEPENDENCY(SlotIndexes) 93 INITIALIZE_PASS_DEPENDENCY(LiveIntervals) 94 INITIALIZE_PASS_END(MachineScheduler, "misched", 95 "Machine Instruction Scheduler", false, false) 96 97 MachineScheduler::MachineScheduler() 98 : MachineFunctionPass(ID) { 99 initializeMachineSchedulerPass(*PassRegistry::getPassRegistry()); 100 } 101 102 void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const { 103 AU.setPreservesCFG(); 104 AU.addRequiredID(MachineDominatorsID); 105 AU.addRequired<MachineLoopInfo>(); 106 AU.addRequired<AliasAnalysis>(); 107 AU.addRequired<TargetPassConfig>(); 108 AU.addRequired<SlotIndexes>(); 109 AU.addPreserved<SlotIndexes>(); 110 AU.addRequired<LiveIntervals>(); 111 AU.addPreserved<LiveIntervals>(); 112 
MachineFunctionPass::getAnalysisUsage(AU); 113 } 114 115 MachinePassRegistry MachineSchedRegistry::Registry; 116 117 /// A dummy default scheduler factory indicates whether the scheduler 118 /// is overridden on the command line. 119 static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) { 120 return 0; 121 } 122 123 /// MachineSchedOpt allows command line selection of the scheduler. 124 static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false, 125 RegisterPassParser<MachineSchedRegistry> > 126 MachineSchedOpt("misched", 127 cl::init(&useDefaultMachineSched), cl::Hidden, 128 cl::desc("Machine instruction scheduler to use")); 129 130 static MachineSchedRegistry 131 DefaultSchedRegistry("default", "Use the target's default scheduler choice.", 132 useDefaultMachineSched); 133 134 /// Forward declare the standard machine scheduler. This will be used as the 135 /// default scheduler if the target does not set a default. 136 static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C); 137 138 139 /// Decrement this iterator until reaching the top or a non-debug instr. 140 static MachineBasicBlock::iterator 141 priorNonDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator Beg) { 142 assert(I != Beg && "reached the top of the region, cannot decrement"); 143 while (--I != Beg) { 144 if (!I->isDebugValue()) 145 break; 146 } 147 return I; 148 } 149 150 /// If this iterator is a debug value, increment until reaching the End or a 151 /// non-debug instruction. 152 static MachineBasicBlock::iterator 153 nextIfDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator End) { 154 for(; I != End; ++I) { 155 if (!I->isDebugValue()) 156 break; 157 } 158 return I; 159 } 160 161 /// Top-level MachineScheduler pass driver. 162 /// 163 /// Visit blocks in function order. Divide each block into scheduling regions 164 /// and visit them bottom-up. 
Visiting regions bottom-up is not required, but is 165 /// consistent with the DAG builder, which traverses the interior of the 166 /// scheduling regions bottom-up. 167 /// 168 /// This design avoids exposing scheduling boundaries to the DAG builder, 169 /// simplifying the DAG builder's support for "special" target instructions. 170 /// At the same time the design allows target schedulers to operate across 171 /// scheduling boundaries, for example to bundle the boudary instructions 172 /// without reordering them. This creates complexity, because the target 173 /// scheduler must update the RegionBegin and RegionEnd positions cached by 174 /// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler 175 /// design would be to split blocks at scheduling boundaries, but LLVM has a 176 /// general bias against block splitting purely for implementation simplicity. 177 bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) { 178 DEBUG(dbgs() << "Before MISsched:\n"; mf.print(dbgs())); 179 180 // Initialize the context of the pass. 181 MF = &mf; 182 MLI = &getAnalysis<MachineLoopInfo>(); 183 MDT = &getAnalysis<MachineDominatorTree>(); 184 PassConfig = &getAnalysis<TargetPassConfig>(); 185 AA = &getAnalysis<AliasAnalysis>(); 186 187 LIS = &getAnalysis<LiveIntervals>(); 188 const TargetInstrInfo *TII = MF->getTarget().getInstrInfo(); 189 190 RegClassInfo->runOnMachineFunction(*MF); 191 192 // Select the scheduler, or set the default. 193 MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt; 194 if (Ctor == useDefaultMachineSched) { 195 // Get the default scheduler set by the target. 196 Ctor = MachineSchedRegistry::getDefault(); 197 if (!Ctor) { 198 Ctor = createConvergingSched; 199 MachineSchedRegistry::setDefault(Ctor); 200 } 201 } 202 // Instantiate the selected scheduler. 203 OwningPtr<ScheduleDAGInstrs> Scheduler(Ctor(this)); 204 205 // Visit all machine basic blocks. 
206 // 207 // TODO: Visit blocks in global postorder or postorder within the bottom-up 208 // loop tree. Then we can optionally compute global RegPressure. 209 for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end(); 210 MBB != MBBEnd; ++MBB) { 211 212 Scheduler->startBlock(MBB); 213 214 // Break the block into scheduling regions [I, RegionEnd), and schedule each 215 // region as soon as it is discovered. RegionEnd points the the scheduling 216 // boundary at the bottom of the region. The DAG does not include RegionEnd, 217 // but the region does (i.e. the next RegionEnd is above the previous 218 // RegionBegin). If the current block has no terminator then RegionEnd == 219 // MBB->end() for the bottom region. 220 // 221 // The Scheduler may insert instructions during either schedule() or 222 // exitRegion(), even for empty regions. So the local iterators 'I' and 223 // 'RegionEnd' are invalid across these calls. 224 unsigned RemainingCount = MBB->size(); 225 for(MachineBasicBlock::iterator RegionEnd = MBB->end(); 226 RegionEnd != MBB->begin(); RegionEnd = Scheduler->begin()) { 227 228 // Avoid decrementing RegionEnd for blocks with no terminator. 229 if (RegionEnd != MBB->end() 230 || TII->isSchedulingBoundary(llvm::prior(RegionEnd), MBB, *MF)) { 231 --RegionEnd; 232 // Count the boundary instruction. 233 --RemainingCount; 234 } 235 236 // The next region starts above the previous region. Look backward in the 237 // instruction stream until we find the nearest boundary. 238 MachineBasicBlock::iterator I = RegionEnd; 239 for(;I != MBB->begin(); --I, --RemainingCount) { 240 if (TII->isSchedulingBoundary(llvm::prior(I), MBB, *MF)) 241 break; 242 } 243 // Notify the scheduler of the region, even if we may skip scheduling 244 // it. Perhaps it still needs to be bundled. 245 Scheduler->enterRegion(MBB, I, RegionEnd, RemainingCount); 246 247 // Skip empty scheduling regions (0 or 1 schedulable instructions). 
248 if (I == RegionEnd || I == llvm::prior(RegionEnd)) { 249 // Close the current region. Bundle the terminator if needed. 250 // This invalidates 'RegionEnd' and 'I'. 251 Scheduler->exitRegion(); 252 continue; 253 } 254 DEBUG(dbgs() << "********** MI Scheduling **********\n"); 255 DEBUG(dbgs() << MF->getFunction()->getName() 256 << ":BB#" << MBB->getNumber() << "\n From: " << *I << " To: "; 257 if (RegionEnd != MBB->end()) dbgs() << *RegionEnd; 258 else dbgs() << "End"; 259 dbgs() << " Remaining: " << RemainingCount << "\n"); 260 261 // Schedule a region: possibly reorder instructions. 262 // This invalidates 'RegionEnd' and 'I'. 263 Scheduler->schedule(); 264 265 // Close the current region. 266 Scheduler->exitRegion(); 267 268 // Scheduling has invalidated the current iterator 'I'. Ask the 269 // scheduler for the top of it's scheduled region. 270 RegionEnd = Scheduler->begin(); 271 } 272 assert(RemainingCount == 0 && "Instruction count mismatch!"); 273 Scheduler->finishBlock(); 274 } 275 Scheduler->finalizeSchedule(); 276 DEBUG(LIS->print(dbgs())); 277 return true; 278 } 279 280 void MachineScheduler::print(raw_ostream &O, const Module* m) const { 281 // unimplemented 282 } 283 284 //===----------------------------------------------------------------------===// 285 // MachineSchedStrategy - Interface to a machine scheduling algorithm. 286 //===----------------------------------------------------------------------===// 287 288 namespace { 289 class ScheduleDAGMI; 290 291 /// MachineSchedStrategy - Interface used by ScheduleDAGMI to drive the selected 292 /// scheduling algorithm. 293 /// 294 /// If this works well and targets wish to reuse ScheduleDAGMI, we may expose it 295 /// in ScheduleDAGInstrs.h 296 class MachineSchedStrategy { 297 public: 298 virtual ~MachineSchedStrategy() {} 299 300 /// Initialize the strategy after building the DAG for a new region. 
301 virtual void initialize(ScheduleDAGMI *DAG) = 0; 302 303 /// Pick the next node to schedule, or return NULL. Set IsTopNode to true to 304 /// schedule the node at the top of the unscheduled region. Otherwise it will 305 /// be scheduled at the bottom. 306 virtual SUnit *pickNode(bool &IsTopNode) = 0; 307 308 /// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled a node. 309 virtual void schedNode(SUnit *SU, bool IsTopNode) = 0; 310 311 /// When all predecessor dependencies have been resolved, free this node for 312 /// top-down scheduling. 313 virtual void releaseTopNode(SUnit *SU) = 0; 314 /// When all successor dependencies have been resolved, free this node for 315 /// bottom-up scheduling. 316 virtual void releaseBottomNode(SUnit *SU) = 0; 317 }; 318 } // namespace 319 320 //===----------------------------------------------------------------------===// 321 // ScheduleDAGMI - Base class for MachineInstr scheduling with LiveIntervals 322 // preservation. 323 //===----------------------------------------------------------------------===// 324 325 namespace { 326 /// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that schedules 327 /// machine instructions while updating LiveIntervals. 328 class ScheduleDAGMI : public ScheduleDAGInstrs { 329 AliasAnalysis *AA; 330 RegisterClassInfo *RegClassInfo; 331 MachineSchedStrategy *SchedImpl; 332 333 MachineBasicBlock::iterator LiveRegionEnd; 334 335 /// Register pressure in this region computed by buildSchedGraph. 336 IntervalPressure RegPressure; 337 RegPressureTracker RPTracker; 338 339 /// List of pressure sets that exceed the target's pressure limit before 340 /// scheduling, listed in increasing set ID order. Each pressure set is paired 341 /// with its max pressure in the currently scheduled regions. 342 std::vector<PressureElement> RegionCriticalPSets; 343 344 /// The top of the unscheduled zone. 
345 MachineBasicBlock::iterator CurrentTop; 346 IntervalPressure TopPressure; 347 RegPressureTracker TopRPTracker; 348 349 /// The bottom of the unscheduled zone. 350 MachineBasicBlock::iterator CurrentBottom; 351 IntervalPressure BotPressure; 352 RegPressureTracker BotRPTracker; 353 354 #ifndef NDEBUG 355 /// The number of instructions scheduled so far. Used to cut off the 356 /// scheduler at the point determined by misched-cutoff. 357 unsigned NumInstrsScheduled; 358 #endif 359 public: 360 ScheduleDAGMI(MachineSchedContext *C, MachineSchedStrategy *S): 361 ScheduleDAGInstrs(*C->MF, *C->MLI, *C->MDT, /*IsPostRA=*/false, C->LIS), 362 AA(C->AA), RegClassInfo(C->RegClassInfo), SchedImpl(S), 363 RPTracker(RegPressure), CurrentTop(), TopRPTracker(TopPressure), 364 CurrentBottom(), BotRPTracker(BotPressure) { 365 #ifndef NDEBUG 366 NumInstrsScheduled = 0; 367 #endif 368 } 369 370 ~ScheduleDAGMI() { 371 delete SchedImpl; 372 } 373 374 MachineBasicBlock::iterator top() const { return CurrentTop; } 375 MachineBasicBlock::iterator bottom() const { return CurrentBottom; } 376 377 /// Implement the ScheduleDAGInstrs interface for handling the next scheduling 378 /// region. This covers all instructions in a block, while schedule() may only 379 /// cover a subset. 380 void enterRegion(MachineBasicBlock *bb, 381 MachineBasicBlock::iterator begin, 382 MachineBasicBlock::iterator end, 383 unsigned endcount); 384 385 /// Implement ScheduleDAGInstrs interface for scheduling a sequence of 386 /// reorderable instructions. 387 void schedule(); 388 389 /// Get current register pressure for the top scheduled instructions. 390 const IntervalPressure &getTopPressure() const { return TopPressure; } 391 const RegPressureTracker &getTopRPTracker() const { return TopRPTracker; } 392 393 /// Get current register pressure for the bottom scheduled instructions. 
394 const IntervalPressure &getBotPressure() const { return BotPressure; } 395 const RegPressureTracker &getBotRPTracker() const { return BotRPTracker; } 396 397 /// Get register pressure for the entire scheduling region before scheduling. 398 const IntervalPressure &getRegPressure() const { return RegPressure; } 399 400 const std::vector<PressureElement> &getRegionCriticalPSets() const { 401 return RegionCriticalPSets; 402 } 403 404 /// getIssueWidth - Return the max instructions per scheduling group. 405 unsigned getIssueWidth() const { 406 return InstrItins ? InstrItins->Props.IssueWidth : 1; 407 } 408 409 /// getNumMicroOps - Return the number of issue slots required for this MI. 410 unsigned getNumMicroOps(MachineInstr *MI) const { 411 int UOps = InstrItins->getNumMicroOps(MI->getDesc().getSchedClass()); 412 return (UOps >= 0) ? UOps : TII->getNumMicroOps(InstrItins, MI); 413 } 414 415 protected: 416 void initRegPressure(); 417 void updateScheduledPressure(std::vector<unsigned> NewMaxPressure); 418 419 void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos); 420 bool checkSchedLimit(); 421 422 void releaseRoots(); 423 424 void releaseSucc(SUnit *SU, SDep *SuccEdge); 425 void releaseSuccessors(SUnit *SU); 426 void releasePred(SUnit *SU, SDep *PredEdge); 427 void releasePredecessors(SUnit *SU); 428 429 void placeDebugValues(); 430 }; 431 } // namespace 432 433 /// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When 434 /// NumPredsLeft reaches zero, release the successor node. 435 /// 436 /// FIXME: Adjust SuccSU height based on MinLatency. 437 void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) { 438 SUnit *SuccSU = SuccEdge->getSUnit(); 439 440 #ifndef NDEBUG 441 if (SuccSU->NumPredsLeft == 0) { 442 dbgs() << "*** Scheduling failed! 
***\n"; 443 SuccSU->dump(this); 444 dbgs() << " has been released too many times!\n"; 445 llvm_unreachable(0); 446 } 447 #endif 448 --SuccSU->NumPredsLeft; 449 if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU) 450 SchedImpl->releaseTopNode(SuccSU); 451 } 452 453 /// releaseSuccessors - Call releaseSucc on each of SU's successors. 454 void ScheduleDAGMI::releaseSuccessors(SUnit *SU) { 455 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); 456 I != E; ++I) { 457 releaseSucc(SU, &*I); 458 } 459 } 460 461 /// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When 462 /// NumSuccsLeft reaches zero, release the predecessor node. 463 /// 464 /// FIXME: Adjust PredSU height based on MinLatency. 465 void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) { 466 SUnit *PredSU = PredEdge->getSUnit(); 467 468 #ifndef NDEBUG 469 if (PredSU->NumSuccsLeft == 0) { 470 dbgs() << "*** Scheduling failed! ***\n"; 471 PredSU->dump(this); 472 dbgs() << " has been released too many times!\n"; 473 llvm_unreachable(0); 474 } 475 #endif 476 --PredSU->NumSuccsLeft; 477 if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) 478 SchedImpl->releaseBottomNode(PredSU); 479 } 480 481 /// releasePredecessors - Call releasePred on each of SU's predecessors. 482 void ScheduleDAGMI::releasePredecessors(SUnit *SU) { 483 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); 484 I != E; ++I) { 485 releasePred(SU, &*I); 486 } 487 } 488 489 void ScheduleDAGMI::moveInstruction(MachineInstr *MI, 490 MachineBasicBlock::iterator InsertPos) { 491 // Advance RegionBegin if the first instruction moves down. 492 if (&*RegionBegin == MI) 493 ++RegionBegin; 494 495 // Update the instruction stream. 496 BB->splice(InsertPos, BB, MI); 497 498 // Update LiveIntervals 499 LIS->handleMove(MI); 500 501 // Recede RegionBegin if an instruction moves above the first. 
502 if (RegionBegin == InsertPos) 503 RegionBegin = MI; 504 } 505 506 bool ScheduleDAGMI::checkSchedLimit() { 507 #ifndef NDEBUG 508 if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) { 509 CurrentTop = CurrentBottom; 510 return false; 511 } 512 ++NumInstrsScheduled; 513 #endif 514 return true; 515 } 516 517 /// enterRegion - Called back from MachineScheduler::runOnMachineFunction after 518 /// crossing a scheduling boundary. [begin, end) includes all instructions in 519 /// the region, including the boundary itself and single-instruction regions 520 /// that don't get scheduled. 521 void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb, 522 MachineBasicBlock::iterator begin, 523 MachineBasicBlock::iterator end, 524 unsigned endcount) 525 { 526 ScheduleDAGInstrs::enterRegion(bb, begin, end, endcount); 527 528 // For convenience remember the end of the liveness region. 529 LiveRegionEnd = 530 (RegionEnd == bb->end()) ? RegionEnd : llvm::next(RegionEnd); 531 } 532 533 // Setup the register pressure trackers for the top scheduled top and bottom 534 // scheduled regions. 535 void ScheduleDAGMI::initRegPressure() { 536 TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin); 537 BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd); 538 539 // Close the RPTracker to finalize live ins. 540 RPTracker.closeRegion(); 541 542 DEBUG(RPTracker.getPressure().dump(TRI)); 543 544 // Initialize the live ins and live outs. 545 TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs); 546 BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs); 547 548 // Close one end of the tracker so we can call 549 // getMaxUpward/DownwardPressureDelta before advancing across any 550 // instructions. This converts currently live regs into live ins/outs. 551 TopRPTracker.closeTop(); 552 BotRPTracker.closeBottom(); 553 554 // Account for liveness generated by the region boundary. 
555 if (LiveRegionEnd != RegionEnd) 556 BotRPTracker.recede(); 557 558 assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom"); 559 560 // Cache the list of excess pressure sets in this region. This will also track 561 // the max pressure in the scheduled code for these sets. 562 RegionCriticalPSets.clear(); 563 std::vector<unsigned> RegionPressure = RPTracker.getPressure().MaxSetPressure; 564 for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) { 565 unsigned Limit = TRI->getRegPressureSetLimit(i); 566 if (RegionPressure[i] > Limit) 567 RegionCriticalPSets.push_back(PressureElement(i, 0)); 568 } 569 DEBUG(dbgs() << "Excess PSets: "; 570 for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i) 571 dbgs() << TRI->getRegPressureSetName( 572 RegionCriticalPSets[i].PSetID) << " "; 573 dbgs() << "\n"); 574 } 575 576 // FIXME: When the pressure tracker deals in pressure differences then we won't 577 // iterate over all RegionCriticalPSets[i]. 578 void ScheduleDAGMI:: 579 updateScheduledPressure(std::vector<unsigned> NewMaxPressure) { 580 for (unsigned i = 0, e = RegionCriticalPSets.size(); i < e; ++i) { 581 unsigned ID = RegionCriticalPSets[i].PSetID; 582 int &MaxUnits = RegionCriticalPSets[i].UnitIncrease; 583 if ((int)NewMaxPressure[ID] > MaxUnits) 584 MaxUnits = NewMaxPressure[ID]; 585 } 586 } 587 588 // Release all DAG roots for scheduling. 589 void ScheduleDAGMI::releaseRoots() { 590 SmallVector<SUnit*, 16> BotRoots; 591 592 for (std::vector<SUnit>::iterator 593 I = SUnits.begin(), E = SUnits.end(); I != E; ++I) { 594 // A SUnit is ready to top schedule if it has no predecessors. 595 if (I->Preds.empty()) 596 SchedImpl->releaseTopNode(&(*I)); 597 // A SUnit is ready to bottom schedule if it has no successors. 598 if (I->Succs.empty()) 599 BotRoots.push_back(&(*I)); 600 } 601 // Release bottom roots in reverse order so the higher priority nodes appear 602 // first. This is more natural and slightly more efficient. 
603 for (SmallVectorImpl<SUnit*>::const_reverse_iterator 604 I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) 605 SchedImpl->releaseBottomNode(*I); 606 } 607 608 /// schedule - Called back from MachineScheduler::runOnMachineFunction 609 /// after setting up the current scheduling region. [RegionBegin, RegionEnd) 610 /// only includes instructions that have DAG nodes, not scheduling boundaries. 611 void ScheduleDAGMI::schedule() { 612 // Initialize the register pressure tracker used by buildSchedGraph. 613 RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd); 614 615 // Account for liveness generate by the region boundary. 616 if (LiveRegionEnd != RegionEnd) 617 RPTracker.recede(); 618 619 // Build the DAG, and compute current register pressure. 620 buildSchedGraph(AA, &RPTracker); 621 622 // Initialize top/bottom trackers after computing region pressure. 623 initRegPressure(); 624 625 DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su) 626 SUnits[su].dumpAll(this)); 627 628 if (ViewMISchedDAGs) viewGraph(); 629 630 SchedImpl->initialize(this); 631 632 // Release edges from the special Entry node or to the special Exit node. 633 releaseSuccessors(&EntrySU); 634 releasePredecessors(&ExitSU); 635 636 // Release all DAG roots for scheduling. 637 releaseRoots(); 638 639 CurrentTop = nextIfDebug(RegionBegin, RegionEnd); 640 CurrentBottom = RegionEnd; 641 bool IsTopNode = false; 642 while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) { 643 if (!checkSchedLimit()) 644 break; 645 646 // Move the instruction to its new location in the instruction stream. 647 MachineInstr *MI = SU->getInstr(); 648 649 if (IsTopNode) { 650 assert(SU->isTopReady() && "node still has unscheduled dependencies"); 651 if (&*CurrentTop == MI) 652 CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom); 653 else { 654 moveInstruction(MI, CurrentTop); 655 TopRPTracker.setPos(MI); 656 } 657 658 // Update top scheduled pressure. 
659 TopRPTracker.advance(); 660 assert(TopRPTracker.getPos() == CurrentTop && "out of sync"); 661 updateScheduledPressure(TopRPTracker.getPressure().MaxSetPressure); 662 663 // Release dependent instructions for scheduling. 664 releaseSuccessors(SU); 665 } 666 else { 667 assert(SU->isBottomReady() && "node still has unscheduled dependencies"); 668 MachineBasicBlock::iterator priorII = 669 priorNonDebug(CurrentBottom, CurrentTop); 670 if (&*priorII == MI) 671 CurrentBottom = priorII; 672 else { 673 if (&*CurrentTop == MI) { 674 CurrentTop = nextIfDebug(++CurrentTop, priorII); 675 TopRPTracker.setPos(CurrentTop); 676 } 677 moveInstruction(MI, CurrentBottom); 678 CurrentBottom = MI; 679 } 680 // Update bottom scheduled pressure. 681 BotRPTracker.recede(); 682 assert(BotRPTracker.getPos() == CurrentBottom && "out of sync"); 683 updateScheduledPressure(BotRPTracker.getPressure().MaxSetPressure); 684 685 // Release dependent instructions for scheduling. 686 releasePredecessors(SU); 687 } 688 SU->isScheduled = true; 689 SchedImpl->schedNode(SU, IsTopNode); 690 } 691 assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone."); 692 693 placeDebugValues(); 694 } 695 696 /// Reinsert any remaining debug_values, just like the PostRA scheduler. 697 void ScheduleDAGMI::placeDebugValues() { 698 // If first instruction was a DBG_VALUE then put it back. 
699 if (FirstDbgValue) { 700 BB->splice(RegionBegin, BB, FirstDbgValue); 701 RegionBegin = FirstDbgValue; 702 } 703 704 for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator 705 DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) { 706 std::pair<MachineInstr *, MachineInstr *> P = *prior(DI); 707 MachineInstr *DbgValue = P.first; 708 MachineBasicBlock::iterator OrigPrevMI = P.second; 709 BB->splice(++OrigPrevMI, BB, DbgValue); 710 if (OrigPrevMI == llvm::prior(RegionEnd)) 711 RegionEnd = DbgValue; 712 } 713 DbgValues.clear(); 714 FirstDbgValue = NULL; 715 } 716 717 //===----------------------------------------------------------------------===// 718 // ConvergingScheduler - Implementation of the standard MachineSchedStrategy. 719 //===----------------------------------------------------------------------===// 720 721 namespace { 722 /// ReadyQueue encapsulates vector of "ready" SUnits with basic convenience 723 /// methods for pushing and removing nodes. ReadyQueue's are uniquely identified 724 /// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues the SUnit is in. 725 class ReadyQueue { 726 unsigned ID; 727 std::string Name; 728 std::vector<SUnit*> Queue; 729 730 public: 731 ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {} 732 733 unsigned getID() const { return ID; } 734 735 StringRef getName() const { return Name; } 736 737 // SU is in this queue if it's NodeQueueID is a superset of this ID. 
738 bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); } 739 740 bool empty() const { return Queue.empty(); } 741 742 unsigned size() const { return Queue.size(); } 743 744 typedef std::vector<SUnit*>::iterator iterator; 745 746 iterator begin() { return Queue.begin(); } 747 748 iterator end() { return Queue.end(); } 749 750 iterator find(SUnit *SU) { 751 return std::find(Queue.begin(), Queue.end(), SU); 752 } 753 754 void push(SUnit *SU) { 755 Queue.push_back(SU); 756 SU->NodeQueueId |= ID; 757 } 758 759 void remove(iterator I) { 760 (*I)->NodeQueueId &= ~ID; 761 *I = Queue.back(); 762 Queue.pop_back(); 763 } 764 765 void dump() { 766 dbgs() << Name << ": "; 767 for (unsigned i = 0, e = Queue.size(); i < e; ++i) 768 dbgs() << Queue[i]->NodeNum << " "; 769 dbgs() << "\n"; 770 } 771 }; 772 773 /// ConvergingScheduler shrinks the unscheduled zone using heuristics to balance 774 /// the schedule. 775 class ConvergingScheduler : public MachineSchedStrategy { 776 777 /// Store the state used by ConvergingScheduler heuristics, required for the 778 /// lifetime of one invocation of pickNode(). 779 struct SchedCandidate { 780 // The best SUnit candidate. 781 SUnit *SU; 782 783 // Register pressure values for the best candidate. 784 RegPressureDelta RPDelta; 785 786 SchedCandidate(): SU(NULL) {} 787 }; 788 /// Represent the type of SchedCandidate found within a single queue. 789 enum CandResult { 790 NoCand, NodeOrder, SingleExcess, SingleCritical, SingleMax, MultiPressure }; 791 792 /// Each Scheduling boundary is associated with ready queues. It tracks the 793 /// current cycle in whichever direction at has moved, and maintains the state 794 /// of "hazards" and other interlocks at the current cycle. 
795 struct SchedBoundary { 796 ScheduleDAGMI *DAG; 797 798 ReadyQueue Available; 799 ReadyQueue Pending; 800 bool CheckPending; 801 802 ScheduleHazardRecognizer *HazardRec; 803 804 unsigned CurrCycle; 805 unsigned IssueCount; 806 807 /// MinReadyCycle - Cycle of the soonest available instruction. 808 unsigned MinReadyCycle; 809 810 // Remember the greatest min operand latency. 811 unsigned MaxMinLatency; 812 813 /// Pending queues extend the ready queues with the same ID and the 814 /// PendingFlag set. 815 SchedBoundary(unsigned ID, const Twine &Name): 816 DAG(0), Available(ID, Name+".A"), 817 Pending(ID << ConvergingScheduler::LogMaxQID, Name+".P"), 818 CheckPending(false), HazardRec(0), CurrCycle(0), IssueCount(0), 819 MinReadyCycle(UINT_MAX), MaxMinLatency(0) {} 820 821 ~SchedBoundary() { delete HazardRec; } 822 823 bool isTop() const { 824 return Available.getID() == ConvergingScheduler::TopQID; 825 } 826 827 void releaseNode(SUnit *SU, unsigned ReadyCycle); 828 829 void bumpCycle(); 830 831 void bumpNode(SUnit *SU); 832 833 void releasePending(); 834 835 void removeReady(SUnit *SU); 836 837 SUnit *pickOnlyChoice(); 838 }; 839 840 ScheduleDAGMI *DAG; 841 const TargetRegisterInfo *TRI; 842 843 // State of the top and bottom scheduled instruction boundaries. 
  // The two scheduling boundaries. Instructions are scheduled from the top of
  // the region downward and from the bottom upward until the zones converge.
  SchedBoundary Top;
  SchedBoundary Bot;

public:
  /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
  enum {
    TopQID = 1,
    BotQID = 2,
    LogMaxQID = 2
  };

  ConvergingScheduler():
    DAG(0), TRI(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}

  virtual void initialize(ScheduleDAGMI *dag);

  virtual SUnit *pickNode(bool &IsTopNode);

  virtual void schedNode(SUnit *SU, bool IsTopNode);

  virtual void releaseTopNode(SUnit *SU);

  virtual void releaseBottomNode(SUnit *SU);

protected:
  // NOTE(review): "Bidrectional" is a typo for "Bidirectional"; renaming would
  // touch every reference, so the existing spelling is preserved here.
  SUnit *pickNodeBidrectional(bool &IsTopNode);

  CandResult pickNodeFromQueue(ReadyQueue &Q,
                               const RegPressureTracker &RPTracker,
                               SchedCandidate &Candidate);
#ifndef NDEBUG
  void traceCandidate(const char *Label, const ReadyQueue &Q, SUnit *SU,
                      PressureElement P = PressureElement());
#endif
};
} // namespace

/// Initialize the strategy for a new scheduling region: cache the DAG and
/// register info, and create one hazard recognizer per scheduling boundary.
void ConvergingScheduler::initialize(ScheduleDAGMI *dag) {
  DAG = dag;
  TRI = DAG->TRI;
  Top.DAG = dag;
  Bot.DAG = dag;

  // Initialize the HazardRecognizers.
  const TargetMachine &TM = DAG->MF.getTarget();
  const InstrItineraryData *Itin = TM.getInstrItineraryData();
  Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
  Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);

  assert((!ForceTopDown || !ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
}

/// Called when SU becomes available for top-down scheduling. Compute the
/// earliest cycle SU can issue from each predecessor's ready cycle plus the
/// minimum operand latency, then release it into the top boundary.
void ConvergingScheduler::releaseTopNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  // NOTE(review): this walks SU->Preds with a succ_iterator; the iterator
  // types are interchangeable here, but pred_iterator would read better.
  for (SUnit::succ_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle;
    unsigned Latency =
      DAG->computeOperandLatency(I->getSUnit(), SU, *I, /*FindMin=*/true);
#ifndef NDEBUG
    // Track the largest min-latency seen; pickOnlyChoice uses it to bound the
    // number of cycles it may advance before asserting "permanent hazard".
    Top.MaxMinLatency = std::max(Latency, Top.MaxMinLatency);
#endif
    if (SU->TopReadyCycle < PredReadyCycle + Latency)
      SU->TopReadyCycle = PredReadyCycle + Latency;
  }
  Top.releaseNode(SU, SU->TopReadyCycle);
}

/// Called when SU becomes available for bottom-up scheduling. Mirror of
/// releaseTopNode: compute SU's earliest issue cycle from its successors'
/// ready cycles, then release it into the bottom boundary.
void ConvergingScheduler::releaseBottomNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  assert(SU->getInstr() && "Scheduled SUnit must have instr");

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
    unsigned Latency =
      DAG->computeOperandLatency(SU, I->getSUnit(), *I, /*FindMin=*/true);
#ifndef NDEBUG
    Bot.MaxMinLatency = std::max(Latency, Bot.MaxMinLatency);
#endif
    if (SU->BotReadyCycle < SuccReadyCycle + Latency)
      SU->BotReadyCycle = SuccReadyCycle + Latency;
  }
  Bot.releaseNode(SU, SU->BotReadyCycle);
}

/// Add SU to this boundary's queues once its dependences in this direction are
/// satisfied. A node whose ready cycle has not yet been reached, or that the
/// hazard recognizer rejects, goes to Pending rather than Available.
void ConvergingScheduler::SchedBoundary::releaseNode(SUnit *SU,
                                                     unsigned ReadyCycle) {
  if (ReadyCycle < MinReadyCycle)
    MinReadyCycle = ReadyCycle;

  // Check for interlocks first. For the purpose of other heuristics, an
  // instruction that cannot issue appears as if it's not in the ReadyQueue.
  if (ReadyCycle > CurrCycle
      || (HazardRec->isEnabled() && (HazardRec->getHazardType(SU)
                                     != ScheduleHazardRecognizer::NoHazard)))
    Pending.push(SU);
  else
    Available.push(SU);
}

/// Move the boundary of scheduled code by one cycle.
void ConvergingScheduler::SchedBoundary::bumpCycle() {
  // Carry over issue slots consumed beyond this cycle's width.
  unsigned Width = DAG->getIssueWidth();
  IssueCount = (IssueCount <= Width) ? 0 : IssueCount - Width;

  assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
  // Jump directly to the next cycle at which some node becomes ready.
  unsigned NextCycle = std::max(CurrCycle + 1, MinReadyCycle);

  if (!HazardRec->isEnabled()) {
    // Bypass HazardRec virtual calls.
    CurrCycle = NextCycle;
  }
  else {
    // Bypass getHazardType calls in case of long latency.
    for (; CurrCycle != NextCycle; ++CurrCycle) {
      if (isTop())
        HazardRec->AdvanceCycle();
      else
        HazardRec->RecedeCycle();
    }
  }
  // Pending nodes may have become ready at the new cycle.
  CheckPending = true;

  DEBUG(dbgs() << "*** " << Available.getName() << " cycle "
        << CurrCycle << '\n');
}

/// Move the boundary of scheduled code by one SUnit.
void ConvergingScheduler::SchedBoundary::bumpNode(SUnit *SU) {
  // Update the reservation table.
  if (HazardRec->isEnabled()) {
    if (!isTop() && SU->isCall) {
      // Calls are scheduled with their preceding instructions. For bottom-up
      // scheduling, clear the pipeline state before emitting.
      HazardRec->Reset();
    }
    HazardRec->EmitInstruction(SU);
  }
  // Check the instruction group size limit.
  IssueCount += DAG->getNumMicroOps(SU->getInstr());
  if (IssueCount >= DAG->getIssueWidth()) {
    DEBUG(dbgs() << "*** Max instrs at cycle " << CurrCycle << '\n');
    bumpCycle();
  }
}

/// Release pending ready nodes into the available queue. This makes them
/// visible to heuristics.
void ConvergingScheduler::SchedBoundary::releasePending() {
  // If the available queue is empty, it is safe to reset MinReadyCycle.
  if (Available.empty())
    MinReadyCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
    SUnit *SU = *(Pending.begin()+i);
    unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;

    // Recompute MinReadyCycle over the nodes that remain pending.
    if (ReadyCycle < MinReadyCycle)
      MinReadyCycle = ReadyCycle;

    // Not ready in this cycle; keep it pending.
    if (ReadyCycle > CurrCycle)
      continue;

    // Structurally ready but blocked by a pipeline hazard; keep it pending.
    if (HazardRec->isEnabled()
        && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard)
      continue;

    Available.push(SU);
    Pending.remove(Pending.begin()+i);
    // The queue shrank and index i now holds a different node; revisit it.
    --i; --e;
  }
  CheckPending = false;
}

/// Remove SU from the ready set for this boundary.
void ConvergingScheduler::SchedBoundary::removeReady(SUnit *SU) {
  if (Available.isInQueue(SU))
    Available.remove(Available.find(SU));
  else {
    assert(Pending.isInQueue(SU) && "bad ready count");
    Pending.remove(Pending.find(SU));
  }
}

/// If this queue only has one ready candidate, return it. As a side effect,
/// advance the cycle until at least one node is ready. If multiple instructions
/// are ready, return NULL.
SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() {
  if (CheckPending)
    releasePending();

  // Advance cycles until something becomes available. The assertion bounds the
  // wait by the recognizer's lookahead plus the largest min-latency seen for
  // this boundary; exceeding that indicates a hazard that can never clear.
  for (unsigned i = 0; Available.empty(); ++i) {
    assert(i <= (HazardRec->getMaxLookAhead() + MaxMinLatency) &&
           "permanent hazard"); (void)i;
    bumpCycle();
    releasePending();
  }
  if (Available.size() == 1)
    return *Available.begin();
  return NULL;
}

#ifndef NDEBUG
/// Debug-only: print the label, the queue name, the pressure element that
/// motivated the choice (when valid), and the candidate SUnit.
void ConvergingScheduler::traceCandidate(const char *Label, const ReadyQueue &Q,
                                         SUnit *SU, PressureElement P) {
  dbgs() << Label << " " << Q.getName() << " ";
  if (P.isValid())
    dbgs() << TRI->getRegPressureSetName(P.PSetID) << ":" << P.UnitIncrease
           << " ";
  else
    dbgs() << " ";
  SU->dump(DAG);
}
#endif

/// pickNodeFromQueue helper that returns true if the LHS reg pressure effect is
/// more desirable than RHS from scheduling standpoint.
static bool compareRPDelta(const RegPressureDelta &LHS,
                           const RegPressureDelta &RHS) {
  // Compare each component of pressure in decreasing order of importance
  // without checking if any are valid. Invalid PressureElements are assumed to
  // have UnitIncrease==0, so are neutral.

  // Avoid exceeding the target's pressure limit (Excess).
  if (LHS.Excess.UnitIncrease != RHS.Excess.UnitIncrease)
    return LHS.Excess.UnitIncrease < RHS.Excess.UnitIncrease;

  // Avoid increasing the max critical pressure in the scheduled region.
  if (LHS.CriticalMax.UnitIncrease != RHS.CriticalMax.UnitIncrease)
    return LHS.CriticalMax.UnitIncrease < RHS.CriticalMax.UnitIncrease;

  // Avoid increasing the max pressure of the entire region.
  if (LHS.CurrentMax.UnitIncrease != RHS.CurrentMax.UnitIncrease)
    return LHS.CurrentMax.UnitIncrease < RHS.CurrentMax.UnitIncrease;

  return false;
}

/// Pick the best candidate from the top queue.
///
/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
/// DAG building. To adjust for the current scheduling location we need to
/// maintain the number of vreg uses remaining to be top-scheduled.
ConvergingScheduler::CandResult ConvergingScheduler::
pickNodeFromQueue(ReadyQueue &Q, const RegPressureTracker &RPTracker,
                  SchedCandidate &Candidate) {
  DEBUG(Q.dump());

  // getMaxPressureDelta temporarily modifies the tracker.
  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);

  // BestSU remains NULL if no top candidates beat the best existing candidate.
  CandResult FoundCandidate = NoCand;
  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
    RegPressureDelta RPDelta;
    TempTracker.getMaxPressureDelta((*I)->getInstr(), RPDelta,
                                    DAG->getRegionCriticalPSets(),
                                    DAG->getRegPressure().MaxSetPressure);

    // Initialize the candidate if needed.
    if (!Candidate.SU) {
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      FoundCandidate = NodeOrder;
      continue;
    }
    // Avoid exceeding the target's limit.
    if (RPDelta.Excess.UnitIncrease < Candidate.RPDelta.Excess.UnitIncrease) {
      DEBUG(traceCandidate("ECAND", Q, *I, RPDelta.Excess));
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      FoundCandidate = SingleExcess;
      continue;
    }
    if (RPDelta.Excess.UnitIncrease > Candidate.RPDelta.Excess.UnitIncrease)
      continue;
    // A tie on Excess downgrades an earlier SingleExcess verdict: the choice
    // at this level is no longer unique.
    if (FoundCandidate == SingleExcess)
      FoundCandidate = MultiPressure;

    // Avoid increasing the max critical pressure in the scheduled region.
    if (RPDelta.CriticalMax.UnitIncrease
        < Candidate.RPDelta.CriticalMax.UnitIncrease) {
      DEBUG(traceCandidate("PCAND", Q, *I, RPDelta.CriticalMax));
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      FoundCandidate = SingleCritical;
      continue;
    }
    if (RPDelta.CriticalMax.UnitIncrease
        > Candidate.RPDelta.CriticalMax.UnitIncrease)
      continue;
    if (FoundCandidate == SingleCritical)
      FoundCandidate = MultiPressure;

    // Avoid increasing the max pressure of the entire region.
    if (RPDelta.CurrentMax.UnitIncrease
        < Candidate.RPDelta.CurrentMax.UnitIncrease) {
      DEBUG(traceCandidate("MCAND", Q, *I, RPDelta.CurrentMax));
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      FoundCandidate = SingleMax;
      continue;
    }
    if (RPDelta.CurrentMax.UnitIncrease
        > Candidate.RPDelta.CurrentMax.UnitIncrease)
      continue;
    if (FoundCandidate == SingleMax)
      FoundCandidate = MultiPressure;

    // Fall through to original instruction order.
    // Only consider node order if Candidate was chosen from this Q.
    if (FoundCandidate == NoCand)
      continue;

    // The top queue prefers the lowest-numbered node; the bottom queue the
    // highest-numbered, i.e. both prefer the original instruction order.
    if ((Q.getID() == TopQID && (*I)->NodeNum < Candidate.SU->NodeNum)
        || (Q.getID() == BotQID && (*I)->NodeNum > Candidate.SU->NodeNum)) {
      DEBUG(traceCandidate("NCAND", Q, *I));
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      FoundCandidate = NodeOrder;
    }
  }
  return FoundCandidate;
}

/// Pick the best candidate node from either the top or bottom queue.
SUnit *ConvergingScheduler::pickNodeBidrectional(bool &IsTopNode) {
  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (SUnit *SU = Bot.pickOnlyChoice()) {
    IsTopNode = false;
    return SU;
  }
  if (SUnit *SU = Top.pickOnlyChoice()) {
    IsTopNode = true;
    return SU;
  }
  SchedCandidate BotCand;
  // Prefer bottom scheduling when heuristics are silent.
  CandResult BotResult = pickNodeFromQueue(Bot.Available,
                                           DAG->getBotRPTracker(), BotCand);
  assert(BotResult != NoCand && "failed to find the first candidate");

  // If either Q has a single candidate that provides the least increase in
  // Excess pressure, we can immediately schedule from that Q.
  //
  // RegionCriticalPSets summarizes the pressure within the scheduled region and
  // affects picking from either Q. If scheduling in one direction must
  // increase pressure for one of the excess PSets, then schedule in that
  // direction first to provide more freedom in the other direction.
  if (BotResult == SingleExcess || BotResult == SingleCritical) {
    IsTopNode = false;
    return BotCand.SU;
  }
  // Check if the top Q has a better candidate.
  SchedCandidate TopCand;
  CandResult TopResult = pickNodeFromQueue(Top.Available,
                                           DAG->getTopRPTracker(), TopCand);
  assert(TopResult != NoCand && "failed to find the first candidate");

  if (TopResult == SingleExcess || TopResult == SingleCritical) {
    IsTopNode = true;
    return TopCand.SU;
  }
  // If either Q has a single candidate that minimizes pressure above the
  // original region's pressure pick it.
  if (BotResult == SingleMax) {
    IsTopNode = false;
    return BotCand.SU;
  }
  if (TopResult == SingleMax) {
    IsTopNode = true;
    return TopCand.SU;
  }
  // Check for a salient pressure difference and pick the best from either side.
  if (compareRPDelta(TopCand.RPDelta, BotCand.RPDelta)) {
    IsTopNode = true;
    return TopCand.SU;
  }
  // Otherwise prefer the bottom candidate in node order.
  IsTopNode = false;
  return BotCand.SU;
}

/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
SUnit *ConvergingScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return NULL;
  }
  SUnit *SU;
  if (ForceTopDown) {
    SU = Top.pickOnlyChoice();
    if (!SU) {
      SchedCandidate TopCand;
      CandResult TopResult =
        pickNodeFromQueue(Top.Available, DAG->getTopRPTracker(), TopCand);
      assert(TopResult != NoCand && "failed to find the first candidate");
      (void)TopResult;
      SU = TopCand.SU;
    }
    IsTopNode = true;
  }
  else if (ForceBottomUp) {
    SU = Bot.pickOnlyChoice();
    if (!SU) {
      SchedCandidate BotCand;
      CandResult BotResult =
        pickNodeFromQueue(Bot.Available, DAG->getBotRPTracker(), BotCand);
      assert(BotResult != NoCand && "failed to find the first candidate");
      (void)BotResult;
      SU = BotCand.SU;
    }
    IsTopNode = false;
  }
  else {
    SU = pickNodeBidrectional(IsTopNode);
  }
  // The node may be ready in both directions; drop it from both boundaries.
  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  DEBUG(dbgs() << "*** " << (IsTopNode ? "Top" : "Bottom")
        << " Scheduling Instruction in cycle "
        << (IsTopNode ? Top.CurrCycle : Bot.CurrCycle) << '\n';
        SU->dump(DAG));
  return SU;
}

/// Update the scheduler's state after scheduling a node. This is the same node
/// that was just returned by pickNode(). However, ScheduleDAGMI needs to update
/// its state based on the current cycle before MachineSchedStrategy does.
1286 void ConvergingScheduler::schedNode(SUnit *SU, bool IsTopNode) { 1287 if (IsTopNode) { 1288 SU->TopReadyCycle = Top.CurrCycle; 1289 Top.bumpNode(SU); 1290 } 1291 else { 1292 SU->BotReadyCycle = Bot.CurrCycle; 1293 Bot.bumpNode(SU); 1294 } 1295 } 1296 1297 /// Create the standard converging machine scheduler. This will be used as the 1298 /// default scheduler if the target does not set a default. 1299 static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) { 1300 assert((!ForceTopDown || !ForceBottomUp) && 1301 "-misched-topdown incompatible with -misched-bottomup"); 1302 return new ScheduleDAGMI(C, new ConvergingScheduler()); 1303 } 1304 static MachineSchedRegistry 1305 ConvergingSchedRegistry("converge", "Standard converging scheduler.", 1306 createConvergingSched); 1307 1308 //===----------------------------------------------------------------------===// 1309 // Machine Instruction Shuffler for Correctness Testing 1310 //===----------------------------------------------------------------------===// 1311 1312 #ifndef NDEBUG 1313 namespace { 1314 /// Apply a less-than relation on the node order, which corresponds to the 1315 /// instruction order prior to scheduling. IsReverse implements greater-than. 1316 template<bool IsReverse> 1317 struct SUnitOrder { 1318 bool operator()(SUnit *A, SUnit *B) const { 1319 if (IsReverse) 1320 return A->NodeNum > B->NodeNum; 1321 else 1322 return A->NodeNum < B->NodeNum; 1323 } 1324 }; 1325 1326 /// Reorder instructions as much as possible. 1327 class InstructionShuffler : public MachineSchedStrategy { 1328 bool IsAlternating; 1329 bool IsTopDown; 1330 1331 // Using a less-than relation (SUnitOrder<false>) for the TopQ priority 1332 // gives nodes with a higher number higher priority causing the latest 1333 // instructions to be scheduled first. 1334 PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> > 1335 TopQ; 1336 // When scheduling bottom-up, use greater-than as the queue priority. 
1337 PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> > 1338 BottomQ; 1339 public: 1340 InstructionShuffler(bool alternate, bool topdown) 1341 : IsAlternating(alternate), IsTopDown(topdown) {} 1342 1343 virtual void initialize(ScheduleDAGMI *) { 1344 TopQ.clear(); 1345 BottomQ.clear(); 1346 } 1347 1348 /// Implement MachineSchedStrategy interface. 1349 /// ----------------------------------------- 1350 1351 virtual SUnit *pickNode(bool &IsTopNode) { 1352 SUnit *SU; 1353 if (IsTopDown) { 1354 do { 1355 if (TopQ.empty()) return NULL; 1356 SU = TopQ.top(); 1357 TopQ.pop(); 1358 } while (SU->isScheduled); 1359 IsTopNode = true; 1360 } 1361 else { 1362 do { 1363 if (BottomQ.empty()) return NULL; 1364 SU = BottomQ.top(); 1365 BottomQ.pop(); 1366 } while (SU->isScheduled); 1367 IsTopNode = false; 1368 } 1369 if (IsAlternating) 1370 IsTopDown = !IsTopDown; 1371 return SU; 1372 } 1373 1374 virtual void schedNode(SUnit *SU, bool IsTopNode) {} 1375 1376 virtual void releaseTopNode(SUnit *SU) { 1377 TopQ.push(SU); 1378 } 1379 virtual void releaseBottomNode(SUnit *SU) { 1380 BottomQ.push(SU); 1381 } 1382 }; 1383 } // namespace 1384 1385 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) { 1386 bool Alternate = !ForceTopDown && !ForceBottomUp; 1387 bool TopDown = !ForceBottomUp; 1388 assert((TopDown || !ForceTopDown) && 1389 "-misched-topdown incompatible with -misched-bottomup"); 1390 return new ScheduleDAGMI(C, new InstructionShuffler(Alternate, TopDown)); 1391 } 1392 static MachineSchedRegistry ShufflerRegistry( 1393 "shuffle", "Shuffle machine instructions alternating directions", 1394 createInstructionShuffler); 1395 #endif // !NDEBUG 1396