1 //===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // MachineScheduler schedules machine instructions after phi elimination. It 11 // preserves LiveIntervals so it can be invoked before register allocation. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #define DEBUG_TYPE "misched" 16 17 #include "llvm/CodeGen/LiveIntervalAnalysis.h" 18 #include "llvm/CodeGen/MachineScheduler.h" 19 #include "llvm/CodeGen/Passes.h" 20 #include "llvm/CodeGen/RegisterClassInfo.h" 21 #include "llvm/CodeGen/RegisterPressure.h" 22 #include "llvm/CodeGen/ScheduleDAGInstrs.h" 23 #include "llvm/CodeGen/ScheduleHazardRecognizer.h" 24 #include "llvm/Target/TargetInstrInfo.h" 25 #include "llvm/MC/MCInstrItineraries.h" 26 #include "llvm/Analysis/AliasAnalysis.h" 27 #include "llvm/Support/CommandLine.h" 28 #include "llvm/Support/Debug.h" 29 #include "llvm/Support/ErrorHandling.h" 30 #include "llvm/Support/raw_ostream.h" 31 #include "llvm/ADT/OwningPtr.h" 32 #include "llvm/ADT/PriorityQueue.h" 33 34 #include <queue> 35 36 using namespace llvm; 37 38 static cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden, 39 cl::desc("Force top-down list scheduling")); 40 static cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden, 41 cl::desc("Force bottom-up list scheduling")); 42 43 #ifndef NDEBUG 44 static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden, 45 cl::desc("Pop up a window to show MISched dags after they are processed")); 46 47 static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden, 48 cl::desc("Stop scheduling after N instructions"), cl::init(~0U)); 49 #else 50 static bool ViewMISchedDAGs = false; 51 #endif // NDEBUG 52 
//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

// Shared state between the pass and the scheduler implementation. Analyses are
// null until runOnMachineFunction initializes them.
MachineSchedContext::MachineSchedContext():
  MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedContext,
                         public MachineFunctionPass {
public:
  MachineScheduler();

  virtual void getAnalysisUsage(AnalysisUsage &AU) const;

  virtual void releaseMemory() {}

  virtual bool runOnMachineFunction(MachineFunction&);

  virtual void print(raw_ostream &O, const Module* = 0) const;

  static char ID; // Class identification, replacement for typeinfo
};
} // namespace

char MachineScheduler::ID = 0;

// Exported pass ID so targets can schedule this pass by identity.
char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "misched",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "misched",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineFunctionPass(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  // LiveIntervals is both consumed and preserved: the scheduler moves
  // instructions and updates the intervals in place via handleMove().
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return 0;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C);

/// Decrement this iterator until reaching the top or a non-debug instr.
/// Note: if the decrement reaches Beg, Beg is returned even when it is itself
/// a debug value.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator End) {
  for(; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up.
/// Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "Before MISsched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AliasAnalysis>();

  LIS = &getAnalysis<LiveIntervals>();
  const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();

  RegClassInfo->runOnMachineFunction(*MF);

  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor == useDefaultMachineSched) {
    // Get the default scheduler set by the target.
    Ctor = MachineSchedRegistry::getDefault();
    if (!Ctor) {
      Ctor = createConvergingSched;
      MachineSchedRegistry::setDefault(Ctor);
    }
  }
  // Instantiate the selected scheduler.
  OwningPtr<ScheduleDAGInstrs> Scheduler(Ctor(this));

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler->startBlock(MBB);

    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    //
    // RemainingCount counts the instructions not yet assigned to a region; it
    // must reach exactly zero once the whole block has been partitioned.
    unsigned RemainingCount = MBB->size();
    for(MachineBasicBlock::iterator RegionEnd = MBB->end();
        RegionEnd != MBB->begin(); RegionEnd = Scheduler->begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end()
          || TII->isSchedulingBoundary(llvm::prior(RegionEnd), MBB, *MF)) {
        --RegionEnd;
        // Count the boundary instruction.
        --RemainingCount;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      MachineBasicBlock::iterator I = RegionEnd;
      for(;I != MBB->begin(); --I, --RemainingCount) {
        if (TII->isSchedulingBoundary(llvm::prior(I), MBB, *MF))
          break;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler->enterRegion(MBB, I, RegionEnd, RemainingCount);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == llvm::prior(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler->exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getFunction()->getName()
            << ":BB#" << MBB->getNumber() << "\n From: " << *I << " To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " Remaining: " << RemainingCount << "\n");

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler->schedule();

      // Close the current region.
      Scheduler->exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler->begin();
    }
    assert(RemainingCount == 0 && "Instruction count mismatch!");
    Scheduler->finishBlock();
  }
  Scheduler->finalizeSchedule();
  DEBUG(LIS->print(dbgs()));
  return true;
}

void MachineScheduler::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

//===----------------------------------------------------------------------===//
// MachineSchedStrategy - Interface to a machine scheduling algorithm.
//===----------------------------------------------------------------------===//

namespace {
class ScheduleDAGMI;

/// MachineSchedStrategy - Interface used by ScheduleDAGMI to drive the selected
/// scheduling algorithm.
///
/// If this works well and targets wish to reuse ScheduleDAGMI, we may expose it
/// in ScheduleDAGInstrs.h
class MachineSchedStrategy {
public:
  virtual ~MachineSchedStrategy() {}

  /// Initialize the strategy after building the DAG for a new region.
  virtual void initialize(ScheduleDAGMI *DAG) = 0;

  /// Pick the next node to schedule, or return NULL. Set IsTopNode to true to
  /// schedule the node at the top of the unscheduled region. Otherwise it will
  /// be scheduled at the bottom.
  virtual SUnit *pickNode(bool &IsTopNode) = 0;

  /// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled a node.
  virtual void schedNode(SUnit *SU, bool IsTopNode) = 0;

  /// When all predecessor dependencies have been resolved, free this node for
  /// top-down scheduling.
  virtual void releaseTopNode(SUnit *SU) = 0;
  /// When all successor dependencies have been resolved, free this node for
  /// bottom-up scheduling.
  virtual void releaseBottomNode(SUnit *SU) = 0;
};
} // namespace

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

namespace {
/// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that schedules
/// machine instructions while updating LiveIntervals.
class ScheduleDAGMI : public ScheduleDAGInstrs {
  AliasAnalysis *AA;
  RegisterClassInfo *RegClassInfo;
  // Owned; deleted in the destructor.
  MachineSchedStrategy *SchedImpl;

  // One-past RegionEnd, used as the starting point for liveness tracking so
  // that the boundary instruction's liveness is accounted for.
  MachineBasicBlock::iterator LiveRegionEnd;

  /// Register pressure in this region computed by buildSchedGraph.
  IntervalPressure RegPressure;
  RegPressureTracker RPTracker;

  /// List of pressure sets that exceed the target's pressure limit before
  /// scheduling, listed in increasing set ID order. Each pressure set is paired
  /// with its max pressure in the currently scheduled regions.
  std::vector<PressureElement> RegionCriticalPSets;

  /// The top of the unscheduled zone.
  MachineBasicBlock::iterator CurrentTop;
  IntervalPressure TopPressure;
  RegPressureTracker TopRPTracker;

  /// The bottom of the unscheduled zone.
  MachineBasicBlock::iterator CurrentBottom;
  IntervalPressure BotPressure;
  RegPressureTracker BotRPTracker;

#ifndef NDEBUG
  /// The number of instructions scheduled so far. Used to cut off the
  /// scheduler at the point determined by misched-cutoff.
  unsigned NumInstrsScheduled;
#endif
public:
  ScheduleDAGMI(MachineSchedContext *C, MachineSchedStrategy *S):
    ScheduleDAGInstrs(*C->MF, *C->MLI, *C->MDT, /*IsPostRA=*/false, C->LIS),
    AA(C->AA), RegClassInfo(C->RegClassInfo), SchedImpl(S),
    RPTracker(RegPressure), CurrentTop(), TopRPTracker(TopPressure),
    CurrentBottom(), BotRPTracker(BotPressure) {
#ifndef NDEBUG
    NumInstrsScheduled = 0;
#endif
  }

  ~ScheduleDAGMI() {
    delete SchedImpl;
  }

  MachineBasicBlock::iterator top() const { return CurrentTop; }
  MachineBasicBlock::iterator bottom() const { return CurrentBottom; }

  /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
  /// region. This covers all instructions in a block, while schedule() may only
  /// cover a subset.
  void enterRegion(MachineBasicBlock *bb,
                   MachineBasicBlock::iterator begin,
                   MachineBasicBlock::iterator end,
                   unsigned endcount);

  /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
  /// reorderable instructions.
  void schedule();

  /// Get current register pressure for the top scheduled instructions.
  const IntervalPressure &getTopPressure() const { return TopPressure; }
  const RegPressureTracker &getTopRPTracker() const { return TopRPTracker; }

  /// Get current register pressure for the bottom scheduled instructions.
  const IntervalPressure &getBotPressure() const { return BotPressure; }
  const RegPressureTracker &getBotRPTracker() const { return BotRPTracker; }

  /// Get register pressure for the entire scheduling region before scheduling.
  const IntervalPressure &getRegPressure() const { return RegPressure; }

  const std::vector<PressureElement> &getRegionCriticalPSets() const {
    return RegionCriticalPSets;
  }

  /// getIssueWidth - Return the max instructions per scheduling group.
  /// Falls back to 1 when the target has no itinerary data.
  unsigned getIssueWidth() const {
    return InstrItins ? InstrItins->Props.IssueWidth : 1;
  }

  /// getNumMicroOps - Return the number of issue slots required for this MI.
  /// A negative itinerary value means the count is dynamic; defer to TII.
  unsigned getNumMicroOps(MachineInstr *MI) const {
    if (!InstrItins) return 1;
    int UOps = InstrItins->getNumMicroOps(MI->getDesc().getSchedClass());
    return (UOps >= 0) ? UOps : TII->getNumMicroOps(InstrItins, MI);
  }

protected:
  void initRegPressure();
  void updateScheduledPressure(std::vector<unsigned> NewMaxPressure);

  void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos);
  bool checkSchedLimit();

  void releaseRoots();

  void releaseSucc(SUnit *SU, SDep *SuccEdge);
  void releaseSuccessors(SUnit *SU);
  void releasePred(SUnit *SU, SDep *PredEdge);
  void releasePredecessors(SUnit *SU);

  void placeDebugValues();
};
} // namespace

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  // Releasing a node whose count already hit zero indicates a DAG bug.
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

#ifndef NDEBUG
  // Releasing a node whose count already hit zero indicates a DAG bug.
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

// Splice MI to InsertPos, keeping the cached RegionBegin and the
// LiveIntervals analysis in sync with the new instruction order.
void ScheduleDAGMI::moveInstruction(MachineInstr *MI,
                                    MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals
  LIS->handleMove(MI);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

// Returns false once the misched-cutoff instruction budget (debug builds only)
// is exhausted; the caller then stops scheduling this region.
bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned endcount)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, endcount);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd =
    (RegionEnd == bb->end()) ? RegionEnd : llvm::next(RegionEnd);
}

// Setup the register pressure trackers for the top and bottom scheduled
// regions.
void ScheduleDAGMI::initRegPressure() {
  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.getPressure().dump(TRI));

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    BotRPTracker.recede();

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  std::vector<unsigned> RegionPressure = RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = TRI->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit)
      RegionCriticalPSets.push_back(PressureElement(i, 0));
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].PSetID) << " ";
        dbgs() << "\n");
}

// Record the maximum pressure seen so far for each critical pressure set.
// FIXME: When the pressure tracker deals in pressure differences then we won't
// iterate over all RegionCriticalPSets[i].
void ScheduleDAGMI::
updateScheduledPressure(std::vector<unsigned> NewMaxPressure) {
  for (unsigned i = 0, e = RegionCriticalPSets.size(); i < e; ++i) {
    unsigned ID = RegionCriticalPSets[i].PSetID;
    int &MaxUnits = RegionCriticalPSets[i].UnitIncrease;
    if ((int)NewMaxPressure[ID] > MaxUnits)
      MaxUnits = NewMaxPressure[ID];
  }
}

// Release all DAG roots for scheduling.
void ScheduleDAGMI::releaseRoots() {
  SmallVector<SUnit*, 16> BotRoots;

  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    // A SUnit is ready to top schedule if it has no predecessors.
    if (I->Preds.empty())
      SchedImpl->releaseTopNode(&(*I));
    // A SUnit is ready to bottom schedule if it has no successors.
    if (I->Succs.empty())
      BotRoots.push_back(&(*I));
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I)
    SchedImpl->releaseBottomNode(*I);
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
void ScheduleDAGMI::schedule() {
  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  if (ViewMISchedDAGs) viewGraph();

  SchedImpl->initialize(this);

  // Release edges from the special Entry node or to the special Exit node.
  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  // Release all DAG roots for scheduling.
  releaseRoots();

  // Main loop: the strategy picks a node and a direction; the unscheduled
  // zone [CurrentTop, CurrentBottom) shrinks from whichever end was picked.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
  bool IsTopNode = false;
  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
    if (!checkSchedLimit())
      break;

    // Move the instruction to its new location in the instruction stream.
    MachineInstr *MI = SU->getInstr();

    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else {
        moveInstruction(MI, CurrentTop);
        TopRPTracker.setPos(MI);
      }

      // Update top scheduled pressure.
      TopRPTracker.advance();
      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
      updateScheduledPressure(TopRPTracker.getPressure().MaxSetPressure);

      // Release dependent instructions for scheduling.
      releaseSuccessors(SU);
    }
    else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        // If MI is currently at the top boundary, advance the top past it
        // before splicing MI down to the bottom.
        if (&*CurrentTop == MI) {
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
          TopRPTracker.setPos(CurrentTop);
        }
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
      // Update bottom scheduled pressure.
      BotRPTracker.recede();
      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
      updateScheduledPressure(BotRPTracker.getPressure().MaxSetPressure);

      // Release dependent instructions for scheduling.
      releasePredecessors(SU);
    }
    SU->isScheduled = true;
    SchedImpl->schedNode(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  // Walk DbgValues in reverse; each entry pairs a DBG_VALUE with the
  // instruction that originally preceded it.
  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == llvm::prior(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = NULL;
}

//===----------------------------------------------------------------------===//
// ConvergingScheduler - Implementation of the standard MachineSchedStrategy.
//===----------------------------------------------------------------------===//

namespace {
/// ReadyQueue encapsulates vector of "ready" SUnits with basic convenience
/// methods for pushing and removing nodes. ReadyQueue's are uniquely identified
/// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues the SUnit is in.
class ReadyQueue {
  unsigned ID;
  std::string Name;
  std::vector<SUnit*> Queue;

public:
  ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {}

  unsigned getID() const { return ID; }

  StringRef getName() const { return Name; }

  // SU is in this queue if its NodeQueueId is a superset of this ID.
  bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); }

  bool empty() const { return Queue.empty(); }

  unsigned size() const { return Queue.size(); }

  typedef std::vector<SUnit*>::iterator iterator;

  iterator begin() { return Queue.begin(); }

  iterator end() { return Queue.end(); }

  iterator find(SUnit *SU) {
    return std::find(Queue.begin(), Queue.end(), SU);
  }

  void push(SUnit *SU) {
    Queue.push_back(SU);
    SU->NodeQueueId |= ID;
  }

  // Unordered removal: overwrite with the last element and pop. O(1), but
  // invalidates iterators at and after I.
  void remove(iterator I) {
    (*I)->NodeQueueId &= ~ID;
    *I = Queue.back();
    Queue.pop_back();
  }

  void dump() {
    dbgs() << Name << ": ";
    for (unsigned i = 0, e = Queue.size(); i < e; ++i)
      dbgs() << Queue[i]->NodeNum << " ";
    dbgs() << "\n";
  }
};

/// ConvergingScheduler shrinks the unscheduled zone using heuristics to balance
/// the schedule.
class ConvergingScheduler : public MachineSchedStrategy {

  /// Store the state used by ConvergingScheduler heuristics, required for the
  /// lifetime of one invocation of pickNode().
  struct SchedCandidate {
    // The best SUnit candidate.
    SUnit *SU;

    // Register pressure values for the best candidate.
    RegPressureDelta RPDelta;

    SchedCandidate(): SU(NULL) {}
  };
  /// Represent the type of SchedCandidate found within a single queue.
  enum CandResult {
    NoCand, NodeOrder, SingleExcess, SingleCritical, SingleMax, MultiPressure };

  /// Each Scheduling boundary is associated with ready queues. It tracks the
  /// current cycle in whichever direction it has moved, and maintains the state
  /// of "hazards" and other interlocks at the current cycle.
  struct SchedBoundary {
    ScheduleDAGMI *DAG;

    ReadyQueue Available;
    ReadyQueue Pending;
    bool CheckPending;

    // Owned; deleted in ~SchedBoundary.
    ScheduleHazardRecognizer *HazardRec;

    unsigned CurrCycle;
    unsigned IssueCount;

    /// MinReadyCycle - Cycle of the soonest available instruction.
    unsigned MinReadyCycle;

    // Remember the greatest min operand latency.
    unsigned MaxMinLatency;

    /// Pending queues extend the ready queues with the same ID and the
    /// PendingFlag set.
    SchedBoundary(unsigned ID, const Twine &Name):
      DAG(0), Available(ID, Name+".A"),
      Pending(ID << ConvergingScheduler::LogMaxQID, Name+".P"),
      CheckPending(false), HazardRec(0), CurrCycle(0), IssueCount(0),
      MinReadyCycle(UINT_MAX), MaxMinLatency(0) {}

    ~SchedBoundary() { delete HazardRec; }

    bool isTop() const {
      return Available.getID() == ConvergingScheduler::TopQID;
    }

    bool checkHazard(SUnit *SU);

    void releaseNode(SUnit *SU, unsigned ReadyCycle);

    void bumpCycle();

    void bumpNode(SUnit *SU);

    void releasePending();

    void removeReady(SUnit *SU);

    SUnit *pickOnlyChoice();
  };

  ScheduleDAGMI *DAG;
  const TargetRegisterInfo *TRI;

  // State of the top and bottom scheduled instruction boundaries.
  SchedBoundary Top;
  SchedBoundary Bot;

public:
  /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
  enum {
    TopQID = 1,
    BotQID = 2,
    LogMaxQID = 2
  };

  ConvergingScheduler():
    DAG(0), TRI(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}

  virtual void initialize(ScheduleDAGMI *dag);

  virtual SUnit *pickNode(bool &IsTopNode);

  virtual void schedNode(SUnit *SU, bool IsTopNode);

  virtual void releaseTopNode(SUnit *SU);

  virtual void releaseBottomNode(SUnit *SU);

protected:
  // NOTE(review): name is misspelled ("Bidrectional"); the definition is
  // outside this view, so rename declaration and definition together.
  SUnit *pickNodeBidrectional(bool &IsTopNode);

  CandResult pickNodeFromQueue(ReadyQueue &Q,
                               const RegPressureTracker &RPTracker,
                               SchedCandidate &Candidate);
#ifndef NDEBUG
  void traceCandidate(const char *Label, const ReadyQueue &Q, SUnit *SU,
                      PressureElement P = PressureElement());
#endif
};
} // namespace

void ConvergingScheduler::initialize(ScheduleDAGMI *dag) {
  DAG = dag;
  TRI = DAG->TRI;
  Top.DAG = dag;
  Bot.DAG = dag;

  // Initialize the HazardRecognizers.
891 const TargetMachine &TM = DAG->MF.getTarget(); 892 const InstrItineraryData *Itin = TM.getInstrItineraryData(); 893 Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG); 894 Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG); 895 896 assert((!ForceTopDown || !ForceBottomUp) && 897 "-misched-topdown incompatible with -misched-bottomup"); 898 } 899 900 void ConvergingScheduler::releaseTopNode(SUnit *SU) { 901 if (SU->isScheduled) 902 return; 903 904 for (SUnit::succ_iterator I = SU->Preds.begin(), E = SU->Preds.end(); 905 I != E; ++I) { 906 unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle; 907 unsigned Latency = 908 DAG->computeOperandLatency(I->getSUnit(), SU, *I, /*FindMin=*/true); 909 #ifndef NDEBUG 910 Top.MaxMinLatency = std::max(Latency, Top.MaxMinLatency); 911 #endif 912 if (SU->TopReadyCycle < PredReadyCycle + Latency) 913 SU->TopReadyCycle = PredReadyCycle + Latency; 914 } 915 Top.releaseNode(SU, SU->TopReadyCycle); 916 } 917 918 void ConvergingScheduler::releaseBottomNode(SUnit *SU) { 919 if (SU->isScheduled) 920 return; 921 922 assert(SU->getInstr() && "Scheduled SUnit must have instr"); 923 924 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); 925 I != E; ++I) { 926 unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle; 927 unsigned Latency = 928 DAG->computeOperandLatency(SU, I->getSUnit(), *I, /*FindMin=*/true); 929 #ifndef NDEBUG 930 Bot.MaxMinLatency = std::max(Latency, Bot.MaxMinLatency); 931 #endif 932 if (SU->BotReadyCycle < SuccReadyCycle + Latency) 933 SU->BotReadyCycle = SuccReadyCycle + Latency; 934 } 935 Bot.releaseNode(SU, SU->BotReadyCycle); 936 } 937 938 /// Does this SU have a hazard within the current instruction group. 939 /// 940 /// The scheduler supports two modes of hazard recognition. The first is the 941 /// ScheduleHazardRecognizer API. 
It is a fully general hazard recognizer that 942 /// supports highly complicated in-order reservation tables 943 /// (ScoreboardHazardRecognizer) and arbitraty target-specific logic. 944 /// 945 /// The second is a streamlined mechanism that checks for hazards based on 946 /// simple counters that the scheduler itself maintains. It explicitly checks 947 /// for instruction dispatch limitations, including the number of micro-ops that 948 /// can dispatch per cycle. 949 /// 950 /// TODO: Also check whether the SU must start a new group. 951 bool ConvergingScheduler::SchedBoundary::checkHazard(SUnit *SU) { 952 if (HazardRec->isEnabled()) 953 return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard; 954 955 if (IssueCount + DAG->getNumMicroOps(SU->getInstr()) > DAG->getIssueWidth()) 956 return true; 957 958 return false; 959 } 960 961 void ConvergingScheduler::SchedBoundary::releaseNode(SUnit *SU, 962 unsigned ReadyCycle) { 963 if (ReadyCycle < MinReadyCycle) 964 MinReadyCycle = ReadyCycle; 965 966 // Check for interlocks first. For the purpose of other heuristics, an 967 // instruction that cannot issue appears as if it's not in the ReadyQueue. 968 if (ReadyCycle > CurrCycle || checkHazard(SU)) 969 Pending.push(SU); 970 else 971 Available.push(SU); 972 } 973 974 /// Move the boundary of scheduled code by one cycle. 975 void ConvergingScheduler::SchedBoundary::bumpCycle() { 976 unsigned Width = DAG->getIssueWidth(); 977 IssueCount = (IssueCount <= Width) ? 0 : IssueCount - Width; 978 979 assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized"); 980 unsigned NextCycle = std::max(CurrCycle + 1, MinReadyCycle); 981 982 if (!HazardRec->isEnabled()) { 983 // Bypass HazardRec virtual calls. 984 CurrCycle = NextCycle; 985 } 986 else { 987 // Bypass getHazardType calls in case of long latency. 
988 for (; CurrCycle != NextCycle; ++CurrCycle) { 989 if (isTop()) 990 HazardRec->AdvanceCycle(); 991 else 992 HazardRec->RecedeCycle(); 993 } 994 } 995 CheckPending = true; 996 997 DEBUG(dbgs() << "*** " << Available.getName() << " cycle " 998 << CurrCycle << '\n'); 999 } 1000 1001 /// Move the boundary of scheduled code by one SUnit. 1002 void ConvergingScheduler::SchedBoundary::bumpNode(SUnit *SU) { 1003 // Update the reservation table. 1004 if (HazardRec->isEnabled()) { 1005 if (!isTop() && SU->isCall) { 1006 // Calls are scheduled with their preceding instructions. For bottom-up 1007 // scheduling, clear the pipeline state before emitting. 1008 HazardRec->Reset(); 1009 } 1010 HazardRec->EmitInstruction(SU); 1011 } 1012 // Check the instruction group dispatch limit. 1013 // TODO: Check if this SU must end a dispatch group. 1014 IssueCount += DAG->getNumMicroOps(SU->getInstr()); 1015 if (IssueCount >= DAG->getIssueWidth()) { 1016 DEBUG(dbgs() << "*** Max instrs at cycle " << CurrCycle << '\n'); 1017 bumpCycle(); 1018 } 1019 } 1020 1021 /// Release pending ready nodes in to the available queue. This makes them 1022 /// visible to heuristics. 1023 void ConvergingScheduler::SchedBoundary::releasePending() { 1024 // If the available queue is empty, it is safe to reset MinReadyCycle. 1025 if (Available.empty()) 1026 MinReadyCycle = UINT_MAX; 1027 1028 // Check to see if any of the pending instructions are ready to issue. If 1029 // so, add them to the available queue. 1030 for (unsigned i = 0, e = Pending.size(); i != e; ++i) { 1031 SUnit *SU = *(Pending.begin()+i); 1032 unsigned ReadyCycle = isTop() ? 
SU->TopReadyCycle : SU->BotReadyCycle; 1033 1034 if (ReadyCycle < MinReadyCycle) 1035 MinReadyCycle = ReadyCycle; 1036 1037 if (ReadyCycle > CurrCycle) 1038 continue; 1039 1040 if (checkHazard(SU)) 1041 continue; 1042 1043 Available.push(SU); 1044 Pending.remove(Pending.begin()+i); 1045 --i; --e; 1046 } 1047 CheckPending = false; 1048 } 1049 1050 /// Remove SU from the ready set for this boundary. 1051 void ConvergingScheduler::SchedBoundary::removeReady(SUnit *SU) { 1052 if (Available.isInQueue(SU)) 1053 Available.remove(Available.find(SU)); 1054 else { 1055 assert(Pending.isInQueue(SU) && "bad ready count"); 1056 Pending.remove(Pending.find(SU)); 1057 } 1058 } 1059 1060 /// If this queue only has one ready candidate, return it. As a side effect, 1061 /// advance the cycle until at least one node is ready. If multiple instructions 1062 /// are ready, return NULL. 1063 SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() { 1064 if (CheckPending) 1065 releasePending(); 1066 1067 for (unsigned i = 0; Available.empty(); ++i) { 1068 assert(i <= (HazardRec->getMaxLookAhead() + MaxMinLatency) && 1069 "permanent hazard"); (void)i; 1070 bumpCycle(); 1071 releasePending(); 1072 } 1073 if (Available.size() == 1) 1074 return *Available.begin(); 1075 return NULL; 1076 } 1077 1078 #ifndef NDEBUG 1079 void ConvergingScheduler::traceCandidate(const char *Label, const ReadyQueue &Q, 1080 SUnit *SU, PressureElement P) { 1081 dbgs() << Label << " " << Q.getName() << " "; 1082 if (P.isValid()) 1083 dbgs() << TRI->getRegPressureSetName(P.PSetID) << ":" << P.UnitIncrease 1084 << " "; 1085 else 1086 dbgs() << " "; 1087 SU->dump(DAG); 1088 } 1089 #endif 1090 1091 /// pickNodeFromQueue helper that returns true if the LHS reg pressure effect is 1092 /// more desirable than RHS from scheduling standpoint. 
static bool compareRPDelta(const RegPressureDelta &LHS,
                           const RegPressureDelta &RHS) {
  // Compare each component of pressure in decreasing order of importance
  // without checking if any are valid. Invalid PressureElements are assumed to
  // have UnitIncrease==0, so are neutral.

  // First avoid exceeding the target's pressure limit (Excess).
  if (LHS.Excess.UnitIncrease != RHS.Excess.UnitIncrease)
    return LHS.Excess.UnitIncrease < RHS.Excess.UnitIncrease;

  // Avoid increasing the max critical pressure in the scheduled region.
  if (LHS.CriticalMax.UnitIncrease != RHS.CriticalMax.UnitIncrease)
    return LHS.CriticalMax.UnitIncrease < RHS.CriticalMax.UnitIncrease;

  // Avoid increasing the max pressure of the entire region.
  if (LHS.CurrentMax.UnitIncrease != RHS.CurrentMax.UnitIncrease)
    return LHS.CurrentMax.UnitIncrease < RHS.CurrentMax.UnitIncrease;

  return false;
}

/// Pick the best candidate from the top queue.
///
/// Scans queue Q, updating Candidate in place whenever an element beats it on
/// the pressure heuristics (Excess, then CriticalMax, then CurrentMax, then
/// node order). Returns the reason the final candidate was chosen, or NoCand
/// if nothing in Q beat the incoming Candidate.
///
/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
/// DAG building. To adjust for the current scheduling location we need to
/// maintain the number of vreg uses remaining to be top-scheduled.
ConvergingScheduler::CandResult ConvergingScheduler::
pickNodeFromQueue(ReadyQueue &Q, const RegPressureTracker &RPTracker,
                  SchedCandidate &Candidate) {
  DEBUG(Q.dump());

  // getMaxPressureDelta temporarily modifies the tracker.
  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);

  // FoundCandidate remains NoCand if no element of Q beats the best existing
  // candidate already recorded in Candidate.
  CandResult FoundCandidate = NoCand;
  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
    RegPressureDelta RPDelta;
    TempTracker.getMaxPressureDelta((*I)->getInstr(), RPDelta,
                                    DAG->getRegionCriticalPSets(),
                                    DAG->getRegPressure().MaxSetPressure);

    // Initialize the candidate if needed.
    if (!Candidate.SU) {
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      FoundCandidate = NodeOrder;
      continue;
    }
    // Avoid exceeding the target's limit.
    if (RPDelta.Excess.UnitIncrease < Candidate.RPDelta.Excess.UnitIncrease) {
      DEBUG(traceCandidate("ECAND", Q, *I, RPDelta.Excess));
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      FoundCandidate = SingleExcess;
      continue;
    }
    if (RPDelta.Excess.UnitIncrease > Candidate.RPDelta.Excess.UnitIncrease)
      continue;
    // A tie on Excess after a SingleExcess win means the choice is no longer
    // decided by a single pressure component.
    if (FoundCandidate == SingleExcess)
      FoundCandidate = MultiPressure;

    // Avoid increasing the max critical pressure in the scheduled region.
    if (RPDelta.CriticalMax.UnitIncrease
        < Candidate.RPDelta.CriticalMax.UnitIncrease) {
      DEBUG(traceCandidate("PCAND", Q, *I, RPDelta.CriticalMax));
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      FoundCandidate = SingleCritical;
      continue;
    }
    if (RPDelta.CriticalMax.UnitIncrease
        > Candidate.RPDelta.CriticalMax.UnitIncrease)
      continue;
    if (FoundCandidate == SingleCritical)
      FoundCandidate = MultiPressure;

    // Avoid increasing the max pressure of the entire region.
    if (RPDelta.CurrentMax.UnitIncrease
        < Candidate.RPDelta.CurrentMax.UnitIncrease) {
      DEBUG(traceCandidate("MCAND", Q, *I, RPDelta.CurrentMax));
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      FoundCandidate = SingleMax;
      continue;
    }
    if (RPDelta.CurrentMax.UnitIncrease
        > Candidate.RPDelta.CurrentMax.UnitIncrease)
      continue;
    if (FoundCandidate == SingleMax)
      FoundCandidate = MultiPressure;

    // Fall through to original instruction order.
    // Only consider node order if Candidate was chosen from this Q.
    if (FoundCandidate == NoCand)
      continue;

    // Top prefers lower NodeNum (earlier in the original order); bottom
    // prefers higher.
    if ((Q.getID() == TopQID && (*I)->NodeNum < Candidate.SU->NodeNum)
        || (Q.getID() == BotQID && (*I)->NodeNum > Candidate.SU->NodeNum)) {
      DEBUG(traceCandidate("NCAND", Q, *I));
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      FoundCandidate = NodeOrder;
    }
  }
  return FoundCandidate;
}

/// Pick the best candidate node from either the top or bottom queue.
// NOTE(review): "Bidrectional" is a typo for "Bidirectional", but the name is
// part of the class interface, so it is kept as declared.
SUnit *ConvergingScheduler::pickNodeBidrectional(bool &IsTopNode) {
  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (SUnit *SU = Bot.pickOnlyChoice()) {
    IsTopNode = false;
    return SU;
  }
  if (SUnit *SU = Top.pickOnlyChoice()) {
    IsTopNode = true;
    return SU;
  }
  SchedCandidate BotCand;
  // Prefer bottom scheduling when heuristics are silent.
  CandResult BotResult = pickNodeFromQueue(Bot.Available,
                                          DAG->getBotRPTracker(), BotCand);
  assert(BotResult != NoCand && "failed to find the first candidate");

  // If either Q has a single candidate that provides the least increase in
  // Excess pressure, we can immediately schedule from that Q.
  //
  // RegionCriticalPSets summarizes the pressure within the scheduled region and
  // affects picking from either Q. If scheduling in one direction must
  // increase pressure for one of the excess PSets, then schedule in that
  // direction first to provide more freedom in the other direction.
  if (BotResult == SingleExcess || BotResult == SingleCritical) {
    IsTopNode = false;
    return BotCand.SU;
  }
  // Check if the top Q has a better candidate.
  SchedCandidate TopCand;
  CandResult TopResult = pickNodeFromQueue(Top.Available,
                                          DAG->getTopRPTracker(), TopCand);
  assert(TopResult != NoCand && "failed to find the first candidate");

  if (TopResult == SingleExcess || TopResult == SingleCritical) {
    IsTopNode = true;
    return TopCand.SU;
  }
  // If either Q has a single candidate that minimizes pressure above the
  // original region's pressure pick it.
  if (BotResult == SingleMax) {
    IsTopNode = false;
    return BotCand.SU;
  }
  if (TopResult == SingleMax) {
    IsTopNode = true;
    return TopCand.SU;
  }
  // Check for a salient pressure difference and pick the best from either side.
  if (compareRPDelta(TopCand.RPDelta, BotCand.RPDelta)) {
    IsTopNode = true;
    return TopCand.SU;
  }
  // Otherwise prefer the bottom candidate in node order.
  IsTopNode = false;
  return BotCand.SU;
}

/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
SUnit *ConvergingScheduler::pickNode(bool &IsTopNode) {
  // An empty region means nothing left to schedule; the ready queues must
  // also be drained by now.
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return NULL;
  }
  SUnit *SU;
  if (ForceTopDown) {
    // -misched-topdown: always pick from the top boundary.
    SU = Top.pickOnlyChoice();
    if (!SU) {
      SchedCandidate TopCand;
      CandResult TopResult =
        pickNodeFromQueue(Top.Available, DAG->getTopRPTracker(), TopCand);
      assert(TopResult != NoCand && "failed to find the first candidate");
      (void)TopResult;
      SU = TopCand.SU;
    }
    IsTopNode = true;
  }
  else if (ForceBottomUp) {
    // -misched-bottomup: always pick from the bottom boundary.
    SU = Bot.pickOnlyChoice();
    if (!SU) {
      SchedCandidate BotCand;
      CandResult BotResult =
        pickNodeFromQueue(Bot.Available, DAG->getBotRPTracker(), BotCand);
      assert(BotResult != NoCand && "failed to find the first candidate");
      (void)BotResult;
      SU = BotCand.SU;
    }
    IsTopNode = false;
  }
  else {
    // Default: converge from both ends.
    SU = pickNodeBidrectional(IsTopNode);
  }
  // A node can be in both boundaries' ready sets; drop it from each.
  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  DEBUG(dbgs() << "*** " << (IsTopNode ? "Top" : "Bottom")
        << " Scheduling Instruction in cycle "
        << (IsTopNode ? Top.CurrCycle : Bot.CurrCycle) << '\n';
        SU->dump(DAG));
  return SU;
}

/// Update the scheduler's state after scheduling a node. This is the same node
/// that was just returned by pickNode(). However, ScheduleDAGMI needs to update
/// its state based on the current cycle before MachineSchedStrategy does.
void ConvergingScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  if (IsTopNode) {
    SU->TopReadyCycle = Top.CurrCycle;
    Top.bumpNode(SU);
  }
  else {
    SU->BotReadyCycle = Bot.CurrCycle;
    Bot.bumpNode(SU);
  }
}

/// Create the standard converging machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
  assert((!ForceTopDown || !ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
  return new ScheduleDAGMI(C, new ConvergingScheduler());
}
static MachineSchedRegistry
ConvergingSchedRegistry("converge", "Standard converging scheduler.",
                        createConvergingSched);

//===----------------------------------------------------------------------===//
// Machine Instruction Shuffler for Correctness Testing
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace {
/// Apply a less-than relation on the node order, which corresponds to the
/// instruction order prior to scheduling. IsReverse implements greater-than.
template<bool IsReverse>
struct SUnitOrder {
  bool operator()(SUnit *A, SUnit *B) const {
    if (IsReverse)
      return A->NodeNum > B->NodeNum;
    else
      return A->NodeNum < B->NodeNum;
  }
};

/// Reorder instructions as much as possible. Debug-only strategy for stressing
/// the scheduler's correctness, not for producing good schedules.
class InstructionShuffler : public MachineSchedStrategy {
  bool IsAlternating; // Alternate between top and bottom picks.
  bool IsTopDown;     // Direction of the next pick.

  // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
  // gives nodes with a higher number higher priority causing the latest
  // instructions to be scheduled first.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
    TopQ;
  // When scheduling bottom-up, use greater-than as the queue priority.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
    BottomQ;
public:
  InstructionShuffler(bool alternate, bool topdown)
    : IsAlternating(alternate), IsTopDown(topdown) {}

  virtual void initialize(ScheduleDAGMI *) {
    TopQ.clear();
    BottomQ.clear();
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  virtual SUnit *pickNode(bool &IsTopNode) {
    SUnit *SU;
    if (IsTopDown) {
      // Pop until we find a node not already scheduled from the other end.
      do {
        if (TopQ.empty()) return NULL;
        SU = TopQ.top();
        TopQ.pop();
      } while (SU->isScheduled);
      IsTopNode = true;
    }
    else {
      do {
        if (BottomQ.empty()) return NULL;
        SU = BottomQ.top();
        BottomQ.pop();
      } while (SU->isScheduled);
      IsTopNode = false;
    }
    if (IsAlternating)
      IsTopDown = !IsTopDown;
    return SU;
  }

  virtual void schedNode(SUnit *SU, bool IsTopNode) {}

  virtual void releaseTopNode(SUnit *SU) {
    TopQ.push(SU);
  }
  virtual void releaseBottomNode(SUnit *SU) {
    BottomQ.push(SU);
  }
};
} // namespace

/// Factory for the debug-only shuffler strategy. Alternates directions unless
/// one is forced on the command line.
static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
  bool Alternate = !ForceTopDown && !ForceBottomUp;
  bool TopDown = !ForceBottomUp;
  assert((TopDown || !ForceTopDown) &&
         "-misched-topdown incompatible with -misched-bottomup");
  return new ScheduleDAGMI(C, new InstructionShuffler(Alternate, TopDown));
}
static MachineSchedRegistry ShufflerRegistry(
  "shuffle", "Shuffle machine instructions alternating directions",
  createInstructionShuffler);
#endif // !NDEBUG