//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "misched"

#include "RegisterClassInfo.h"
#include "RegisterPressure.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/PriorityQueue.h"

#include <queue>

using namespace llvm;

static cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                                  cl::desc("Force top-down list scheduling"));
static cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                                   cl::desc("Force bottom-up list scheduling"));

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
  MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// MachineScheduler runs after coalescing and before register allocation.
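/// It splits each basic block into scheduling regions and drives the selected
/// ScheduleDAGInstrs implementation (chosen via -misched or the target
/// default) over each region in turn.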
class MachineScheduler : public MachineSchedContext,
                         public MachineFunctionPass {
public:
  MachineScheduler();

  virtual void getAnalysisUsage(AnalysisUsage &AU) const;

  virtual void releaseMemory() {}

  virtual bool runOnMachineFunction(MachineFunction&);

  virtual void print(raw_ostream &O, const Module* = 0) const;

  static char ID; // Class identification, replacement for typeinfo
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "misched",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "misched",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineFunctionPass(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return 0;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C);


/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AliasAnalysis>();

  LIS = &getAnalysis<LiveIntervals>();
  const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();

  RegClassInfo->runOnMachineFunction(*MF);

  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor == useDefaultMachineSched) {
    // Get the default scheduler set by the target.
    Ctor = MachineSchedRegistry::getDefault();
    if (!Ctor) {
      Ctor = createConvergingSched;
      MachineSchedRegistry::setDefault(Ctor);
    }
  }
  // Instantiate the selected scheduler.
  OwningPtr<ScheduleDAGInstrs> Scheduler(Ctor(this));

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler->startBlock(MBB);

    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    unsigned RemainingCount = MBB->size();
    for (MachineBasicBlock::iterator RegionEnd = MBB->end();
         RegionEnd != MBB->begin(); RegionEnd = Scheduler->begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end()
          || TII->isSchedulingBoundary(llvm::prior(RegionEnd), MBB, *MF)) {
        --RegionEnd;
        // Count the boundary instruction.
        --RemainingCount;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      MachineBasicBlock::iterator I = RegionEnd;
      for (; I != MBB->begin(); --I, --RemainingCount) {
        if (TII->isSchedulingBoundary(llvm::prior(I), MBB, *MF))
          break;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler->enterRegion(MBB, I, RegionEnd, RemainingCount);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == llvm::prior(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler->exitRegion();
        continue;
      }
      DEBUG(dbgs() << "MachineScheduling " << MF->getFunction()->getName()
            << ":BB#" << MBB->getNumber() << "\n From: " << *I << " To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " Remaining: " << RemainingCount << "\n");

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler->schedule();

      // Close the current region.
      Scheduler->exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler->begin();
    }
    assert(RemainingCount == 0 && "Instruction count mismatch!");
    Scheduler->finishBlock();
  }
  Scheduler->finalizeSchedule();
  DEBUG(LIS->print(dbgs()));
  return true;
}

void MachineScheduler::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

//===----------------------------------------------------------------------===//
// MachineSchedStrategy - Interface to a machine scheduling algorithm.
//===----------------------------------------------------------------------===//

namespace {
class ScheduleDAGMI;

/// MachineSchedStrategy - Interface used by ScheduleDAGMI to drive the selected
/// scheduling algorithm.
///
/// If this works well and targets wish to reuse ScheduleDAGMI, we may expose it
/// in ScheduleDAGInstrs.h
class MachineSchedStrategy {
public:
  virtual ~MachineSchedStrategy() {}

  /// Initialize the strategy after building the DAG for a new region.
  virtual void initialize(ScheduleDAGMI *DAG) = 0;

  /// Pick the next node to schedule, or return NULL. Set IsTopNode to true to
  /// schedule the node at the top of the unscheduled region. Otherwise it will
  /// be scheduled at the bottom.
  virtual SUnit *pickNode(bool &IsTopNode) = 0;

  /// When all predecessor dependencies have been resolved, free this node for
  /// top-down scheduling.
  virtual void releaseTopNode(SUnit *SU) = 0;
  /// When all successor dependencies have been resolved, free this node for
  /// bottom-up scheduling.
  virtual void releaseBottomNode(SUnit *SU) = 0;
};
} // namespace

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

namespace {
/// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that schedules
/// machine instructions while updating LiveIntervals.
class ScheduleDAGMI : public ScheduleDAGInstrs {
  AliasAnalysis *AA;
  RegisterClassInfo *RegClassInfo;
  MachineSchedStrategy *SchedImpl;

  MachineBasicBlock::iterator LiveRegionEnd;

  /// Register pressure in this region computed by buildSchedGraph.
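  /// RPTracker recedes over the region while the DAG is built, so this records
  /// the max pressure for the whole region before scheduling begins.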
  IntervalPressure RegPressure;
  RegPressureTracker RPTracker;

  /// List of pressure sets that exceed the target's pressure limit before
  /// scheduling, listed in increasing set ID order. Each pressure set is paired
  /// with its max pressure in the currently scheduled regions.
  std::vector<PressureElement> RegionCriticalPSets;

  /// The top of the unscheduled zone.
  MachineBasicBlock::iterator CurrentTop;
  IntervalPressure TopPressure;
  RegPressureTracker TopRPTracker;

  /// The bottom of the unscheduled zone.
  MachineBasicBlock::iterator CurrentBottom;
  IntervalPressure BotPressure;
  RegPressureTracker BotRPTracker;

  /// The number of instructions scheduled so far. Used to cut off the
  /// scheduler at the point determined by misched-cutoff.
  unsigned NumInstrsScheduled;
public:
  ScheduleDAGMI(MachineSchedContext *C, MachineSchedStrategy *S):
    ScheduleDAGInstrs(*C->MF, *C->MLI, *C->MDT, /*IsPostRA=*/false, C->LIS),
    AA(C->AA), RegClassInfo(C->RegClassInfo), SchedImpl(S),
    RPTracker(RegPressure), CurrentTop(), TopRPTracker(TopPressure),
    CurrentBottom(), BotRPTracker(BotPressure), NumInstrsScheduled(0) {}

  ~ScheduleDAGMI() {
    delete SchedImpl;
  }

  MachineBasicBlock::iterator top() const { return CurrentTop; }
  MachineBasicBlock::iterator bottom() const { return CurrentBottom; }

  /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
  /// region. This covers all instructions in a block, while schedule() may only
  /// cover a subset.
  void enterRegion(MachineBasicBlock *bb,
                   MachineBasicBlock::iterator begin,
                   MachineBasicBlock::iterator end,
                   unsigned endcount);

  /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
  /// reorderable instructions.
  void schedule();

  /// Get current register pressure for the top scheduled instructions.
  const IntervalPressure &getTopPressure() const { return TopPressure; }
  const RegPressureTracker &getTopRPTracker() const { return TopRPTracker; }

  /// Get current register pressure for the bottom scheduled instructions.
  const IntervalPressure &getBotPressure() const { return BotPressure; }
  const RegPressureTracker &getBotRPTracker() const { return BotRPTracker; }

  /// Get register pressure for the entire scheduling region before scheduling.
  const IntervalPressure &getRegPressure() const { return RegPressure; }

  const std::vector<PressureElement> &getRegionCriticalPSets() const {
    return RegionCriticalPSets;
  }

protected:
  void initRegPressure();
  void updateScheduledPressure(std::vector<unsigned> NewMaxPressure);

  void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos);
  bool checkSchedLimit();

  void releaseRoots();

  void releaseSucc(SUnit *SU, SDep *SuccEdge);
  void releaseSuccessors(SUnit *SU);
  void releasePred(SUnit *SU, SDep *PredEdge);
  void releasePredecessors(SUnit *SU);

  void placeDebugValues();
};
} // namespace

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
***\n"; 418 SuccSU->dump(this); 419 dbgs() << " has been released too many times!\n"; 420 llvm_unreachable(0); 421 } 422 #endif 423 --SuccSU->NumPredsLeft; 424 if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU) 425 SchedImpl->releaseTopNode(SuccSU); 426 } 427 428 /// releaseSuccessors - Call releaseSucc on each of SU's successors. 429 void ScheduleDAGMI::releaseSuccessors(SUnit *SU) { 430 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); 431 I != E; ++I) { 432 releaseSucc(SU, &*I); 433 } 434 } 435 436 /// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When 437 /// NumSuccsLeft reaches zero, release the predecessor node. 438 void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) { 439 SUnit *PredSU = PredEdge->getSUnit(); 440 441 #ifndef NDEBUG 442 if (PredSU->NumSuccsLeft == 0) { 443 dbgs() << "*** Scheduling failed! ***\n"; 444 PredSU->dump(this); 445 dbgs() << " has been released too many times!\n"; 446 llvm_unreachable(0); 447 } 448 #endif 449 --PredSU->NumSuccsLeft; 450 if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) 451 SchedImpl->releaseBottomNode(PredSU); 452 } 453 454 /// releasePredecessors - Call releasePred on each of SU's predecessors. 455 void ScheduleDAGMI::releasePredecessors(SUnit *SU) { 456 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); 457 I != E; ++I) { 458 releasePred(SU, &*I); 459 } 460 } 461 462 void ScheduleDAGMI::moveInstruction(MachineInstr *MI, 463 MachineBasicBlock::iterator InsertPos) { 464 // Advance RegionBegin if the first instruction moves down. 465 if (&*RegionBegin == MI) 466 ++RegionBegin; 467 468 // Update the instruction stream. 469 BB->splice(InsertPos, BB, MI); 470 471 // Update LiveIntervals 472 LIS->handleMove(MI); 473 474 // Recede RegionBegin if an instruction moves above the first. 475 if (RegionBegin == InsertPos) 476 RegionBegin = MI; 477 } 478 479 bool ScheduleDAGMI::checkSchedLimit() { 480 #ifndef NDEBUG 481 if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) { 482 CurrentTop = CurrentBottom; 483 return false; 484 } 485 ++NumInstrsScheduled; 486 #endif 487 return true; 488 } 489 490 /// enterRegion - Called back from MachineScheduler::runOnMachineFunction after 491 /// crossing a scheduling boundary. [begin, end) includes all instructions in 492 /// the region, including the boundary itself and single-instruction regions 493 /// that don't get scheduled. 494 void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb, 495 MachineBasicBlock::iterator begin, 496 MachineBasicBlock::iterator end, 497 unsigned endcount) 498 { 499 ScheduleDAGInstrs::enterRegion(bb, begin, end, endcount); 500 501 // For convenience remember the end of the liveness region. 502 LiveRegionEnd = 503 (RegionEnd == bb->end()) ? RegionEnd : llvm::next(RegionEnd); 504 } 505 506 // Setup the register pressure trackers for the top scheduled top and bottom 507 // scheduled regions. 508 void ScheduleDAGMI::initRegPressure() { 509 TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin); 510 BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd); 511 512 // Close the RPTracker to finalize live ins. 513 RPTracker.closeRegion(); 514 515 // Initialize the live ins and live outs. 516 TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs); 517 BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs); 518 519 // Close one end of the tracker so we can call 520 // getMaxUpward/DownwardPressureDelta before advancing across any 521 // instructions. 
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    BotRPTracker.recede();

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  std::vector<unsigned> RegionPressure = RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = TRI->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit)
      RegionCriticalPSets.push_back(PressureElement(i, 0));
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].PSetID) << " ";
        dbgs() << "\n");
}

// FIXME: When the pressure tracker deals in pressure differences then we won't
// iterate over all RegionCriticalPSets[i].
void ScheduleDAGMI::
updateScheduledPressure(std::vector<unsigned> NewMaxPressure) {
  for (unsigned i = 0, e = RegionCriticalPSets.size(); i < e; ++i) {
    unsigned ID = RegionCriticalPSets[i].PSetID;
    int &MaxUnits = RegionCriticalPSets[i].UnitIncrease;
    if ((int)NewMaxPressure[ID] > MaxUnits)
      MaxUnits = NewMaxPressure[ID];
  }
}

// Release all DAG roots for scheduling.
void ScheduleDAGMI::releaseRoots() {
  SmallVector<SUnit*, 16> BotRoots;

  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    // A SUnit is ready to top schedule if it has no predecessors.
    if (I->Preds.empty())
      SchedImpl->releaseTopNode(&(*I));
    // A SUnit is ready to bottom schedule if it has no successors.
    if (I->Succs.empty())
      BotRoots.push_back(&(*I));
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I)
    SchedImpl->releaseBottomNode(*I);
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
void ScheduleDAGMI::schedule() {
  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();

  DEBUG(dbgs() << "********** MI Scheduling **********\n");
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  if (ViewMISchedDAGs) viewGraph();

  SchedImpl->initialize(this);

  // Release edges from the special Entry node or to the special Exit node.
  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  // Release all DAG roots for scheduling.
  releaseRoots();

  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
  bool IsTopNode = false;
  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
    DEBUG(dbgs() << "*** " << (IsTopNode ? "Top" : "Bottom")
          << " Scheduling Instruction:\n"; SU->dump(this));
    if (!checkSchedLimit())
      break;

    // Move the instruction to its new location in the instruction stream.
    MachineInstr *MI = SU->getInstr();

    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else {
        moveInstruction(MI, CurrentTop);
        TopRPTracker.setPos(MI);
      }

      // Update top scheduled pressure.
      TopRPTracker.advance();
      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
      updateScheduledPressure(TopRPTracker.getPressure().MaxSetPressure);

      // Release dependent instructions for scheduling.
      releaseSuccessors(SU);
    }
    else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI) {
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
          TopRPTracker.setPos(CurrentTop);
        }
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
      // Update bottom scheduled pressure.
      BotRPTracker.recede();
      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
      updateScheduledPressure(BotRPTracker.getPressure().MaxSetPressure);

      // Release dependent instructions for scheduling.
      releasePredecessors(SU);
    }
    SU->isScheduled = true;
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == llvm::prior(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = NULL;
}

//===----------------------------------------------------------------------===//
// ConvergingScheduler - Implementation of the standard MachineSchedStrategy.
//===----------------------------------------------------------------------===//

namespace {
/// Wrapper around a vector of SUnits with some basic convenience methods.
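/// Queue membership is recorded as a bit in SUnit::NodeQueueId (this queue's
/// ID), so a node may sit in both the top and bottom queues at once.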
struct ReadyQueue {
  typedef std::vector<SUnit*>::iterator iterator;

  unsigned ID;
  std::vector<SUnit*> Queue;

  ReadyQueue(unsigned id): ID(id) {}

  bool isInQueue(SUnit *SU) const {
    return SU->NodeQueueId & ID;
  }

  bool empty() const { return Queue.empty(); }

  unsigned size() const { return Queue.size(); }

  iterator begin() { return Queue.begin(); }

  iterator end() { return Queue.end(); }

  iterator find(SUnit *SU) {
    return std::find(Queue.begin(), Queue.end(), SU);
  }

  void push(SUnit *SU) {
    Queue.push_back(SU);
    SU->NodeQueueId |= ID;
  }

  void remove(iterator I) {
    // Swap with the back and pop; order within the queue is irrelevant.
    (*I)->NodeQueueId &= ~ID;
    *I = Queue.back();
    Queue.pop_back();
  }

  void dump(const char* Name) {
    dbgs() << Name << ": ";
    for (unsigned i = 0, e = Queue.size(); i < e; ++i)
      dbgs() << Queue[i]->NodeNum << " ";
    dbgs() << "\n";
  }
};

/// ConvergingScheduler shrinks the unscheduled zone using heuristics to balance
/// the schedule.
class ConvergingScheduler : public MachineSchedStrategy {

  /// Store the state used by ConvergingScheduler heuristics, required for the
  /// lifetime of one invocation of pickNode().
  struct SchedCandidate {
    // The best SUnit candidate.
    SUnit *SU;

    // Register pressure values for the best candidate.
    RegPressureDelta RPDelta;

    SchedCandidate(): SU(NULL) {}
  };
  /// Represent the type of SchedCandidate found within a single queue.
  enum CandResult {
    NoCand, NodeOrder, SingleExcess, SingleCritical, SingleMax, MultiPressure };

  ScheduleDAGMI *DAG;
  const TargetRegisterInfo *TRI;

  ReadyQueue TopQueue;
  ReadyQueue BotQueue;

public:
  /// SUnit::NodeQueueId = 0 (none), = 1 (top), = 2 (bottom), = 3 (both)
  enum {
    TopQID = 1,
    BotQID = 2
  };

  ConvergingScheduler(): DAG(0), TRI(0), TopQueue(TopQID), BotQueue(BotQID) {}

  static const char *getQName(unsigned ID) {
    switch (ID) {
    default: return "NoQ";
    case TopQID: return "TopQ";
    case BotQID: return "BotQ";
    }
  }

  virtual void initialize(ScheduleDAGMI *dag) {
    DAG = dag;
    TRI = DAG->TRI;

    assert((!ForceTopDown || !ForceBottomUp) &&
           "-misched-topdown incompatible with -misched-bottomup");
  }

  virtual SUnit *pickNode(bool &IsTopNode);

  virtual void releaseTopNode(SUnit *SU) {
    if (!SU->isScheduled)
      TopQueue.push(SU);
  }
  virtual void releaseBottomNode(SUnit *SU) {
    if (!SU->isScheduled)
      BotQueue.push(SU);
  }
protected:
  SUnit *pickNodeBidirectional(bool &IsTopNode);

  CandResult pickNodeFromQueue(ReadyQueue &Q,
                               const RegPressureTracker &RPTracker,
                               SchedCandidate &Candidate);
#ifndef NDEBUG
  void traceCandidate(const char *Label, unsigned QID, SUnit *SU,
                      PressureElement P = PressureElement());
#endif
};
} // namespace

#ifndef NDEBUG
void ConvergingScheduler::
traceCandidate(const char *Label, unsigned QID, SUnit *SU,
               PressureElement P) {
  dbgs() << Label << getQName(QID) << " ";
  if (P.isValid())
    dbgs() << TRI->getRegPressureSetName(P.PSetID) << ":" << P.UnitIncrease
           << " ";
  else
    dbgs() << " ";
  SU->dump(DAG);
}
#endif

/// pickNodeFromQueue helper that returns true if the LHS reg pressure effect is
/// more desirable than RHS from a scheduling standpoint.
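///
/// Returns true only when LHS wins on the highest-priority component that
/// differs (Excess, then CriticalMax, then CurrentMax); ties on every
/// component yield false.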
static bool compareRPDelta(const RegPressureDelta &LHS,
                           const RegPressureDelta &RHS) {
  // Compare each component of pressure in decreasing order of importance
  // without checking if any are valid. Invalid PressureElements are assumed to
  // have UnitIncrease==0, so are neutral.

  // Avoid exceeding the target's pressure limit.
  if (LHS.Excess.UnitIncrease != RHS.Excess.UnitIncrease)
    return LHS.Excess.UnitIncrease < RHS.Excess.UnitIncrease;

  // Avoid increasing the max critical pressure in the scheduled region.
  if (LHS.CriticalMax.UnitIncrease != RHS.CriticalMax.UnitIncrease)
    return LHS.CriticalMax.UnitIncrease < RHS.CriticalMax.UnitIncrease;

  // Avoid increasing the max pressure of the entire region.
  if (LHS.CurrentMax.UnitIncrease != RHS.CurrentMax.UnitIncrease)
    return LHS.CurrentMax.UnitIncrease < RHS.CurrentMax.UnitIncrease;

  return false;
}

/// Pick the best candidate from the ready queue Q.
///
/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
/// DAG building. To adjust for the current scheduling location we need to
/// maintain the number of vreg uses remaining to be top-scheduled.
ConvergingScheduler::CandResult ConvergingScheduler::
pickNodeFromQueue(ReadyQueue &Q, const RegPressureTracker &RPTracker,
                  SchedCandidate &Candidate) {
  DEBUG(Q.dump(getQName(Q.ID)));

  // getMaxPressureDelta temporarily modifies the tracker.
  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);

  // Candidate.SU remains NULL if no candidate beats the best existing one.
  CandResult FoundCandidate = NoCand;
  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {

    RegPressureDelta RPDelta;
    TempTracker.getMaxPressureDelta((*I)->getInstr(), RPDelta,
                                    DAG->getRegionCriticalPSets(),
                                    DAG->getRegPressure().MaxSetPressure);

    // Initialize the candidate if needed.
    if (!Candidate.SU) {
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      FoundCandidate = NodeOrder;
      continue;
    }
    // Avoid exceeding the target's limit.
    if (RPDelta.Excess.UnitIncrease < Candidate.RPDelta.Excess.UnitIncrease) {
      DEBUG(traceCandidate("ECAND", Q.ID, *I, RPDelta.Excess));
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      FoundCandidate = SingleExcess;
      continue;
    }
    if (RPDelta.Excess.UnitIncrease > Candidate.RPDelta.Excess.UnitIncrease)
      continue;
    if (FoundCandidate == SingleExcess)
      FoundCandidate = MultiPressure;

    // Avoid increasing the max critical pressure in the scheduled region.
    if (RPDelta.CriticalMax.UnitIncrease
        < Candidate.RPDelta.CriticalMax.UnitIncrease) {
      DEBUG(traceCandidate("PCAND", Q.ID, *I, RPDelta.CriticalMax));
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      FoundCandidate = SingleCritical;
      continue;
    }
    if (RPDelta.CriticalMax.UnitIncrease
        > Candidate.RPDelta.CriticalMax.UnitIncrease)
      continue;
    if (FoundCandidate == SingleCritical)
      FoundCandidate = MultiPressure;

    // Avoid increasing the max pressure of the entire region.
    if (RPDelta.CurrentMax.UnitIncrease
        < Candidate.RPDelta.CurrentMax.UnitIncrease) {
      DEBUG(traceCandidate("MCAND", Q.ID, *I, RPDelta.CurrentMax));
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      FoundCandidate = SingleMax;
      continue;
    }
    if (RPDelta.CurrentMax.UnitIncrease
        > Candidate.RPDelta.CurrentMax.UnitIncrease)
      continue;
    if (FoundCandidate == SingleMax)
      FoundCandidate = MultiPressure;

    // Fall through to original instruction order.
    // Only consider node order if Candidate was chosen from this Q.
    if (FoundCandidate == NoCand)
      continue;

    if ((Q.ID == TopQID && (*I)->NodeNum < Candidate.SU->NodeNum)
        || (Q.ID == BotQID && (*I)->NodeNum > Candidate.SU->NodeNum)) {
      DEBUG(traceCandidate("NCAND", Q.ID, *I));
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      FoundCandidate = NodeOrder;
    }
  }
  return FoundCandidate;
}

/// Pick the best candidate node from either the top or bottom queue.
SUnit *ConvergingScheduler::pickNodeBidirectional(bool &IsTopNode) {
  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (BotQueue.size() == 1) {
    IsTopNode = false;
    return *BotQueue.begin();
  }
  if (TopQueue.size() == 1) {
    IsTopNode = true;
    return *TopQueue.begin();
  }
  SchedCandidate BotCandidate;
  // Prefer bottom scheduling when heuristics are silent.
  CandResult BotResult =
    pickNodeFromQueue(BotQueue, DAG->getBotRPTracker(), BotCandidate);
  assert(BotResult != NoCand && "failed to find the first candidate");

  // If either Q has a single candidate that provides the least increase in
  // Excess pressure, we can immediately schedule from that Q.
  //
  // RegionCriticalPSets summarizes the pressure within the scheduled region and
  // affects picking from either Q. If scheduling in one direction must
  // increase pressure for one of the excess PSets, then schedule in that
  // direction first to provide more freedom in the other direction.
  if (BotResult == SingleExcess || BotResult == SingleCritical) {
    IsTopNode = false;
    return BotCandidate.SU;
  }
  // Check if the top Q has a better candidate.
  SchedCandidate TopCandidate;
  CandResult TopResult =
    pickNodeFromQueue(TopQueue, DAG->getTopRPTracker(), TopCandidate);
  assert(TopResult != NoCand && "failed to find the first candidate");

  if (TopResult == SingleExcess || TopResult == SingleCritical) {
    IsTopNode = true;
    return TopCandidate.SU;
  }
  // If either Q has a single candidate that minimizes pressure above the
  // original region's pressure, pick it.
  if (BotResult == SingleMax) {
    IsTopNode = false;
    return BotCandidate.SU;
  }
  if (TopResult == SingleMax) {
    IsTopNode = true;
    return TopCandidate.SU;
  }
  // Check for a salient pressure difference and pick the best from either side.
  if (compareRPDelta(TopCandidate.RPDelta, BotCandidate.RPDelta)) {
    IsTopNode = true;
    return TopCandidate.SU;
  }
  // Otherwise prefer the bottom candidate in node order.
  IsTopNode = false;
  return BotCandidate.SU;
}

/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
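///
/// Honors -misched-topdown/-misched-bottomup when either is forced; otherwise
/// defers to pickNodeBidirectional, then removes the chosen node from any
/// ready queue that still contains it.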
SUnit *ConvergingScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(TopQueue.empty() && BotQueue.empty() && "ReadyQueue garbage");
    return NULL;
  }
  SUnit *SU;
  if (ForceTopDown) {
    SU = DAG->getSUnit(DAG->top());
    IsTopNode = true;
  }
  else if (ForceBottomUp) {
    SU = DAG->getSUnit(priorNonDebug(DAG->bottom(), DAG->top()));
    IsTopNode = false;
  }
  else {
    SU = pickNodeBidirectional(IsTopNode);
  }
  if (SU->isTopReady()) {
    assert(!TopQueue.empty() && "bad ready count");
    TopQueue.remove(TopQueue.find(SU));
  }
  if (SU->isBottomReady()) {
    assert(!BotQueue.empty() && "bad ready count");
    BotQueue.remove(BotQueue.find(SU));
  }
  return SU;
}

/// Create the standard converging machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
  assert((!ForceTopDown || !ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
  return new ScheduleDAGMI(C, new ConvergingScheduler());
}
static MachineSchedRegistry
ConvergingSchedRegistry("converge", "Standard converging scheduler.",
                        createConvergingSched);

//===----------------------------------------------------------------------===//
// Machine Instruction Shuffler for Correctness Testing
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace {
/// Apply a less-than relation on the node order, which corresponds to the
/// instruction order prior to scheduling. IsReverse implements greater-than.
template<bool IsReverse>
struct SUnitOrder {
  bool operator()(SUnit *A, SUnit *B) const {
    if (IsReverse)
      return A->NodeNum > B->NodeNum;
    else
      return A->NodeNum < B->NodeNum;
  }
};

/// Reorder instructions as much as possible.
class InstructionShuffler : public MachineSchedStrategy {
  bool IsAlternating;
  bool IsTopDown;

  // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
  // gives nodes with a higher number higher priority, causing the latest
  // instructions to be scheduled first.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
    TopQ;
  // When scheduling bottom-up, use greater-than as the queue priority.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
    BottomQ;
public:
  InstructionShuffler(bool alternate, bool topdown)
    : IsAlternating(alternate), IsTopDown(topdown) {}

  virtual void initialize(ScheduleDAGMI *) {
    TopQ.clear();
    BottomQ.clear();
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  virtual SUnit *pickNode(bool &IsTopNode) {
    SUnit *SU;
    if (IsTopDown) {
      do {
        if (TopQ.empty()) return NULL;
        SU = TopQ.top();
        TopQ.pop();
      } while (SU->isScheduled);
      IsTopNode = true;
    }
    else {
      do {
        if (BottomQ.empty()) return NULL;
        SU = BottomQ.top();
        BottomQ.pop();
      } while (SU->isScheduled);
      IsTopNode = false;
    }
    if (IsAlternating)
      IsTopDown = !IsTopDown;
    return SU;
  }

  virtual void releaseTopNode(SUnit *SU) {
    TopQ.push(SU);
  }
  virtual void releaseBottomNode(SUnit *SU) {
    BottomQ.push(SU);
  }
};
} // namespace

static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
  bool Alternate = !ForceTopDown && !ForceBottomUp;
  bool TopDown = !ForceBottomUp;
  assert((TopDown || !ForceTopDown) &&
         "-misched-topdown incompatible with -misched-bottomup");
  return new ScheduleDAGMI(C, new InstructionShuffler(Alternate, TopDown));
}
static MachineSchedRegistry ShufflerRegistry(
  "shuffle", "Shuffle machine instructions alternating directions",
  createInstructionShuffler);
#endif // !NDEBUG