//===- MachineScheduler.cpp - Machine Instruction Scheduler --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "misched"

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include <queue>

using namespace llvm;

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
}

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
  cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(false));

static cl::opt<bool> EnableLoadCluster("misched-cluster", cl::Hidden,
  cl::desc("Enable load clustering."), cl::init(true));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
    MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedContext,
                         public MachineFunctionPass {
public:
  MachineScheduler();

  virtual void getAnalysisUsage(AnalysisUsage &AU) const;

  virtual void releaseMemory() {}

  virtual bool runOnMachineFunction(MachineFunction&);

  virtual void print(raw_ostream &O, const Module* = 0) const;

  static char ID; // Class identification, replacement for typeinfo
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "misched",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "misched",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineFunctionPass(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return 0;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C);
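// As an illustration (hypothetical code, not part of this file): a target or
// out-of-tree scheduler can be made selectable via -misched=<name> by
// defining a factory and a registry entry, following the same pattern as
// DefaultSchedRegistry above:
//
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
//     return new ScheduleDAGMI(C, new MySchedStrategy());
//   }
//   static MachineSchedRegistry
//   MySchedRegistry("mysched", "Run my custom scheduler.", createMySched);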
/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return const_cast<MachineInstr*>(
    &*priorNonDebug(MachineBasicBlock::const_iterator(I), Beg));
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  // Cast the return value to nonconst MachineInstr, then cast to an
  // instr_iterator, which does not check for null, finally return a
  // bundle_iterator.
  return MachineBasicBlock::instr_iterator(
    const_cast<MachineInstr*>(
      &*nextIfDebug(MachineBasicBlock::const_iterator(I), End)));
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AliasAnalysis>();

  LIS = &getAnalysis<LiveIntervals>();
  const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();

  if (VerifyScheduling) {
    DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor == useDefaultMachineSched) {
    // Get the default scheduler set by the target.
    Ctor = MachineSchedRegistry::getDefault();
    if (!Ctor) {
      Ctor = createConvergingSched;
      MachineSchedRegistry::setDefault(Ctor);
    }
  }
  // Instantiate the selected scheduler.
  OwningPtr<ScheduleDAGInstrs> Scheduler(Ctor(this));

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler->startBlock(MBB);

    // Break the block into scheduling regions [I, RegionEnd), and schedule
    // each region as soon as it is discovered. RegionEnd points to the
    // scheduling boundary at the bottom of the region. The DAG does not
    // include RegionEnd, but the region does (i.e. the next RegionEnd is above
    // the previous RegionBegin). If the current block has no terminator then
    // RegionEnd == MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
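    //
    // For illustration only: since labels and terminators are scheduling
    // boundaries by default (see TargetInstrInfo::isSchedulingBoundary), a
    // block of the form:
    //
    //   %a = ...
    //   %b = ...
    //   EH_LABEL ...
    //   %c = ...
    //   %d = ...
    //   JMP ...
    //
    // is divided into two regions that are visited bottom-up:
    // first [%c, JMP), then [%a, EH_LABEL).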
    unsigned RemainingInstrs = MBB->size();
    for (MachineBasicBlock::iterator RegionEnd = MBB->end();
         RegionEnd != MBB->begin(); RegionEnd = Scheduler->begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end()
          || TII->isSchedulingBoundary(llvm::prior(RegionEnd), MBB, *MF)) {
        --RegionEnd;
        // Count the boundary instruction.
        --RemainingInstrs;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      unsigned NumRegionInstrs = 0;
      MachineBasicBlock::iterator I = RegionEnd;
      for (; I != MBB->begin(); --I, --RemainingInstrs, ++NumRegionInstrs) {
        if (TII->isSchedulingBoundary(llvm::prior(I), MBB, *MF))
          break;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler->enterRegion(MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == llvm::prior(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler->exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n  From: " << *I << "    To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " RegionInstrs: " << NumRegionInstrs
            << " Remaining: " << RemainingInstrs << "\n");

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler->schedule();

      // Close the current region.
      Scheduler->exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler->begin();
    }
    assert(RemainingInstrs == 0 && "Instruction count mismatch!");
    Scheduler->finishBlock();
  }
  Scheduler->finalizeSchedule();
  DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

void MachineScheduler::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ReadyQueue::dump() {
  dbgs() << Name << ": ";
  for (unsigned i = 0, e = Queue.size(); i < e; ++i)
    dbgs() << Queue[i]->NodeNum << " ";
  dbgs() << "\n";
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMI::~ScheduleDAGMI() {
  delete DFSResult;
  DeleteContainerPointers(Mutations);
  delete SchedImpl;
}

bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

/// This is normally called from the main scheduler loop but may also be
/// invoked by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(MachineInstr *MI,
                                    MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals
  LIS->handleMove(MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary.
/// [begin, end) includes all instructions in the region, including the
/// boundary itself and single-instruction regions that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd =
    (RegionEnd == bb->end()) ? RegionEnd : llvm::next(RegionEnd);

  SchedImpl->initPolicy(begin, end, regioninstrs);

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
}

// Set up the register pressure trackers for the top and bottom of the
// scheduled region.
void ScheduleDAGMI::initRegPressure() {
  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    DEBUG(dbgs() << "Live Thru: ";
          dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // For each live out vreg reduce the pressure change associated with other
  // uses of the same vreg below the live-out reaching def.
  updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd) {
    SmallVector<unsigned, 8> LiveUses;
    BotRPTracker.recede(&LiveUses);
    updatePressureDiffs(LiveUses);
  }

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also
  // track the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      DEBUG(dbgs() << TRI->getRegPressureSetName(i)
            << " Limit " << Limit
            << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureChange(i));
    }
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].getPSet()) << " ";
        dbgs() << "\n");
}
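// Illustrative note: RegionCriticalPSets, computed above, records each
// pressure set whose max pressure in this region exceeds its limit (e.g. a
// hypothetical integer pressure set with limit 16 while the region's max
// pressure for that set is 20). The heuristics then try to avoid increasing
// pressure for these sets, and updateScheduledPressure below tracks the max
// pressure they reach in the scheduled code.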
// FIXME: When the pressure tracker deals in pressure differences then we won't
// iterate over all RegionCriticalPSets[i].
void ScheduleDAGMI::
updateScheduledPressure(const std::vector<unsigned> &NewMaxPressure) {
  for (unsigned i = 0, e = RegionCriticalPSets.size(); i < e; ++i) {
    unsigned ID = RegionCriticalPSets[i].getPSet();
    if ((int)NewMaxPressure[ID] > RegionCriticalPSets[i].getUnitInc()
        && NewMaxPressure[ID] <= INT16_MAX)
      RegionCriticalPSets[i].setUnitInc(NewMaxPressure[ID]);
  }
  DEBUG(
    for (unsigned i = 0, e = NewMaxPressure.size(); i < e; ++i) {
      unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
      if (NewMaxPressure[i] > Limit) {
        dbgs() << "  " << TRI->getRegPressureSetName(i) << ": "
               << NewMaxPressure[i] << " > " << Limit << "\n";
      }
    });
}

/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMI::updatePressureDiffs(ArrayRef<unsigned> LiveUses) {
  for (unsigned LUIdx = 0, LUEnd = LiveUses.size(); LUIdx != LUEnd; ++LUIdx) {
    /// FIXME: Currently assuming single-use physregs.
    unsigned Reg = LiveUses[LUIdx];
    DEBUG(dbgs() << "  LiveReg: " << PrintVRegOrUnit(Reg, TRI) << "\n");
    if (!TRI->isVirtualRegister(Reg))
      continue;

    // This may be called before CurrentBottom has been initialized. However,
    // BotRPTracker must have a valid position. We want the value live into the
    // instruction or live out of the block, so ask for the previous
    // instruction's live-out.
    const LiveInterval &LI = LIS->getInterval(Reg);
    VNInfo *VNI;
    MachineBasicBlock::const_iterator I =
      nextIfDebug(BotRPTracker.getPos(), BB->end());
    if (I == BB->end())
      VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    else {
      LiveRangeQuery LRQ(LI, LIS->getInstructionIndex(I));
      VNI = LRQ.valueIn();
    }
    // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
    assert(VNI && "No live value at use.");
    for (VReg2UseMap::iterator
           UI = VRegUses.find(Reg); UI != VRegUses.end(); ++UI) {
      SUnit *SU = UI->SU;
      DEBUG(dbgs() << "  UpdateRegP: SU(" << SU->NodeNum << ") "
            << *SU->getInstr());
      // If this use comes before the reaching def, it cannot be a last use,
      // so decrease its pressure change.
      if (!SU->isScheduled && SU != &ExitSU) {
        LiveRangeQuery LRQ(LI, LIS->getInstructionIndex(SU->getInstr()));
        if (LRQ.valueIn() == VNI)
          getPressureDiff(SU).addPressureChange(Reg, true, &MRI);
      }
    }
  }
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMI then it will want to override this virtual method in order to
/// update any specialized state.
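///
/// As a rough sketch (hypothetical code, not part of this file), a minimal
/// top-down strategy only needs to track released nodes and pick one:
///
///   struct InstructionOrderStrategy : public MachineSchedStrategy {
///     std::vector<SUnit*> Ready;
///     virtual void initialize(ScheduleDAGMI *DAG) {}
///     virtual SUnit *pickNode(bool &IsTopNode) {
///       if (Ready.empty()) return NULL;
///       IsTopNode = true;
///       SUnit *SU = Ready.back();
///       Ready.pop_back();
///       return SU;
///     }
///     virtual void schedNode(SUnit *SU, bool IsTopNode) {}
///     virtual void releaseTopNode(SUnit *SU) { Ready.push_back(SU); }
///     virtual void releaseBottomNode(SUnit *SU) {}
///   };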
void ScheduleDAGMI::schedule() {
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Build the DAG and setup three register pressure trackers.
void ScheduleDAGMI::buildDAGWithRegPressure() {
  if (!ShouldTrackPressure) {
    RPTracker.reset();
    RegionCriticalPSets.clear();
    buildSchedGraph(AA);
    return;
  }

  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker, &SUPressureDiffs);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
    Mutations[i]->apply(this);
  }
}

void ScheduleDAGMI::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomUp=*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

void ScheduleDAGMI::findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                                          SmallVectorImpl<SUnit*> &BotRoots) {
  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    SUnit *SU = &(*I);
    assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU->biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!I->NumPredsLeft)
      TopRoots.push_back(SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!I->NumSuccsLeft)
      BotRoots.push_back(SU);
  }
  ExitSU.biasCriticalPath();
}

/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single-block loops. To handle loops
/// that span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes.
/// For example, consider the following instruction sequence where each
/// instruction has unit latency and defines an eponymous virtual register:
///
/// a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
unsigned ScheduleDAGMI::computeCyclicCriticalPath() {
  // This only applies to single-block loops.
  if (!BB->isSuccessor(BB))
    return 0;

  unsigned MaxCyclicLatency = 0;
  // Visit each live out vreg def to find def/use pairs that cross iterations.
  ArrayRef<unsigned> LiveOuts = RPTracker.getPressure().LiveOutRegs;
  for (ArrayRef<unsigned>::iterator RI = LiveOuts.begin(), RE = LiveOuts.end();
       RI != RE; ++RI) {
    unsigned Reg = *RI;
    if (!TRI->isVirtualRegister(Reg))
      continue;
    const LiveInterval &LI = LIS->getInterval(Reg);
    const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    if (!DefVNI)
      continue;

    MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
    const SUnit *DefSU = getSUnit(DefMI);
    if (!DefSU)
      continue;

    unsigned LiveOutHeight = DefSU->getHeight();
    unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
    // Visit all local users of the vreg def.
    for (VReg2UseMap::iterator
           UI = VRegUses.find(Reg); UI != VRegUses.end(); ++UI) {
      if (UI->SU == &ExitSU)
        continue;

      // Only consider uses of the phi.
      LiveRangeQuery LRQ(LI, LIS->getInstructionIndex(UI->SU->getInstr()));
      if (!LRQ.valueIn()->isPHIDef())
        continue;

      // Assume that a path spanning two iterations is a cycle, which could
      // overestimate in strange cases. This allows cyclic latency to be
      // estimated as the minimum slack of the vreg's depth or height.
      unsigned CyclicLatency = 0;
      if (LiveOutDepth > UI->SU->getDepth())
        CyclicLatency = LiveOutDepth - UI->SU->getDepth();

      unsigned LiveInHeight = UI->SU->getHeight() + DefSU->Latency;
      if (LiveInHeight > LiveOutHeight) {
        if (LiveInHeight - LiveOutHeight < CyclicLatency)
          CyclicLatency = LiveInHeight - LiveOutHeight;
      }
      else
        CyclicLatency = 0;

      DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
            << UI->SU->NodeNum << ") = " << CyclicLatency << "c\n");
      if (CyclicLatency > MaxCyclicLatency)
        MaxCyclicLatency = CyclicLatency;
    }
  }
  DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
  return MaxCyclicLatency;
}

/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = NULL;
  NextClusterPred = NULL;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
    SchedImpl->releaseTopNode(*I);
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;

  if (ShouldTrackPressure) {
    assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
    TopRPTracker.setPos(CurrentTop);
  }
}

/// Move an instruction and update register pressure.
void ScheduleDAGMI::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    if (ShouldTrackPressure) {
      // Update top scheduled pressure.
      TopRPTracker.advance();
      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
      updateScheduledPressure(TopRPTracker.getPressure().MaxSetPressure);
    }
  }
  else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
    }
    if (ShouldTrackPressure) {
      // Update bottom scheduled pressure.
      SmallVector<unsigned, 8> LiveUses;
      BotRPTracker.recede(&LiveUses);
      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
      updatePressureDiffs(LiveUses);
      updateScheduledPressure(BotRPTracker.getPressure().MaxSetPressure);
    }
  }
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;

  if (DFSResult) {
    unsigned SubtreeID = DFSResult->getSubtreeID(SU);
    if (!ScheduledTrees.test(SubtreeID)) {
      ScheduledTrees.set(SubtreeID);
      DFSResult->scheduleTree(SubtreeID);
      SchedImpl->scheduleTree(SubtreeID);
    }
  }

  // Notify the scheduling strategy after updating the DAG.
  SchedImpl->schedNode(SU, IsTopNode);
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
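  // DbgValues (filled in by the DAG builder) pairs each remaining DBG_VALUE
  // with the instruction it originally followed; walking it in reverse
  // re-attaches each DBG_VALUE after its predecessor's new position.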
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == llvm::prior(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = NULL;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// LoadClusterMutation - DAG post-processing to cluster loads.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between neighboring
/// loads.
class LoadClusterMutation : public ScheduleDAGMutation {
  struct LoadInfo {
    SUnit *SU;
    unsigned BaseReg;
    unsigned Offset;
    LoadInfo(SUnit *su, unsigned reg, unsigned ofs)
      : SU(su), BaseReg(reg), Offset(ofs) {}
  };
  static bool LoadInfoLess(const LoadClusterMutation::LoadInfo &LHS,
                           const LoadClusterMutation::LoadInfo &RHS);

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
public:
  LoadClusterMutation(const TargetInstrInfo *tii,
                      const TargetRegisterInfo *tri)
    : TII(tii), TRI(tri) {}

  virtual void apply(ScheduleDAGMI *DAG);
protected:
  void clusterNeighboringLoads(ArrayRef<SUnit*> Loads, ScheduleDAGMI *DAG);
};
} // anonymous

bool LoadClusterMutation::LoadInfoLess(
  const LoadClusterMutation::LoadInfo &LHS,
  const LoadClusterMutation::LoadInfo &RHS) {
  if (LHS.BaseReg != RHS.BaseReg)
    return LHS.BaseReg < RHS.BaseReg;
  return LHS.Offset < RHS.Offset;
}

void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
                                                  ScheduleDAGMI *DAG) {
  SmallVector<LoadClusterMutation::LoadInfo,32> LoadRecords;
  for (unsigned Idx = 0, End = Loads.size(); Idx != End; ++Idx) {
    SUnit *SU = Loads[Idx];
    unsigned BaseReg;
    unsigned Offset;
    if (TII->getLdStBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
      LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset));
  }
  if (LoadRecords.size() < 2)
    return;
  std::sort(LoadRecords.begin(), LoadRecords.end(), LoadInfoLess);
  unsigned ClusterLength = 1;
  for (unsigned Idx = 0, End = LoadRecords.size(); Idx < (End - 1); ++Idx) {
    if (LoadRecords[Idx].BaseReg != LoadRecords[Idx+1].BaseReg) {
      ClusterLength = 1;
      continue;
    }

    SUnit *SUa = LoadRecords[Idx].SU;
    SUnit *SUb = LoadRecords[Idx+1].SU;
    if (TII->shouldClusterLoads(SUa->getInstr(), SUb->getInstr(), ClusterLength)
        && DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {

      DEBUG(dbgs() << "Cluster loads SU(" << SUa->NodeNum << ") - SU("
            << SUb->NodeNum << ")\n");
      // Copy successor edges from SUa to SUb. Interleaving computation
      // dependent on SUa can prevent load combining due to register reuse.
      // Predecessor edges do not need to be copied from SUb to SUa since
      // nearby loads should have effectively the same inputs.
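      //
      // For example, if SUa and SUb load from [base] and [base+4], giving
      // each of SUa's successors an artificial edge from SUb discourages
      // scheduling those users between the two loads, so the loads can issue
      // back to back.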
      for (SUnit::const_succ_iterator
             SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
        if (SI->getSUnit() == SUb)
          continue;
        DEBUG(dbgs() << "  Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
        DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
      }
      ++ClusterLength;
    }
    else
      ClusterLength = 1;
  }
}

/// \brief Callback from DAG postProcessing to create cluster edges for loads.
void LoadClusterMutation::apply(ScheduleDAGMI *DAG) {
  // Map DAG NodeNum to store chain ID.
  DenseMap<unsigned, unsigned> StoreChainIDs;
  // Map each store chain to a set of dependent loads.
  SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->mayLoad())
      continue;
    unsigned ChainPredID = DAG->SUnits.size();
    for (SUnit::const_pred_iterator
           PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
      if (PI->isCtrl()) {
        ChainPredID = PI->getSUnit()->NodeNum;
        break;
      }
    }
    // Check if this chain-like pred has been seen
    // before. ChainPredID==MaxNodeID for loads at the top of the schedule.
    unsigned NumChains = StoreChainDependents.size();
    std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
      StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
    if (Result.second)
      StoreChainDependents.resize(NumChains + 1);
    StoreChainDependents[Result.first->second].push_back(SU);
  }
  // Iterate over the store chains.
  for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
    clusterNeighboringLoads(StoreChainDependents[Idx], DAG);
}

//===----------------------------------------------------------------------===//
// MacroFusion - DAG post-processing to encourage fusion of macro ops.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between instructions
/// that may be fused by the processor into a single operation.
class MacroFusion : public ScheduleDAGMutation {
  const TargetInstrInfo *TII;
public:
  MacroFusion(const TargetInstrInfo *tii): TII(tii) {}

  virtual void apply(ScheduleDAGMI *DAG);
};
} // anonymous

/// \brief Callback from DAG postProcessing to create cluster edges to
/// encourage fused operations.
void MacroFusion::apply(ScheduleDAGMI *DAG) {
  // For now, assume targets can only fuse with the branch.
  MachineInstr *Branch = DAG->ExitSU.getInstr();
  if (!Branch)
    return;

  for (unsigned Idx = DAG->SUnits.size(); Idx > 0;) {
    SUnit *SU = &DAG->SUnits[--Idx];
    if (!TII->shouldScheduleAdjacent(SU->getInstr(), Branch))
      continue;

    // Create a single weak edge from SU to ExitSU. The only effect is to cause
    // bottom-up scheduling to heavily prioritize the clustered SU. There is no
    // need to copy predecessor edges from ExitSU to SU, since top-down
    // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
    // of SU, we could create an artificial edge from the deepest root, but it
    // hasn't been needed yet.
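    //
    // For example, on x86 subtargets that fuse a compare with a conditional
    // branch, shouldScheduleAdjacent(CMP, Jcc) returns true; the cluster edge
    // then encourages the bottom-up scheduler to place the CMP immediately
    // before the branch so the pair stays adjacent for the decoder.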
    bool Success = DAG->addEdge(&DAG->ExitSU, SDep(SU, SDep::Cluster));
    (void)Success;
    assert(Success && "No DAG nodes should be reachable from ExitSU");

    DEBUG(dbgs() << "Macro Fuse SU(" << SU->NodeNum << ")\n");
    break;
  }
}

//===----------------------------------------------------------------------===//
// CopyConstrain - DAG post-processing to encourage copy elimination.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create weak edges from all uses of a copy to
/// the one use that defines the copy's source vreg, most likely an induction
/// variable increment.
class CopyConstrain : public ScheduleDAGMutation {
  // Transient state.
  SlotIndex RegionBeginIdx;
  // RegionEndIdx is the slot index of the last non-debug instruction in the
  // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
  SlotIndex RegionEndIdx;
public:
  CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}

  virtual void apply(ScheduleDAGMI *DAG);

protected:
  void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMI *DAG);
};
} // anonymous

/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
/// I0:     = dst
/// I1: src = ...
/// I2:     = dst
/// I3: dst = src (copy)
/// (create pred->succ edges I0->I1, I2->I1)
///
/// 2) Local copy:
/// I0: dst = src (copy)
/// I1:     = dst
/// I2: src = ...
/// I3:     = dst
/// (create pred->succ edges I1->I2, I3->I2)
///
/// Although the MachineScheduler is currently constrained to single blocks,
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMI *DAG) {
  LiveIntervals *LIS = DAG->getLIS();
  MachineInstr *Copy = CopySU->getInstr();

  // Check for pure vreg copies.
  unsigned SrcReg = Copy->getOperand(1).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
    return;

  unsigned DstReg = Copy->getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(DstReg))
    return;

  // Check if either the dest or source is local. If it's live across a back
  // edge, it's not local. Note that if both vregs are live across the back
  // edge, we cannot successfully constrain the copy without cyclic scheduling.
  unsigned LocalReg = DstReg;
  unsigned GlobalReg = SrcReg;
  LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
  if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
    LocalReg = SrcReg;
    GlobalReg = DstReg;
    LocalLI = &LIS->getInterval(LocalReg);
    if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
      return;
  }
  LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);

  // Find the global segment after the start of the local LI.
  LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
  // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
  // local live range.
  // We could create edges from other global uses to the local start, but the
  // coalescer should have already eliminated these cases, so don't bother
  // dealing with it.
  if (GlobalSegment == GlobalLI->end())
    return;

  // If GlobalSegment is killed at the LocalLI->start, the call to find()
  // returned the next global segment. But if GlobalSegment overlaps with
  // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
  // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
  if (GlobalSegment->contains(LocalLI->beginIndex()))
    ++GlobalSegment;

  if (GlobalSegment == GlobalLI->end())
    return;

  // Check if GlobalLI contains a hole in the vicinity of LocalLI.
  if (GlobalSegment != GlobalLI->begin()) {
    // Two address defs have no hole.
    if (SlotIndex::isSameInstr(llvm::prior(GlobalSegment)->end,
                               GlobalSegment->start)) {
      return;
    }
    // If the prior global segment may be defined by the same two-address
    // instruction that also defines LocalLI, then we can't make a hole here.
    if (SlotIndex::isSameInstr(llvm::prior(GlobalSegment)->start,
                               LocalLI->beginIndex())) {
      return;
    }
    // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
    // it would be a disconnected component in the live range.
    assert(llvm::prior(GlobalSegment)->start < LocalLI->beginIndex() &&
           "Disconnected LRG within the scheduling region.");
  }
  MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
  if (!GlobalDef)
    return;

  SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
  if (!GlobalSU)
    return;

  // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
  // constraining the uses of the last local def to precede GlobalDef.
  SmallVector<SUnit*,8> LocalUses;
  const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
  MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
  SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
  for (SUnit::const_succ_iterator
         I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
       I != E; ++I) {
    if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
      continue;
    if (I->getSUnit() == GlobalSU)
      continue;
    if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
      return;
    LocalUses.push_back(I->getSUnit());
  }
  // Open the top of the GlobalLI hole by constraining any earlier global uses
  // to precede the start of LocalLI.
  SmallVector<SUnit*,8> GlobalUses;
  MachineInstr *FirstLocalDef =
    LIS->getInstructionFromIndex(LocalLI->beginIndex());
  SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
  for (SUnit::const_pred_iterator
         I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
    if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
      continue;
    if (I->getSUnit() == FirstLocalSU)
      continue;
    if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
      return;
    GlobalUses.push_back(I->getSUnit());
  }
  DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
  // Add the weak edges.
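  // A weak edge does not enforce ordering the way a data dependence does; it
  // only biases the strategy (see the Weak candidate reason below) toward
  // scheduling the copy's uses near the global def so the copy can be
  // eliminated.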
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Local use SU(" << (*I)->NodeNum << ") -> SU("
          << GlobalSU->NodeNum << ")\n");
    DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
  }
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Global use SU(" << (*I)->NodeNum << ") -> SU("
          << FirstLocalSU->NodeNum << ")\n");
    DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
  }
}

/// \brief Callback from DAG postProcessing to create weak edges to encourage
/// copy elimination.
void CopyConstrain::apply(ScheduleDAGMI *DAG) {
  MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
  if (FirstPos == DAG->end())
    return;
  RegionBeginIdx = DAG->getLIS()->getInstructionIndex(&*FirstPos);
  RegionEndIdx = DAG->getLIS()->getInstructionIndex(
    &*priorNonDebug(DAG->end(), DAG->begin()));

  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->isCopy())
      continue;

    constrainLocalCopy(SU, DAG);
  }
}

//===----------------------------------------------------------------------===//
// ConvergingScheduler - Implementation of the generic MachineSchedStrategy.
//===----------------------------------------------------------------------===//

namespace {
/// ConvergingScheduler shrinks the unscheduled zone using heuristics to balance
/// the schedule.
class ConvergingScheduler : public MachineSchedStrategy {
public:
  /// Represent the type of SchedCandidate found within a single queue.
  /// pickNodeBidirectional depends on these listed by decreasing priority.
  enum CandReason {
    NoCand, PhysRegCopy, RegExcess, RegCritical, Cluster, Weak, RegMax,
    ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce,
    TopDepthReduce, TopPathReduce, NextDefUse, NodeOrder};

#ifndef NDEBUG
  static const char *getReasonStr(ConvergingScheduler::CandReason Reason);
#endif

  /// Policy for scheduling the next instruction in the candidate's zone.
  struct CandPolicy {
    bool ReduceLatency;
    unsigned ReduceResIdx;
    unsigned DemandResIdx;

    CandPolicy(): ReduceLatency(false), ReduceResIdx(0), DemandResIdx(0) {}
  };

  /// Status of an instruction's critical resource consumption.
  struct SchedResourceDelta {
    // Count critical resources in the scheduled region required by SU.
    unsigned CritResources;

    // Count critical resources from another region consumed by SU.
    unsigned DemandedResources;

    SchedResourceDelta(): CritResources(0), DemandedResources(0) {}

    bool operator==(const SchedResourceDelta &RHS) const {
      return CritResources == RHS.CritResources
        && DemandedResources == RHS.DemandedResources;
    }
    bool operator!=(const SchedResourceDelta &RHS) const {
      return !operator==(RHS);
    }
  };

  /// Store the state used by ConvergingScheduler heuristics, required for the
  /// lifetime of one invocation of pickNode().
  struct SchedCandidate {
    CandPolicy Policy;

    // The best SUnit candidate.
    SUnit *SU;

    // The reason for this candidate.
    CandReason Reason;

    // Set of reasons that apply to multiple candidates.
    uint32_t RepeatReasonSet;

    // Register pressure values for the best candidate.
    RegPressureDelta RPDelta;

    // Critical resource consumption of the best candidate.
    SchedResourceDelta ResDelta;

    SchedCandidate(const CandPolicy &policy)
      : Policy(policy), SU(NULL), Reason(NoCand), RepeatReasonSet(0) {}

    bool isValid() const { return SU; }

    // Copy the status of another candidate without changing policy.
    void setBest(SchedCandidate &Best) {
      assert(Best.Reason != NoCand && "uninitialized Sched candidate");
      SU = Best.SU;
      Reason = Best.Reason;
      RPDelta = Best.RPDelta;
      ResDelta = Best.ResDelta;
    }

    bool isRepeat(CandReason R) { return RepeatReasonSet & (1 << R); }
    void setRepeat(CandReason R) { RepeatReasonSet |= (1 << R); }

    void initResourceDelta(const ScheduleDAGMI *DAG,
                           const TargetSchedModel *SchedModel);
  };

  /// Summarize the unscheduled region.
  struct SchedRemainder {
    // Critical path through the DAG in expected latency.
    unsigned CriticalPath;
    unsigned CyclicCritPath;

    // Scaled count of micro-ops left to schedule.
    unsigned RemIssueCount;

    bool IsAcyclicLatencyLimited;

    // Unscheduled resources
    SmallVector<unsigned, 16> RemainingCounts;

    void reset() {
      CriticalPath = 0;
      CyclicCritPath = 0;
      RemIssueCount = 0;
      IsAcyclicLatencyLimited = false;
      RemainingCounts.clear();
    }

    SchedRemainder() { reset(); }

    void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);
  };

  /// Each scheduling boundary is associated with ready queues. It tracks the
  /// current cycle in the direction of movement, and maintains the state
  /// of "hazards" and other interlocks at the current cycle.
  struct SchedBoundary {
    ScheduleDAGMI *DAG;
    const TargetSchedModel *SchedModel;
    SchedRemainder *Rem;

    ReadyQueue Available;
    ReadyQueue Pending;
    bool CheckPending;

    // For heuristics, keep a list of the nodes that immediately depend on the
    // most recently scheduled node.
    SmallPtrSet<const SUnit*, 8> NextSUs;

    ScheduleHazardRecognizer *HazardRec;

    /// Number of cycles it takes to issue the instructions scheduled in this
    /// zone. It is defined as: scheduled-micro-ops / issue-width + stalls.
    /// See getStalls().
    unsigned CurrCycle;

    /// Micro-ops issued in the current cycle
    unsigned CurrMOps;

    /// MinReadyCycle - Cycle of the soonest available instruction.
    unsigned MinReadyCycle;

    // The expected latency of the critical path in this scheduled zone.
    unsigned ExpectedLatency;

    // The latency of dependence chains leading into this zone.
    // For each node scheduled bottom-up: DLat = max DLat, N.Depth.
    // For each cycle scheduled: DLat -= 1.
    unsigned DependentLatency;

    /// Count the scheduled (issued) micro-ops that can be retired by
    /// time=CurrCycle assuming the first scheduled instr is retired at time=0.
    unsigned RetiredMOps;

    // Count scheduled resources that have been executed. Resources are
    // considered executed if they become ready in the time that it takes to
    // saturate any resource including the one in question. Counts are scaled
    // for direct comparison with other resources. Counts can be compared with
    // MOps * getMicroOpFactor and Latency * getLatencyFactor.
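    //
    // For example (hypothetical machine): with an issue width of 4 micro-ops
    // per cycle but only 2 load/store units, the load/store resource factor
    // is twice the micro-op factor, so 10 memory ops consume as much of the
    // zone's scaled bandwidth as 20 micro-ops.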
    SmallVector<unsigned, 16> ExecutedResCounts;

    /// Cache the max count for a single resource.
    unsigned MaxExecutedResCount;

    // Cache the critical resources ID in this scheduled zone.
    unsigned ZoneCritResIdx;

    // Is the scheduled region resource limited vs. latency limited.
    bool IsResourceLimited;

#ifndef NDEBUG
    // Remember the greatest operand latency as an upper bound on the number of
    // times we should retry the pending queue because of a hazard.
    unsigned MaxObservedLatency;
#endif

    void reset() {
      // A new HazardRec is created for each DAG and owned by SchedBoundary.
      // Destroying and reconstructing it is very expensive though. So keep
      // invalid, placeholder HazardRecs.
      if (HazardRec && HazardRec->isEnabled()) {
        delete HazardRec;
        HazardRec = 0;
      }
      Available.clear();
      Pending.clear();
      CheckPending = false;
      NextSUs.clear();
      CurrCycle = 0;
      CurrMOps = 0;
      MinReadyCycle = UINT_MAX;
      ExpectedLatency = 0;
      DependentLatency = 0;
      RetiredMOps = 0;
      MaxExecutedResCount = 0;
      ZoneCritResIdx = 0;
      IsResourceLimited = false;
#ifndef NDEBUG
      MaxObservedLatency = 0;
#endif
      // Reserve a zero-count for invalid CritResIdx.
      ExecutedResCounts.resize(1);
      assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
    }

    /// Pending queues extend the ready queues with the same ID and the
    /// PendingFlag set.
    SchedBoundary(unsigned ID, const Twine &Name):
      DAG(0), SchedModel(0), Rem(0), Available(ID, Name+".A"),
      Pending(ID << ConvergingScheduler::LogMaxQID, Name+".P"),
      HazardRec(0) {
      reset();
    }

    ~SchedBoundary() { delete HazardRec; }

    void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel,
              SchedRemainder *rem);

    bool isTop() const {
      return Available.getID() == ConvergingScheduler::TopQID;
    }

#ifndef NDEBUG
    const char *getResourceName(unsigned PIdx) {
      if (!PIdx)
        return "MOps";
      return SchedModel->getProcResource(PIdx)->Name;
    }
#endif

    /// Get the number of latency cycles "covered" by the scheduled
    /// instructions. This is the larger of the critical path within the zone
    /// and the number of cycles required to issue the instructions.
    unsigned getScheduledLatency() const {
      return std::max(ExpectedLatency, CurrCycle);
    }

    unsigned getUnscheduledLatency(SUnit *SU) const {
      return isTop() ? SU->getHeight() : SU->getDepth();
    }

    unsigned getResourceCount(unsigned ResIdx) const {
      return ExecutedResCounts[ResIdx];
    }

    /// Get the scaled count of scheduled micro-ops and resources, including
    /// executed resources.
    unsigned getCriticalCount() const {
      if (!ZoneCritResIdx)
        return RetiredMOps * SchedModel->getMicroOpFactor();
      return getResourceCount(ZoneCritResIdx);
    }

    /// Get a scaled count for the minimum execution time of the scheduled
    /// micro-ops that are ready to execute by getExecutedCount. Notice the
    /// feedback loop.
1557 unsigned getExecutedCount() const { 1558 return std::max(CurrCycle * SchedModel->getLatencyFactor(), 1559 MaxExecutedResCount); 1560 } 1561 1562 bool checkHazard(SUnit *SU); 1563 1564 unsigned findMaxLatency(ArrayRef<SUnit*> ReadySUs); 1565 1566 unsigned getOtherResourceCount(unsigned &OtherCritIdx); 1567 1568 void setPolicy(CandPolicy &Policy, SchedBoundary &OtherZone); 1569 1570 void releaseNode(SUnit *SU, unsigned ReadyCycle); 1571 1572 void bumpCycle(unsigned NextCycle); 1573 1574 void incExecutedResources(unsigned PIdx, unsigned Count); 1575 1576 unsigned countResource(unsigned PIdx, unsigned Cycles, unsigned ReadyCycle); 1577 1578 void bumpNode(SUnit *SU); 1579 1580 void releasePending(); 1581 1582 void removeReady(SUnit *SU); 1583 1584 SUnit *pickOnlyChoice(); 1585 1586 #ifndef NDEBUG 1587 void dumpScheduledState(); 1588 #endif 1589 }; 1590 1591 private: 1592 const MachineSchedContext *Context; 1593 ScheduleDAGMI *DAG; 1594 const TargetSchedModel *SchedModel; 1595 const TargetRegisterInfo *TRI; 1596 1597 // State of the top and bottom scheduled instruction boundaries. 1598 SchedRemainder Rem; 1599 SchedBoundary Top; 1600 SchedBoundary Bot; 1601 1602 MachineSchedPolicy RegionPolicy; 1603 public: 1604 /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both) 1605 enum { 1606 TopQID = 1, 1607 BotQID = 2, 1608 LogMaxQID = 2 1609 }; 1610 1611 ConvergingScheduler(const MachineSchedContext *C): 1612 Context(C), DAG(0), SchedModel(0), TRI(0), 1613 Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {} 1614 1615 virtual void initPolicy(MachineBasicBlock::iterator Begin, 1616 MachineBasicBlock::iterator End, 1617 unsigned NumRegionInstrs); 1618 1619 bool shouldTrackPressure() const { return RegionPolicy.ShouldTrackPressure; } 1620 1621 virtual void initialize(ScheduleDAGMI *dag); 1622 1623 virtual SUnit *pickNode(bool &IsTopNode); 1624 1625 virtual void schedNode(SUnit *SU, bool IsTopNode); 1626 1627 virtual void releaseTopNode(SUnit *SU); 1628 1629 virtual void releaseBottomNode(SUnit *SU); 1630 1631 virtual void registerRoots(); 1632 1633 protected: 1634 void checkAcyclicLatency(); 1635 1636 void tryCandidate(SchedCandidate &Cand, 1637 SchedCandidate &TryCand, 1638 SchedBoundary &Zone, 1639 const RegPressureTracker &RPTracker, 1640 RegPressureTracker &TempTracker); 1641 1642 SUnit *pickNodeBidirectional(bool &IsTopNode); 1643 1644 void pickNodeFromQueue(SchedBoundary &Zone, 1645 const RegPressureTracker &RPTracker, 1646 SchedCandidate &Candidate); 1647 1648 void reschedulePhysRegCopies(SUnit *SU, bool isTop); 1649 1650 #ifndef NDEBUG 1651 void traceCandidate(const SchedCandidate &Cand); 1652 #endif 1653 }; 1654 } // namespace 1655 1656 void ConvergingScheduler::SchedRemainder:: 1657 init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) { 1658 reset(); 1659 if (!SchedModel->hasInstrSchedModel()) 1660 return; 1661 RemainingCounts.resize(SchedModel->getNumProcResourceKinds()); 1662 for (std::vector<SUnit>::iterator 1663 I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) { 1664 const MCSchedClassDesc *SC = DAG->getSchedClass(&*I); 1665 RemIssueCount += SchedModel->getNumMicroOps(I->getInstr(), SC) 1666 * SchedModel->getMicroOpFactor(); 1667 for (TargetSchedModel::ProcResIter 1668 PI = SchedModel->getWriteProcResBegin(SC), 1669 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) { 1670 unsigned PIdx = PI->ProcResourceIdx; 1671 unsigned Factor = SchedModel->getResourceFactor(PIdx); 1672 RemainingCounts[PIdx] += (Factor * PI->Cycles); 1673 } 1674 } 1675 } 1676 1677 void 
ConvergingScheduler::SchedBoundary:: 1678 init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) { 1679 reset(); 1680 DAG = dag; 1681 SchedModel = smodel; 1682 Rem = rem; 1683 if (SchedModel->hasInstrSchedModel()) 1684 ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds()); 1685 } 1686 1687 /// Initialize the per-region scheduling policy. 1688 void ConvergingScheduler::initPolicy(MachineBasicBlock::iterator Begin, 1689 MachineBasicBlock::iterator End, 1690 unsigned NumRegionInstrs) { 1691 const TargetMachine &TM = Context->MF->getTarget(); 1692 1693 // Avoid setting up the register pressure tracker for small regions to save 1694 // compile time. As a rough heuristic, only track pressure when the number of 1695 // schedulable instructions exceeds half the integer register file. 1696 unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs( 1697 TM.getTargetLowering()->getRegClassFor(MVT::i32)); 1698 1699 RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2); 1700 1701 // For generic targets, we default to bottom-up, because it's simpler and more 1702 // compile-time optimizations have been implemented in that direction. 1703 RegionPolicy.OnlyBottomUp = true; 1704 1705 // Allow the subtarget to override default policy. 1706 const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>(); 1707 ST.overrideSchedPolicy(RegionPolicy, Begin, End, NumRegionInstrs); 1708 1709 // After subtarget overrides, apply command line options. 1710 if (!EnableRegPressure) 1711 RegionPolicy.ShouldTrackPressure = false; 1712 1713 // Check -misched-topdown/bottomup can force or unforce scheduling direction. 1714 // e.g. -misched-bottomup=false allows scheduling in both directions. 1715 assert((!ForceTopDown || !ForceBottomUp) && 1716 "-misched-topdown incompatible with -misched-bottomup"); 1717 if (ForceBottomUp.getNumOccurrences() > 0) { 1718 RegionPolicy.OnlyBottomUp = ForceBottomUp; 1719 if (RegionPolicy.OnlyBottomUp) 1720 RegionPolicy.OnlyTopDown = false; 1721 } 1722 if (ForceTopDown.getNumOccurrences() > 0) { 1723 RegionPolicy.OnlyTopDown = ForceTopDown; 1724 if (RegionPolicy.OnlyTopDown) 1725 RegionPolicy.OnlyBottomUp = false; 1726 } 1727 } 1728 1729 void ConvergingScheduler::initialize(ScheduleDAGMI *dag) { 1730 DAG = dag; 1731 SchedModel = DAG->getSchedModel(); 1732 TRI = DAG->TRI; 1733 1734 Rem.init(DAG, SchedModel); 1735 Top.init(DAG, SchedModel, &Rem); 1736 Bot.init(DAG, SchedModel, &Rem); 1737 1738 // Initialize resource counts. 1739 1740 // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or 1741 // are disabled, then these HazardRecs will be disabled. 
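  // A disabled recognizer reports isEnabled() == false, in which case
  // checkHazard() below falls back on the scheduler's own micro-op counters.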
1742   const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
1743   const TargetMachine &TM = DAG->MF.getTarget();
1744   if (!Top.HazardRec) {
1745     Top.HazardRec =
1746       TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
1747   }
1748   if (!Bot.HazardRec) {
1749     Bot.HazardRec =
1750       TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
1751   }
1752 }
1753
1754 void ConvergingScheduler::releaseTopNode(SUnit *SU) {
1755   if (SU->isScheduled)
1756     return;
1757
1758   for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1759        I != E; ++I) {
1760     if (I->isWeak())
1761       continue;
1762     unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle;
1763     unsigned Latency = I->getLatency();
1764 #ifndef NDEBUG
1765     Top.MaxObservedLatency = std::max(Latency, Top.MaxObservedLatency);
1766 #endif
1767     if (SU->TopReadyCycle < PredReadyCycle + Latency)
1768       SU->TopReadyCycle = PredReadyCycle + Latency;
1769   }
1770   Top.releaseNode(SU, SU->TopReadyCycle);
1771 }
1772
1773 void ConvergingScheduler::releaseBottomNode(SUnit *SU) {
1774   if (SU->isScheduled)
1775     return;
1776
1777   assert(SU->getInstr() && "Scheduled SUnit must have instr");
1778
1779   for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1780        I != E; ++I) {
1781     if (I->isWeak())
1782       continue;
1783     unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
1784     unsigned Latency = I->getLatency();
1785 #ifndef NDEBUG
1786     Bot.MaxObservedLatency = std::max(Latency, Bot.MaxObservedLatency);
1787 #endif
1788     if (SU->BotReadyCycle < SuccReadyCycle + Latency)
1789       SU->BotReadyCycle = SuccReadyCycle + Latency;
1790   }
1791   Bot.releaseNode(SU, SU->BotReadyCycle);
1792 }
1793
1794 /// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
1795 /// critical path by more cycles than it takes to drain the instruction buffer.
1796 /// We estimate an upper bound on in-flight instructions as:
1797 ///
1798 ///   CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
1799 ///   InFlightIterations = AcyclicPath / CyclesPerIteration
1800 ///   InFlightResources = InFlightIterations * LoopResources
1801 ///
1802 /// TODO: Check execution resources in addition to IssueCount.
1803 void ConvergingScheduler::checkAcyclicLatency() {
1804   if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
1805     return;
1806
1807   // Scaled number of cycles per loop iteration.
1808   unsigned IterCount =
1809     std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
1810              Rem.RemIssueCount);
1811   // Scaled acyclic critical path.
1812   unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
1813   // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
1814   unsigned InFlightCount =
1815     (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
1816   unsigned BufferLimit =
1817     SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();
1818
1819   Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;
1820
1821   DEBUG(dbgs() << "IssueCycles="
1822         << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
1823         << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
1824         << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
1825         << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
1826         << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
1827         if (Rem.IsAcyclicLatencyLimited)
1828           dbgs() << "  ACYCLIC LATENCY LIMIT\n");
1829 }
1830
1831 void ConvergingScheduler::registerRoots() {
1832   Rem.CriticalPath = DAG->ExitSU.getDepth();
1833
1834   // Some roots may not feed into ExitSU. Check all of them just in case.
1835   for (std::vector<SUnit*>::const_iterator
1836          I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
1837     if ((*I)->getDepth() > Rem.CriticalPath)
1838       Rem.CriticalPath = (*I)->getDepth();
1839   }
1840   DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n');
1841
1842   if (EnableCyclicPath) {
1843     Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
1844     checkAcyclicLatency();
1845   }
1846 }
1847
1848 /// Does this SU have a hazard within the current instruction group?
1849 ///
1850 /// The scheduler supports two modes of hazard recognition. The first is the
1851 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
1852 /// supports highly complicated in-order reservation tables
1853 /// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
1854 ///
1855 /// The second is a streamlined mechanism that checks for hazards based on
1856 /// simple counters that the scheduler itself maintains. It explicitly checks
1857 /// for instruction dispatch limitations, including the number of micro-ops that
1858 /// can dispatch per cycle.
1859 ///
1860 /// TODO: Also check whether the SU must start a new group.
1861 bool ConvergingScheduler::SchedBoundary::checkHazard(SUnit *SU) {
1862   if (HazardRec->isEnabled())
1863     return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard;
1864
1865   unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
1866   if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
1867     DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
1868           << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
1869     return true;
1870   }
1871   return false;
1872 }
1873
1874 // Find the unscheduled node in ReadySUs with the highest latency.
1875 unsigned ConvergingScheduler::SchedBoundary::
1876 findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
1877   SUnit *LateSU = 0;
1878   unsigned RemLatency = 0;
1879   for (ArrayRef<SUnit*>::iterator I = ReadySUs.begin(), E = ReadySUs.end();
1880        I != E; ++I) {
1881     unsigned L = getUnscheduledLatency(*I);
1882     if (L > RemLatency) {
1883       RemLatency = L;
1884       LateSU = *I;
1885     }
1886   }
1887   if (LateSU) {
1888     DEBUG(dbgs() << Available.getName() << " RemLatency SU("
1889           << LateSU->NodeNum << ") " << RemLatency << "c\n");
1890   }
1891   return RemLatency;
1892 }
1893
1894 // Count resources in this zone and the remaining unscheduled
1895 // instructions. Return the max count, scaled. Set OtherCritIdx to the critical
1896 // resource index, or zero if the zone is issue limited.
1897 unsigned ConvergingScheduler::SchedBoundary::
1898 getOtherResourceCount(unsigned &OtherCritIdx) {
1899   OtherCritIdx = 0;
1900   if (!SchedModel->hasInstrSchedModel())
1901     return 0;
1902
1903   unsigned OtherCritCount = Rem->RemIssueCount
1904     + (RetiredMOps * SchedModel->getMicroOpFactor());
1905   DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
1906         << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
1907   for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
1908        PIdx != PEnd; ++PIdx) {
1909     unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
1910     if (OtherCount > OtherCritCount) {
1911       OtherCritCount = OtherCount;
1912       OtherCritIdx = PIdx;
1913     }
1914   }
1915   if (OtherCritIdx) {
1916     DEBUG(dbgs() << "  " << Available.getName() << " + Remain CritRes: "
1917           << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
1918           << " " << getResourceName(OtherCritIdx) << "\n");
1919   }
1920   return OtherCritCount;
1921 }
1922
1923 /// Set the CandPolicy for this zone given the current resources and latencies
1924 /// inside and outside the zone.
1925 void ConvergingScheduler::SchedBoundary::setPolicy(CandPolicy &Policy,
1926                                                    SchedBoundary &OtherZone) {
1927   // Now that potential stalls have been considered, apply preemptive heuristics
1928   // based on the total latency and resources inside and outside this
1929   // zone.
1930
1931   // Compute remaining latency. We need this both to determine whether the
1932   // overall schedule has become latency-limited and whether the instructions
1933   // outside this zone are resource or latency limited.
1934   //
1935   // The "dependent" latency is updated incrementally during scheduling as the
1936   // max height/depth of scheduled nodes minus the cycles since it was
1937   // scheduled:
1938   //   DLat = max(N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
1939   //
1940   // The "independent" latency is the max ready queue depth:
1941   //   ILat = max N.depth for N in Available|Pending
1942   //
1943   // RemainingLatency is the greater of independent and dependent latency.
1944   unsigned RemLatency = DependentLatency;
1945   RemLatency = std::max(RemLatency, findMaxLatency(Available.elements()));
1946   RemLatency = std::max(RemLatency, findMaxLatency(Pending.elements()));
1947
1948   // Compute the critical resource outside the zone.
1949   unsigned OtherCritIdx;
1950   unsigned OtherCount = OtherZone.getOtherResourceCount(OtherCritIdx);
1951
1952   bool OtherResLimited = false;
1953   if (SchedModel->hasInstrSchedModel()) {
1954     unsigned LFactor = SchedModel->getLatencyFactor();
1955     OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
1956   }
1957   if (!OtherResLimited && (RemLatency + CurrCycle > Rem->CriticalPath)) {
1958     Policy.ReduceLatency |= true;
1959     DEBUG(dbgs() << "  " << Available.getName() << " RemainingLatency "
1960           << RemLatency << " + " << CurrCycle << "c > CritPath "
1961           << Rem->CriticalPath << "\n");
1962   }
1963   // If the same resource is limiting inside and outside the zone, do nothing.
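  // (In that case reducing the resource here and demanding it there would
  // cancel out, so neither ReduceResIdx nor DemandResIdx is set.)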
1964 if (ZoneCritResIdx == OtherCritIdx) 1965 return; 1966 1967 DEBUG( 1968 if (IsResourceLimited) { 1969 dbgs() << " " << Available.getName() << " ResourceLimited: " 1970 << getResourceName(ZoneCritResIdx) << "\n"; 1971 } 1972 if (OtherResLimited) 1973 dbgs() << " RemainingLimit: " << getResourceName(OtherCritIdx) << "\n"; 1974 if (!IsResourceLimited && !OtherResLimited) 1975 dbgs() << " Latency limited both directions.\n"); 1976 1977 if (IsResourceLimited && !Policy.ReduceResIdx) 1978 Policy.ReduceResIdx = ZoneCritResIdx; 1979 1980 if (OtherResLimited) 1981 Policy.DemandResIdx = OtherCritIdx; 1982 } 1983 1984 void ConvergingScheduler::SchedBoundary::releaseNode(SUnit *SU, 1985 unsigned ReadyCycle) { 1986 if (ReadyCycle < MinReadyCycle) 1987 MinReadyCycle = ReadyCycle; 1988 1989 // Check for interlocks first. For the purpose of other heuristics, an 1990 // instruction that cannot issue appears as if it's not in the ReadyQueue. 1991 bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0; 1992 if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU)) 1993 Pending.push(SU); 1994 else 1995 Available.push(SU); 1996 1997 // Record this node as an immediate dependent of the scheduled node. 1998 NextSUs.insert(SU); 1999 } 2000 2001 /// Move the boundary of scheduled code by one cycle. 2002 void ConvergingScheduler::SchedBoundary::bumpCycle(unsigned NextCycle) { 2003 if (SchedModel->getMicroOpBufferSize() == 0) { 2004 assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized"); 2005 if (MinReadyCycle > NextCycle) 2006 NextCycle = MinReadyCycle; 2007 } 2008 // Update the current micro-ops, which will issue in the next cycle. 2009 unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle); 2010 CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps; 2011 2012 // Decrement DependentLatency based on the next cycle. 2013 if ((NextCycle - CurrCycle) > DependentLatency) 2014 DependentLatency = 0; 2015 else 2016 DependentLatency -= (NextCycle - CurrCycle); 2017 2018 if (!HazardRec->isEnabled()) { 2019 // Bypass HazardRec virtual calls. 2020 CurrCycle = NextCycle; 2021 } 2022 else { 2023 // Bypass getHazardType calls in case of long latency. 2024 for (; CurrCycle != NextCycle; ++CurrCycle) { 2025 if (isTop()) 2026 HazardRec->AdvanceCycle(); 2027 else 2028 HazardRec->RecedeCycle(); 2029 } 2030 } 2031 CheckPending = true; 2032 unsigned LFactor = SchedModel->getLatencyFactor(); 2033 IsResourceLimited = 2034 (int)(getCriticalCount() - (getScheduledLatency() * LFactor)) 2035 > (int)LFactor; 2036 2037 DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n'); 2038 } 2039 2040 void ConvergingScheduler::SchedBoundary::incExecutedResources(unsigned PIdx, 2041 unsigned Count) { 2042 ExecutedResCounts[PIdx] += Count; 2043 if (ExecutedResCounts[PIdx] > MaxExecutedResCount) 2044 MaxExecutedResCount = ExecutedResCounts[PIdx]; 2045 } 2046 2047 /// Add the given processor resource to this scheduled zone. 2048 /// 2049 /// \param Cycles indicates the number of consecutive (non-pipelined) cycles 2050 /// during which this resource is consumed. 2051 /// 2052 /// \return the next cycle at which the instruction may execute without 2053 /// oversubscribing resources. 
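/// For example (hypothetical numbers): a resource with factor 3 consumed for
/// 2 cycles adds a scaled count of 6, which is directly comparable with the
/// scaled micro-op count when selecting the zone's critical resource.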
2054 unsigned ConvergingScheduler::SchedBoundary::
2055 countResource(unsigned PIdx, unsigned Cycles, unsigned ReadyCycle) {
2056   unsigned Factor = SchedModel->getResourceFactor(PIdx);
2057   unsigned Count = Factor * Cycles;
2058   DEBUG(dbgs() << "  " << getResourceName(PIdx)
2059         << " +" << Cycles << "x" << Factor << "u\n");
2060
2061   // Update the executed resource counts.
2062   incExecutedResources(PIdx, Count);
2063   assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
2064   Rem->RemainingCounts[PIdx] -= Count;
2065
2066   // Check if this resource exceeds the current critical resource. If so, it
2067   // becomes the critical resource.
2068   if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
2069     ZoneCritResIdx = PIdx;
2070     DEBUG(dbgs() << "  *** Critical resource "
2071           << getResourceName(PIdx) << ": "
2072           << getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
2073   }
2074   // TODO: We don't yet model reserved resources. It's not hard though.
2075   return CurrCycle;
2076 }
2077
2078 /// Move the boundary of scheduled code by one SUnit.
2079 void ConvergingScheduler::SchedBoundary::bumpNode(SUnit *SU) {
2080   // Update the reservation table.
2081   if (HazardRec->isEnabled()) {
2082     if (!isTop() && SU->isCall) {
2083       // Calls are scheduled with their preceding instructions. For bottom-up
2084       // scheduling, clear the pipeline state before emitting.
2085       HazardRec->Reset();
2086     }
2087     HazardRec->EmitInstruction(SU);
2088   }
2089   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2090   unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
2091   CurrMOps += IncMOps;
2092   // checkHazard prevents scheduling multiple instructions per cycle that exceed
2093   // issue width. However, we commonly reach the maximum. In this case
2094   // opportunistically bump the cycle to avoid uselessly checking everything in
2095   // the readyQ. Furthermore, a single instruction may produce more than one
2096   // cycle's worth of micro-ops.
2097   //
2098   // TODO: Also check if this SU must end a dispatch group.
2099   unsigned NextCycle = CurrCycle;
2100   if (CurrMOps >= SchedModel->getIssueWidth()) {
2101     ++NextCycle;
2102     DEBUG(dbgs() << "  *** Max MOps " << CurrMOps
2103           << " at cycle " << CurrCycle << '\n');
2104   }
2105   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
2106   DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");
2107
2108   switch (SchedModel->getMicroOpBufferSize()) {
2109   case 0:
2110     assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
2111     break;
2112   case 1:
2113     if (ReadyCycle > NextCycle) {
2114       NextCycle = ReadyCycle;
2115       DEBUG(dbgs() << "  *** Stall until: " << ReadyCycle << "\n");
2116     }
2117     break;
2118   default:
2119     // We don't currently model the OOO reorder buffer, so consider all
2120     // scheduled MOps to be "retired".
2121     break;
2122   }
2123   RetiredMOps += IncMOps;
2124
2125   // Update resource counts and critical resource.
2126   if (SchedModel->hasInstrSchedModel()) {
2127     unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
2128     assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
2129     Rem->RemIssueCount -= DecRemIssue;
2130     if (ZoneCritResIdx) {
2131       // Scale scheduled micro-ops for comparing with the critical resource.
2132       unsigned ScaledMOps =
2133         RetiredMOps * SchedModel->getMicroOpFactor();
2134
2135       // If scaled micro-ops are now more than the previous critical resource by
2136       // a full cycle, then micro-ops issue becomes critical.
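      // E.g. (hypothetical numbers): with LatencyFactor 2, ScaledMOps 14, and
      // a critical resource count of 12, the difference of 2 equals one full
      // cycle, so the critical resource reverts to micro-op issue (index 0).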
2137       if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
2138           >= (int)SchedModel->getLatencyFactor()) {
2139         ZoneCritResIdx = 0;
2140         DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
2141               << ScaledMOps / SchedModel->getLatencyFactor() << "c\n");
2142       }
2143     }
2144     for (TargetSchedModel::ProcResIter
2145            PI = SchedModel->getWriteProcResBegin(SC),
2146            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2147       unsigned RCycle =
2148         countResource(PI->ProcResourceIdx, PI->Cycles, ReadyCycle);
2149       if (RCycle > NextCycle)
2150         NextCycle = RCycle;
2151     }
2152   }
2153   // Update ExpectedLatency and DependentLatency.
2154   unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
2155   unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
2156   if (SU->getDepth() > TopLatency) {
2157     TopLatency = SU->getDepth();
2158     DEBUG(dbgs() << "  " << Available.getName()
2159           << " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n");
2160   }
2161   if (SU->getHeight() > BotLatency) {
2162     BotLatency = SU->getHeight();
2163     DEBUG(dbgs() << "  " << Available.getName()
2164           << " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n");
2165   }
2166   // If we stall for any reason, bump the cycle.
2167   if (NextCycle > CurrCycle) {
2168     bumpCycle(NextCycle);
2169   }
2170   else {
2171     // After updating ZoneCritResIdx and ExpectedLatency, check if we're
2172     // resource limited. If a stall occurred, bumpCycle does this.
2173     unsigned LFactor = SchedModel->getLatencyFactor();
2174     IsResourceLimited =
2175       (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
2176       > (int)LFactor;
2177   }
2178   DEBUG(dumpScheduledState());
2179 }
2180
2181 /// Release pending ready nodes into the available queue. This makes them
2182 /// visible to heuristics.
2183 void ConvergingScheduler::SchedBoundary::releasePending() {
2184   // If the available queue is empty, it is safe to reset MinReadyCycle.
2185   if (Available.empty())
2186     MinReadyCycle = UINT_MAX;
2187
2188   // Check to see if any of the pending instructions are ready to issue. If
2189   // so, add them to the available queue.
2190   bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
2191   for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
2192     SUnit *SU = *(Pending.begin()+i);
2193     unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
2194
2195     if (ReadyCycle < MinReadyCycle)
2196       MinReadyCycle = ReadyCycle;
2197
2198     if (!IsBuffered && ReadyCycle > CurrCycle)
2199       continue;
2200
2201     if (checkHazard(SU))
2202       continue;
2203
2204     Available.push(SU);
2205     Pending.remove(Pending.begin()+i);
2206     --i; --e;
2207   }
2208   DEBUG(if (!Pending.empty()) Pending.dump());
2209   CheckPending = false;
2210 }
2211
2212 /// Remove SU from the ready set for this boundary.
2213 void ConvergingScheduler::SchedBoundary::removeReady(SUnit *SU) {
2214   if (Available.isInQueue(SU))
2215     Available.remove(Available.find(SU));
2216   else {
2217     assert(Pending.isInQueue(SU) && "bad ready count");
2218     Pending.remove(Pending.find(SU));
2219   }
2220 }
2221
2222 /// If this queue only has one ready candidate, return it. As a side effect,
2223 /// defer any nodes that now hit a hazard, and advance the cycle until at least
2224 /// one node is ready. If multiple instructions are ready, return NULL.
2225 SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() {
2226   if (CheckPending)
2227     releasePending();
2228
2229   if (CurrMOps > 0) {
2230     // Defer any ready instrs that now have a hazard.
2231 for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) { 2232 if (checkHazard(*I)) { 2233 Pending.push(*I); 2234 I = Available.remove(I); 2235 continue; 2236 } 2237 ++I; 2238 } 2239 } 2240 for (unsigned i = 0; Available.empty(); ++i) { 2241 assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedLatency) && 2242 "permanent hazard"); (void)i; 2243 bumpCycle(CurrCycle + 1); 2244 releasePending(); 2245 } 2246 if (Available.size() == 1) 2247 return *Available.begin(); 2248 return NULL; 2249 } 2250 2251 #ifndef NDEBUG 2252 // This is useful information to dump after bumpNode. 2253 // Note that the Queue contents are more useful before pickNodeFromQueue. 2254 void ConvergingScheduler::SchedBoundary::dumpScheduledState() { 2255 unsigned ResFactor; 2256 unsigned ResCount; 2257 if (ZoneCritResIdx) { 2258 ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx); 2259 ResCount = getResourceCount(ZoneCritResIdx); 2260 } 2261 else { 2262 ResFactor = SchedModel->getMicroOpFactor(); 2263 ResCount = RetiredMOps * SchedModel->getMicroOpFactor(); 2264 } 2265 unsigned LFactor = SchedModel->getLatencyFactor(); 2266 dbgs() << Available.getName() << " @" << CurrCycle << "c\n" 2267 << " Retired: " << RetiredMOps; 2268 dbgs() << "\n Executed: " << getExecutedCount() / LFactor << "c"; 2269 dbgs() << "\n Critical: " << ResCount / LFactor << "c, " 2270 << ResCount / ResFactor << " " << getResourceName(ZoneCritResIdx) 2271 << "\n ExpectedLatency: " << ExpectedLatency << "c\n" 2272 << (IsResourceLimited ? " - Resource" : " - Latency") 2273 << " limited.\n"; 2274 } 2275 #endif 2276 2277 void ConvergingScheduler::SchedCandidate:: 2278 initResourceDelta(const ScheduleDAGMI *DAG, 2279 const TargetSchedModel *SchedModel) { 2280 if (!Policy.ReduceResIdx && !Policy.DemandResIdx) 2281 return; 2282 2283 const MCSchedClassDesc *SC = DAG->getSchedClass(SU); 2284 for (TargetSchedModel::ProcResIter 2285 PI = SchedModel->getWriteProcResBegin(SC), 2286 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) { 2287 if (PI->ProcResourceIdx == Policy.ReduceResIdx) 2288 ResDelta.CritResources += PI->Cycles; 2289 if (PI->ProcResourceIdx == Policy.DemandResIdx) 2290 ResDelta.DemandedResources += PI->Cycles; 2291 } 2292 } 2293 2294 2295 /// Return true if this heuristic determines order. 
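/// These helpers share a protocol: if TryVal wins, set TryCand.Reason and
/// return true; if CandVal wins, lower Cand.Reason to this Reason (smaller
/// enum values are more important) and return true; on a tie, record the
/// repeated reason on Cand and return false so a later heuristic can break it.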
2296 static bool tryLess(int TryVal, int CandVal,
2297                     ConvergingScheduler::SchedCandidate &TryCand,
2298                     ConvergingScheduler::SchedCandidate &Cand,
2299                     ConvergingScheduler::CandReason Reason) {
2300   if (TryVal < CandVal) {
2301     TryCand.Reason = Reason;
2302     return true;
2303   }
2304   if (TryVal > CandVal) {
2305     if (Cand.Reason > Reason)
2306       Cand.Reason = Reason;
2307     return true;
2308   }
2309   Cand.setRepeat(Reason);
2310   return false;
2311 }
2312
2313 static bool tryGreater(int TryVal, int CandVal,
2314                        ConvergingScheduler::SchedCandidate &TryCand,
2315                        ConvergingScheduler::SchedCandidate &Cand,
2316                        ConvergingScheduler::CandReason Reason) {
2317   if (TryVal > CandVal) {
2318     TryCand.Reason = Reason;
2319     return true;
2320   }
2321   if (TryVal < CandVal) {
2322     if (Cand.Reason > Reason)
2323       Cand.Reason = Reason;
2324     return true;
2325   }
2326   Cand.setRepeat(Reason);
2327   return false;
2328 }
2329
2330 static bool tryPressure(const PressureChange &TryP,
2331                         const PressureChange &CandP,
2332                         ConvergingScheduler::SchedCandidate &TryCand,
2333                         ConvergingScheduler::SchedCandidate &Cand,
2334                         ConvergingScheduler::CandReason Reason) {
2335   int TryRank = TryP.getPSetOrMax();
2336   int CandRank = CandP.getPSetOrMax();
2337   // If both candidates affect the same set, go with the smallest increase.
2338   if (TryRank == CandRank) {
2339     return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
2340                    Reason);
2341   }
2342   // If one candidate decreases and the other increases, go with it.
2343   // Invalid candidates have UnitInc==0.
2344   if (tryLess(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
2345               Reason)) {
2346     return true;
2347   }
2348   // If the candidates are decreasing pressure, reverse priority.
2349   if (TryP.getUnitInc() < 0)
2350     std::swap(TryRank, CandRank);
2351   return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
2352 }
2353
2354 static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
2355   return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
2356 }
2357
2358 /// Minimize physical register live ranges. Regalloc wants them adjacent to
2359 /// their physreg def/use.
2360 ///
2361 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
2362 /// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
2363 /// with the operation that produces or consumes the physreg. We'll do this when
2364 /// regalloc has support for parallel copies.
2365 static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
2366   const MachineInstr *MI = SU->getInstr();
2367   if (!MI->isCopy())
2368     return 0;
2369
2370   unsigned ScheduledOper = isTop ? 1 : 0;
2371   unsigned UnscheduledOper = isTop ? 0 : 1;
2372   // If we have already scheduled the physreg producer/consumer, immediately
2373   // schedule the copy.
2374   if (TargetRegisterInfo::isPhysicalRegister(
2375         MI->getOperand(ScheduledOper).getReg()))
2376     return 1;
2377   // If the physreg is at the boundary, defer it. Otherwise schedule it
2378   // immediately to free the dependent. We can hoist the copy later.
2379   bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
2380   if (TargetRegisterInfo::isPhysicalRegister(
2381         MI->getOperand(UnscheduledOper).getReg()))
2382     return AtBoundary ? -1 : 1;
2383   return 0;
2384 }
2385
2386 static bool tryLatency(ConvergingScheduler::SchedCandidate &TryCand,
2387                        ConvergingScheduler::SchedCandidate &Cand,
2388                        ConvergingScheduler::SchedBoundary &Zone) {
2389   if (Zone.isTop()) {
2390     if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
2391       if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2392                   TryCand, Cand, ConvergingScheduler::TopDepthReduce))
2393         return true;
2394     }
2395     if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2396                    TryCand, Cand, ConvergingScheduler::TopPathReduce))
2397       return true;
2398   }
2399   else {
2400     if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
2401       if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2402                   TryCand, Cand, ConvergingScheduler::BotHeightReduce))
2403         return true;
2404     }
2405     if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2406                    TryCand, Cand, ConvergingScheduler::BotPathReduce))
2407       return true;
2408   }
2409   return false;
2410 }
2411
2412 /// Apply a set of heuristics to a new candidate. Heuristics are currently
2413 /// hierarchical. This may be more efficient than a graduated cost model because
2414 /// we don't need to evaluate all aspects of the model for each node in the
2415 /// queue. But it's really done to make the heuristics easier to debug and
2416 /// statistically analyze.
2417 ///
2418 /// \param Cand provides the policy and current best candidate.
2419 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
2420 /// \param Zone describes the scheduled zone that we are extending.
2421 /// \param RPTracker describes reg pressure within the scheduled zone.
2422 /// \param TempTracker is a scratch pressure tracker to reuse in queries.
2423 void ConvergingScheduler::tryCandidate(SchedCandidate &Cand,
2424                                        SchedCandidate &TryCand,
2425                                        SchedBoundary &Zone,
2426                                        const RegPressureTracker &RPTracker,
2427                                        RegPressureTracker &TempTracker) {
2428
2429   if (DAG->isTrackingPressure()) {
2430     // Always initialize TryCand's RPDelta.
2431     if (Zone.isTop()) {
2432       TempTracker.getMaxDownwardPressureDelta(
2433         TryCand.SU->getInstr(),
2434         TryCand.RPDelta,
2435         DAG->getRegionCriticalPSets(),
2436         DAG->getRegPressure().MaxSetPressure);
2437     }
2438     else {
2439       if (VerifyScheduling) {
2440         TempTracker.getMaxUpwardPressureDelta(
2441           TryCand.SU->getInstr(),
2442           &DAG->getPressureDiff(TryCand.SU),
2443           TryCand.RPDelta,
2444           DAG->getRegionCriticalPSets(),
2445           DAG->getRegPressure().MaxSetPressure);
2446       }
2447       else {
2448         RPTracker.getUpwardPressureDelta(
2449           TryCand.SU->getInstr(),
2450           DAG->getPressureDiff(TryCand.SU),
2451           TryCand.RPDelta,
2452           DAG->getRegionCriticalPSets(),
2453           DAG->getRegPressure().MaxSetPressure);
2454       }
2455     }
2456   }
2457
2458   // Initialize the candidate if needed.
2459   if (!Cand.isValid()) {
2460     TryCand.Reason = NodeOrder;
2461     return;
2462   }
2463
2464   if (tryGreater(biasPhysRegCopy(TryCand.SU, Zone.isTop()),
2465                  biasPhysRegCopy(Cand.SU, Zone.isTop()),
2466                  TryCand, Cand, PhysRegCopy))
2467     return;
2468
2469   // Avoid exceeding the target's limit. If signed PSetID is negative, it is
2470   // invalid; convert it to INT_MAX to give it lowest priority.
2471   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
2472                                                Cand.RPDelta.Excess,
2473                                                TryCand, Cand, RegExcess))
2474     return;
2475
2476   // Avoid increasing the max critical pressure in the scheduled region.
2477 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax, 2478 Cand.RPDelta.CriticalMax, 2479 TryCand, Cand, RegCritical)) 2480 return; 2481 2482 // For loops that are acyclic path limited, aggressively schedule for latency. 2483 if (Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, Zone)) 2484 return; 2485 2486 // Keep clustered nodes together to encourage downstream peephole 2487 // optimizations which may reduce resource requirements. 2488 // 2489 // This is a best effort to set things up for a post-RA pass. Optimizations 2490 // like generating loads of multiple registers should ideally be done within 2491 // the scheduler pass by combining the loads during DAG postprocessing. 2492 const SUnit *NextClusterSU = 2493 Zone.isTop() ? DAG->getNextClusterSucc() : DAG->getNextClusterPred(); 2494 if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU, 2495 TryCand, Cand, Cluster)) 2496 return; 2497 2498 // Weak edges are for clustering and other constraints. 2499 if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()), 2500 getWeakLeft(Cand.SU, Zone.isTop()), 2501 TryCand, Cand, Weak)) { 2502 return; 2503 } 2504 // Avoid increasing the max pressure of the entire region. 2505 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax, 2506 Cand.RPDelta.CurrentMax, 2507 TryCand, Cand, RegMax)) 2508 return; 2509 2510 // Avoid critical resource consumption and balance the schedule. 2511 TryCand.initResourceDelta(DAG, SchedModel); 2512 if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources, 2513 TryCand, Cand, ResourceReduce)) 2514 return; 2515 if (tryGreater(TryCand.ResDelta.DemandedResources, 2516 Cand.ResDelta.DemandedResources, 2517 TryCand, Cand, ResourceDemand)) 2518 return; 2519 2520 // Avoid serializing long latency dependence chains. 2521 // For acyclic path limited loops, latency was already checked above. 2522 if (Cand.Policy.ReduceLatency && !Rem.IsAcyclicLatencyLimited 2523 && tryLatency(TryCand, Cand, Zone)) { 2524 return; 2525 } 2526 2527 // Prefer immediate defs/users of the last scheduled instruction. This is a 2528 // local pressure avoidance strategy that also makes the machine code 2529 // readable. 2530 if (tryGreater(Zone.NextSUs.count(TryCand.SU), Zone.NextSUs.count(Cand.SU), 2531 TryCand, Cand, NextDefUse)) 2532 return; 2533 2534 // Fall through to original instruction order. 
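  // (The lower NodeNum wins when extending the top zone and the higher one
  // when extending the bottom zone, so unresolved ties keep source order.)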
2535 if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum) 2536 || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) { 2537 TryCand.Reason = NodeOrder; 2538 } 2539 } 2540 2541 #ifndef NDEBUG 2542 const char *ConvergingScheduler::getReasonStr( 2543 ConvergingScheduler::CandReason Reason) { 2544 switch (Reason) { 2545 case NoCand: return "NOCAND "; 2546 case PhysRegCopy: return "PREG-COPY"; 2547 case RegExcess: return "REG-EXCESS"; 2548 case RegCritical: return "REG-CRIT "; 2549 case Cluster: return "CLUSTER "; 2550 case Weak: return "WEAK "; 2551 case RegMax: return "REG-MAX "; 2552 case ResourceReduce: return "RES-REDUCE"; 2553 case ResourceDemand: return "RES-DEMAND"; 2554 case TopDepthReduce: return "TOP-DEPTH "; 2555 case TopPathReduce: return "TOP-PATH "; 2556 case BotHeightReduce:return "BOT-HEIGHT"; 2557 case BotPathReduce: return "BOT-PATH "; 2558 case NextDefUse: return "DEF-USE "; 2559 case NodeOrder: return "ORDER "; 2560 }; 2561 llvm_unreachable("Unknown reason!"); 2562 } 2563 2564 void ConvergingScheduler::traceCandidate(const SchedCandidate &Cand) { 2565 PressureChange P; 2566 unsigned ResIdx = 0; 2567 unsigned Latency = 0; 2568 switch (Cand.Reason) { 2569 default: 2570 break; 2571 case RegExcess: 2572 P = Cand.RPDelta.Excess; 2573 break; 2574 case RegCritical: 2575 P = Cand.RPDelta.CriticalMax; 2576 break; 2577 case RegMax: 2578 P = Cand.RPDelta.CurrentMax; 2579 break; 2580 case ResourceReduce: 2581 ResIdx = Cand.Policy.ReduceResIdx; 2582 break; 2583 case ResourceDemand: 2584 ResIdx = Cand.Policy.DemandResIdx; 2585 break; 2586 case TopDepthReduce: 2587 Latency = Cand.SU->getDepth(); 2588 break; 2589 case TopPathReduce: 2590 Latency = Cand.SU->getHeight(); 2591 break; 2592 case BotHeightReduce: 2593 Latency = Cand.SU->getHeight(); 2594 break; 2595 case BotPathReduce: 2596 Latency = Cand.SU->getDepth(); 2597 break; 2598 } 2599 dbgs() << " SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason); 2600 if (P.isValid()) 2601 dbgs() << " " << TRI->getRegPressureSetName(P.getPSet()) 2602 << ":" << P.getUnitInc() << " "; 2603 else 2604 dbgs() << " "; 2605 if (ResIdx) 2606 dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " "; 2607 else 2608 dbgs() << " "; 2609 if (Latency) 2610 dbgs() << " " << Latency << " cycles "; 2611 else 2612 dbgs() << " "; 2613 dbgs() << '\n'; 2614 } 2615 #endif 2616 2617 /// Pick the best candidate from the top queue. 2618 /// 2619 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during 2620 /// DAG building. To adjust for the current scheduling location we need to 2621 /// maintain the number of vreg uses remaining to be top-scheduled. 2622 void ConvergingScheduler::pickNodeFromQueue(SchedBoundary &Zone, 2623 const RegPressureTracker &RPTracker, 2624 SchedCandidate &Cand) { 2625 ReadyQueue &Q = Zone.Available; 2626 2627 DEBUG(Q.dump()); 2628 2629 // getMaxPressureDelta temporarily modifies the tracker. 2630 RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker); 2631 2632 for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) { 2633 2634 SchedCandidate TryCand(Cand.Policy); 2635 TryCand.SU = *I; 2636 tryCandidate(Cand, TryCand, Zone, RPTracker, TempTracker); 2637 if (TryCand.Reason != NoCand) { 2638 // Initialize resource delta if needed in case future heuristics query it. 
2639 if (TryCand.ResDelta == SchedResourceDelta()) 2640 TryCand.initResourceDelta(DAG, SchedModel); 2641 Cand.setBest(TryCand); 2642 DEBUG(traceCandidate(Cand)); 2643 } 2644 } 2645 } 2646 2647 static void tracePick(const ConvergingScheduler::SchedCandidate &Cand, 2648 bool IsTop) { 2649 DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ") 2650 << ConvergingScheduler::getReasonStr(Cand.Reason) << '\n'); 2651 } 2652 2653 /// Pick the best candidate node from either the top or bottom queue. 2654 SUnit *ConvergingScheduler::pickNodeBidirectional(bool &IsTopNode) { 2655 // Schedule as far as possible in the direction of no choice. This is most 2656 // efficient, but also provides the best heuristics for CriticalPSets. 2657 if (SUnit *SU = Bot.pickOnlyChoice()) { 2658 IsTopNode = false; 2659 DEBUG(dbgs() << "Pick Bot NOCAND\n"); 2660 return SU; 2661 } 2662 if (SUnit *SU = Top.pickOnlyChoice()) { 2663 IsTopNode = true; 2664 DEBUG(dbgs() << "Pick Top NOCAND\n"); 2665 return SU; 2666 } 2667 CandPolicy NoPolicy; 2668 SchedCandidate BotCand(NoPolicy); 2669 SchedCandidate TopCand(NoPolicy); 2670 Bot.setPolicy(BotCand.Policy, Top); 2671 Top.setPolicy(TopCand.Policy, Bot); 2672 2673 // Prefer bottom scheduling when heuristics are silent. 2674 pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand); 2675 assert(BotCand.Reason != NoCand && "failed to find the first candidate"); 2676 2677 // If either Q has a single candidate that provides the least increase in 2678 // Excess pressure, we can immediately schedule from that Q. 2679 // 2680 // RegionCriticalPSets summarizes the pressure within the scheduled region and 2681 // affects picking from either Q. If scheduling in one direction must 2682 // increase pressure for one of the excess PSets, then schedule in that 2683 // direction first to provide more freedom in the other direction. 2684 if ((BotCand.Reason == RegExcess && !BotCand.isRepeat(RegExcess)) 2685 || (BotCand.Reason == RegCritical 2686 && !BotCand.isRepeat(RegCritical))) 2687 { 2688 IsTopNode = false; 2689 tracePick(BotCand, IsTopNode); 2690 return BotCand.SU; 2691 } 2692 // Check if the top Q has a better candidate. 2693 pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand); 2694 assert(TopCand.Reason != NoCand && "failed to find the first candidate"); 2695 2696 // Choose the queue with the most important (lowest enum) reason. 2697 if (TopCand.Reason < BotCand.Reason) { 2698 IsTopNode = true; 2699 tracePick(TopCand, IsTopNode); 2700 return TopCand.SU; 2701 } 2702 // Otherwise prefer the bottom candidate, in node order if all else failed. 2703 IsTopNode = false; 2704 tracePick(BotCand, IsTopNode); 2705 return BotCand.SU; 2706 } 2707 2708 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy. 
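/// Honors a one-directional RegionPolicy when one is set; otherwise defers to
/// pickNodeBidirectional. The loop guards against a queue handing back a node
/// that has already been scheduled.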
2709 SUnit *ConvergingScheduler::pickNode(bool &IsTopNode) {
2710   if (DAG->top() == DAG->bottom()) {
2711     assert(Top.Available.empty() && Top.Pending.empty() &&
2712            Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
2713     return NULL;
2714   }
2715   SUnit *SU;
2716   do {
2717     if (RegionPolicy.OnlyTopDown) {
2718       SU = Top.pickOnlyChoice();
2719       if (!SU) {
2720         CandPolicy NoPolicy;
2721         SchedCandidate TopCand(NoPolicy);
2722         pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2723         assert(TopCand.Reason != NoCand && "failed to find a candidate");
2724         tracePick(TopCand, true);
2725         SU = TopCand.SU;
2726       }
2727       IsTopNode = true;
2728     }
2729     else if (RegionPolicy.OnlyBottomUp) {
2730       SU = Bot.pickOnlyChoice();
2731       if (!SU) {
2732         CandPolicy NoPolicy;
2733         SchedCandidate BotCand(NoPolicy);
2734         pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
2735         assert(BotCand.Reason != NoCand && "failed to find a candidate");
2736         tracePick(BotCand, false);
2737         SU = BotCand.SU;
2738       }
2739       IsTopNode = false;
2740     }
2741     else {
2742       SU = pickNodeBidirectional(IsTopNode);
2743     }
2744   } while (SU->isScheduled);
2745
2746   if (SU->isTopReady())
2747     Top.removeReady(SU);
2748   if (SU->isBottomReady())
2749     Bot.removeReady(SU);
2750
2751   DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
2752   return SU;
2753 }
2754
2755 void ConvergingScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
2756
2757   MachineBasicBlock::iterator InsertPos = SU->getInstr();
2758   if (!isTop)
2759     ++InsertPos;
2760   SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;
2761
2762   // Find already scheduled copies with a single physreg dependence and move
2763   // them just above the scheduled instruction.
2764   for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end();
2765        I != E; ++I) {
2766     if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg()))
2767       continue;
2768     SUnit *DepSU = I->getSUnit();
2769     if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
2770       continue;
2771     MachineInstr *Copy = DepSU->getInstr();
2772     if (!Copy->isCopy())
2773       continue;
2774     DEBUG(dbgs() << "  Rescheduling physreg copy ";
2775           I->getSUnit()->dump(DAG));
2776     DAG->moveInstruction(Copy, InsertPos);
2777   }
2778 }
2779
2780 /// Update the scheduler's state after scheduling a node. This is the same node
2781 /// that was just returned by pickNode(). However, ScheduleDAGMI needs to update
2782 /// its state based on the current cycle before MachineSchedStrategy does.
2783 ///
2784 /// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
2785 /// them here. See comments in biasPhysRegCopy.
2786 void ConvergingScheduler::schedNode(SUnit *SU, bool IsTopNode) {
2787   if (IsTopNode) {
2788     SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.CurrCycle);
2789     Top.bumpNode(SU);
2790     if (SU->hasPhysRegUses)
2791       reschedulePhysRegCopies(SU, true);
2792   }
2793   else {
2794     SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.CurrCycle);
2795     Bot.bumpNode(SU);
2796     if (SU->hasPhysRegDefs)
2797       reschedulePhysRegCopies(SU, false);
2798   }
2799 }
2800
2801 /// Create the standard converging machine scheduler. This will be used as the
2802 /// default scheduler if the target does not set a default.
2803 static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
2804   ScheduleDAGMI *DAG = new ScheduleDAGMI(C, new ConvergingScheduler(C));
2805   // Register DAG post-processors.
2806   //
2807   // FIXME: extend the mutation API to allow earlier mutations to instantiate
2808   // data and pass it to later mutations. Have a single mutation that gathers
2809   // the interesting nodes in one pass.
2810   DAG->addMutation(new CopyConstrain(DAG->TII, DAG->TRI));
2811   if (EnableLoadCluster && DAG->TII->enableClusterLoads())
2812     DAG->addMutation(new LoadClusterMutation(DAG->TII, DAG->TRI));
2813   if (EnableMacroFusion)
2814     DAG->addMutation(new MacroFusion(DAG->TII));
2815   return DAG;
2816 }
2817 static MachineSchedRegistry
2818 ConvergingSchedRegistry("converge", "Standard converging scheduler.",
2819                         createConvergingSched);
2820
2821 //===----------------------------------------------------------------------===//
2822 // ILP Scheduler. Currently for experimental analysis of heuristics.
2823 //===----------------------------------------------------------------------===//
2824
2825 namespace {
2826 /// \brief Order nodes by the ILP metric.
2827 struct ILPOrder {
2828   const SchedDFSResult *DFSResult;
2829   const BitVector *ScheduledTrees;
2830   bool MaximizeILP;
2831
2832   ILPOrder(bool MaxILP): DFSResult(0), ScheduledTrees(0), MaximizeILP(MaxILP) {}
2833
2834   /// \brief Apply a less-than relation on node priority.
2835   ///
2836   /// (Return true if A comes after B in the Q.)
2837   bool operator()(const SUnit *A, const SUnit *B) const {
2838     unsigned SchedTreeA = DFSResult->getSubtreeID(A);
2839     unsigned SchedTreeB = DFSResult->getSubtreeID(B);
2840     if (SchedTreeA != SchedTreeB) {
2841       // Unscheduled trees have lower priority.
2842       if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
2843         return ScheduledTrees->test(SchedTreeB);
2844
2845       // Trees with shallower connections have lower priority.
2846       if (DFSResult->getSubtreeLevel(SchedTreeA)
2847           != DFSResult->getSubtreeLevel(SchedTreeB)) {
2848         return DFSResult->getSubtreeLevel(SchedTreeA)
2849           < DFSResult->getSubtreeLevel(SchedTreeB);
2850       }
2851     }
2852     if (MaximizeILP)
2853       return DFSResult->getILP(A) < DFSResult->getILP(B);
2854     else
2855       return DFSResult->getILP(A) > DFSResult->getILP(B);
2856   }
2857 };
2858
2859 /// \brief Schedule based on the ILP metric.
2860 class ILPScheduler : public MachineSchedStrategy {
2861   ScheduleDAGMI *DAG;
2862   ILPOrder Cmp;
2863
2864   std::vector<SUnit*> ReadyQ;
2865 public:
2866   ILPScheduler(bool MaximizeILP): DAG(0), Cmp(MaximizeILP) {}
2867
2868   virtual void initialize(ScheduleDAGMI *dag) {
2869     DAG = dag;
2870     DAG->computeDFSResult();
2871     Cmp.DFSResult = DAG->getDFSResult();
2872     Cmp.ScheduledTrees = &DAG->getScheduledTrees();
2873     ReadyQ.clear();
2874   }
2875
2876   virtual void registerRoots() {
2877     // Restore the heap in ReadyQ with the updated DFS results.
2878     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2879   }
2880
2881   /// Implement MachineSchedStrategy interface.
2882   /// -----------------------------------------
2883
2884   /// Callback to select the highest priority node from the ready Q.
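  /// Pops the max element maintained by ILPOrder. This strategy only
  /// schedules bottom-up, so IsTopNode is always set to false.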
2885 virtual SUnit *pickNode(bool &IsTopNode) { 2886 if (ReadyQ.empty()) return NULL; 2887 std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 2888 SUnit *SU = ReadyQ.back(); 2889 ReadyQ.pop_back(); 2890 IsTopNode = false; 2891 DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") " 2892 << " ILP: " << DAG->getDFSResult()->getILP(SU) 2893 << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @" 2894 << DAG->getDFSResult()->getSubtreeLevel( 2895 DAG->getDFSResult()->getSubtreeID(SU)) << '\n' 2896 << "Scheduling " << *SU->getInstr()); 2897 return SU; 2898 } 2899 2900 /// \brief Scheduler callback to notify that a new subtree is scheduled. 2901 virtual void scheduleTree(unsigned SubtreeID) { 2902 std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 2903 } 2904 2905 /// Callback after a node is scheduled. Mark a newly scheduled tree, notify 2906 /// DFSResults, and resort the priority Q. 2907 virtual void schedNode(SUnit *SU, bool IsTopNode) { 2908 assert(!IsTopNode && "SchedDFSResult needs bottom-up"); 2909 } 2910 2911 virtual void releaseTopNode(SUnit *) { /*only called for top roots*/ } 2912 2913 virtual void releaseBottomNode(SUnit *SU) { 2914 ReadyQ.push_back(SU); 2915 std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); 2916 } 2917 }; 2918 } // namespace 2919 2920 static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) { 2921 return new ScheduleDAGMI(C, new ILPScheduler(true)); 2922 } 2923 static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) { 2924 return new ScheduleDAGMI(C, new ILPScheduler(false)); 2925 } 2926 static MachineSchedRegistry ILPMaxRegistry( 2927 "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler); 2928 static MachineSchedRegistry ILPMinRegistry( 2929 "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler); 2930 2931 //===----------------------------------------------------------------------===// 2932 // Machine Instruction Shuffler for Correctness Testing 2933 //===----------------------------------------------------------------------===// 2934 2935 #ifndef NDEBUG 2936 namespace { 2937 /// Apply a less-than relation on the node order, which corresponds to the 2938 /// instruction order prior to scheduling. IsReverse implements greater-than. 2939 template<bool IsReverse> 2940 struct SUnitOrder { 2941 bool operator()(SUnit *A, SUnit *B) const { 2942 if (IsReverse) 2943 return A->NodeNum > B->NodeNum; 2944 else 2945 return A->NodeNum < B->NodeNum; 2946 } 2947 }; 2948 2949 /// Reorder instructions as much as possible. 2950 class InstructionShuffler : public MachineSchedStrategy { 2951 bool IsAlternating; 2952 bool IsTopDown; 2953 2954 // Using a less-than relation (SUnitOrder<false>) for the TopQ priority 2955 // gives nodes with a higher number higher priority causing the latest 2956 // instructions to be scheduled first. 2957 PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> > 2958 TopQ; 2959 // When scheduling bottom-up, use greater-than as the queue priority. 2960 PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> > 2961 BottomQ; 2962 public: 2963 InstructionShuffler(bool alternate, bool topdown) 2964 : IsAlternating(alternate), IsTopDown(topdown) {} 2965 2966 virtual void initialize(ScheduleDAGMI *) { 2967 TopQ.clear(); 2968 BottomQ.clear(); 2969 } 2970 2971 /// Implement MachineSchedStrategy interface. 
2972 /// ----------------------------------------- 2973 2974 virtual SUnit *pickNode(bool &IsTopNode) { 2975 SUnit *SU; 2976 if (IsTopDown) { 2977 do { 2978 if (TopQ.empty()) return NULL; 2979 SU = TopQ.top(); 2980 TopQ.pop(); 2981 } while (SU->isScheduled); 2982 IsTopNode = true; 2983 } 2984 else { 2985 do { 2986 if (BottomQ.empty()) return NULL; 2987 SU = BottomQ.top(); 2988 BottomQ.pop(); 2989 } while (SU->isScheduled); 2990 IsTopNode = false; 2991 } 2992 if (IsAlternating) 2993 IsTopDown = !IsTopDown; 2994 return SU; 2995 } 2996 2997 virtual void schedNode(SUnit *SU, bool IsTopNode) {} 2998 2999 virtual void releaseTopNode(SUnit *SU) { 3000 TopQ.push(SU); 3001 } 3002 virtual void releaseBottomNode(SUnit *SU) { 3003 BottomQ.push(SU); 3004 } 3005 }; 3006 } // namespace 3007 3008 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) { 3009 bool Alternate = !ForceTopDown && !ForceBottomUp; 3010 bool TopDown = !ForceBottomUp; 3011 assert((TopDown || !ForceTopDown) && 3012 "-misched-topdown incompatible with -misched-bottomup"); 3013 return new ScheduleDAGMI(C, new InstructionShuffler(Alternate, TopDown)); 3014 } 3015 static MachineSchedRegistry ShufflerRegistry( 3016 "shuffle", "Shuffle machine instructions alternating directions", 3017 createInstructionShuffler); 3018 #endif // !NDEBUG 3019 3020 //===----------------------------------------------------------------------===// 3021 // GraphWriter support for ScheduleDAGMI. 3022 //===----------------------------------------------------------------------===// 3023 3024 #ifndef NDEBUG 3025 namespace llvm { 3026 3027 template<> struct GraphTraits< 3028 ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {}; 3029 3030 template<> 3031 struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits { 3032 3033 DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {} 3034 3035 static std::string getGraphName(const ScheduleDAG *G) { 3036 return G->MF.getName(); 3037 } 3038 3039 static bool renderGraphFromBottomUp() { 3040 return true; 3041 } 3042 3043 static bool isNodeHidden(const SUnit *Node) { 3044 return (Node->Preds.size() > 10 || Node->Succs.size() > 10); 3045 } 3046 3047 static bool hasNodeAddressLabel(const SUnit *Node, 3048 const ScheduleDAG *Graph) { 3049 return false; 3050 } 3051 3052 /// If you want to override the dot attributes printed for a particular 3053 /// edge, override this method. 
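  /// Here, artificial edges are drawn dashed cyan and control dependencies
  /// dashed blue, so plain data edges stand out in the rendered graph.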
3054 static std::string getEdgeAttributes(const SUnit *Node, 3055 SUnitIterator EI, 3056 const ScheduleDAG *Graph) { 3057 if (EI.isArtificialDep()) 3058 return "color=cyan,style=dashed"; 3059 if (EI.isCtrlDep()) 3060 return "color=blue,style=dashed"; 3061 return ""; 3062 } 3063 3064 static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) { 3065 std::string Str; 3066 raw_string_ostream SS(Str); 3067 SS << "SU(" << SU->NodeNum << ')'; 3068 return SS.str(); 3069 } 3070 static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) { 3071 return G->getGraphNodeLabel(SU); 3072 } 3073 3074 static std::string getNodeAttributes(const SUnit *N, 3075 const ScheduleDAG *Graph) { 3076 std::string Str("shape=Mrecord"); 3077 const SchedDFSResult *DFS = 3078 static_cast<const ScheduleDAGMI*>(Graph)->getDFSResult(); 3079 if (DFS) { 3080 Str += ",style=filled,fillcolor=\"#"; 3081 Str += DOT::getColorString(DFS->getSubtreeID(N)); 3082 Str += '"'; 3083 } 3084 return Str; 3085 } 3086 }; 3087 } // namespace llvm 3088 #endif // NDEBUG 3089 3090 /// viewGraph - Pop up a ghostview window with the reachable parts of the DAG 3091 /// rendered using 'dot'. 3092 /// 3093 void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) { 3094 #ifndef NDEBUG 3095 ViewGraph(this, Name, false, Title); 3096 #else 3097 errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on " 3098 << "systems with Graphviz or gv!\n"; 3099 #endif // NDEBUG 3100 } 3101 3102 /// Out-of-line implementation with no arguments is handy for gdb. 3103 void ScheduleDAGMI::viewGraph() { 3104 viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName()); 3105 } 3106