/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/**
 * DOC: Overview
 *
 * The GPU scheduler provides entities which allow userspace to push jobs
 * into software queues which are then scheduled on a hardware run queue.
 * The software queues have a priority among them. The scheduler selects the
 * entities from the run queue using a FIFO. The scheduler provides dependency
 * handling features among jobs. The driver is supposed to provide callback
 * functions for backend operations to the scheduler, like submitting a job to
 * the hardware run queue, returning the dependencies of a job, etc.
 *
 * The organisation of the scheduler is the following:
 *
 * 1. Each hw run queue has one scheduler
 * 2. Each scheduler has multiple run queues with different priorities
 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
 * 3. Each scheduler run queue has a queue of entities to schedule
 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
 *    the hardware.
 *
 * The jobs in an entity are always scheduled in the order in which they were
 * pushed.
 */

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
#ifdef __linux__
#include <uapi/linux/sched/types.h>
#endif

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_rq_init - initialize a given run queue struct
 *
 * @sched: scheduler instance to associate with this run queue
 * @rq: scheduler run queue
 *
 * Initializes a scheduler runqueue.
 */
static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
			      struct drm_sched_rq *rq)
{
	mtx_init(&rq->lock, IPL_NONE);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
	rq->sched = sched;
}

/**
 * drm_sched_rq_add_entity - add an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Adds a scheduler entity to the run queue.
 */
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	atomic_inc(rq->sched->score);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_remove_entity - remove an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Removes a scheduler entity from the run queue.
 */
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	atomic_dec(rq->sched->score);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_select_entity - Select an entity which could provide a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				reinit_completion(&entity->entity_idle);
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * drm_sched_job_done - complete a job
 * @s_job: pointer to the job which is done
 *
 * Finish the job's fence and wake up the worker thread.
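 *
 * Called from the hardware fence callback (drm_sched_job_done_cb()) or
 * directly when a job finishes without a parent fence, e.g. when run_job()
 * returned NULL or an error.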
 */
static void drm_sched_job_done(struct drm_sched_job *s_job)
{
	struct drm_sched_fence *s_fence = s_job->s_fence;
	struct drm_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	atomic_dec(sched->score);

	trace_drm_sched_process_job(s_fence);

	dma_fence_get(&s_fence->finished);
	drm_sched_fence_finished(s_fence);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_job_done_cb - the callback for a done job
 * @f: fence
 * @cb: fence callbacks
 */
static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);

	drm_sched_job_done(s_job);
}

/**
 * drm_sched_dependency_optimized - test if the dependency can be optimized
 *
 * @fence: the dependency fence
 * @entity: the entity which depends on the above fence
 *
 * Returns true if the dependency can be optimized and false otherwise
 */
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_fence *s_fence;

	if (!fence || dma_fence_is_signaled(fence))
		return false;
	if (fence->context == entity->fence_context)
		return true;
	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched)
		return true;

	return false;
}
EXPORT_SYMBOL(drm_sched_dependency_optimized);

/**
 * drm_sched_start_timeout - start timeout for reset worker
 *
 * @sched: scheduler instance to start the worker for
 *
 * Start the timeout for the given scheduler.
 */
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !list_empty(&sched->pending_list))
		queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
}

/**
 * drm_sched_fault - immediately start timeout handler
 *
 * @sched: scheduler where the timeout handling should be started.
 *
 * Start timeout handling immediately when the driver detects a hardware fault.
 */
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
	mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
}
EXPORT_SYMBOL(drm_sched_fault);

/**
 * drm_sched_suspend_timeout - Suspend scheduler job timeout
 *
 * @sched: scheduler instance for which to suspend the timeout
 *
 * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrary large value,
 * MAX_SCHEDULE_TIMEOUT in this case.
 *
 * Returns the timeout remaining
 *
 */
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
{
	unsigned long sched_timeout, now = jiffies;

#ifdef __linux__
	sched_timeout = sched->work_tdr.timer.expires;
#else
	sched_timeout = sched->work_tdr.to.to_time;
#endif

	/*
	 * Modify the timeout to an arbitrarily large value.
	 * This also prevents the timeout from being restarted when new
	 * submissions arrive.
	 */
	if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
			&& time_after(sched_timeout, now))
		return sched_timeout - now;
	else
		return sched->timeout;
}
EXPORT_SYMBOL(drm_sched_suspend_timeout);

/**
 * drm_sched_resume_timeout - Resume scheduler job timeout
 *
 * @sched: scheduler instance for which to resume the timeout
 * @remaining: remaining timeout
 *
 * Resume the delayed work timeout for the scheduler.
 */
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining)
{
	spin_lock(&sched->job_list_lock);

	if (list_empty(&sched->pending_list))
		cancel_delayed_work(&sched->work_tdr);
	else
		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);

	spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);

static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->list, &sched->pending_list);
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}

static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_job *job;
	enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;

	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);

	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
	spin_lock(&sched->job_list_lock);
	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job) {
		/*
		 * Remove the bad job so it cannot be freed by a concurrent
		 * drm_sched_get_cleanup_job. It will be reinserted after
		 * sched->thread is parked, at which point it is safe.
		 */
		list_del_init(&job->list);
		spin_unlock(&sched->job_list_lock);

		status = job->sched->ops->timedout_job(job);

		/*
		 * Guilty job did complete and hence needs to be manually removed.
		 * See the drm_sched_stop documentation.
		 */
		if (sched->free_guilty) {
			job->sched->ops->free_job(job);
			sched->free_guilty = false;
		}
	} else {
		spin_unlock(&sched->job_list_lock);
	}

	if (status != DRM_GPU_SCHED_STAT_ENODEV) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}
}

/**
 * drm_sched_increase_karma - Update sched_entity guilty flag
 *
 * @bad: The job guilty of time out
 *
 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
 * limit of the scheduler then the respective sched entity is marked guilty and
 * jobs from it will not be scheduled further
 */
void drm_sched_increase_karma(struct drm_sched_job *bad)
{
	drm_sched_increase_karma_ext(bad, 1);
}
EXPORT_SYMBOL(drm_sched_increase_karma);

void drm_sched_reset_karma(struct drm_sched_job *bad)
{
	drm_sched_increase_karma_ext(bad, 0);
}
EXPORT_SYMBOL(drm_sched_reset_karma);

/**
 * drm_sched_stop - stop the scheduler
 *
 * @sched: scheduler instance
 * @bad: job which caused the time out
 *
 * Stop the scheduler and also remove and free all completed jobs.
 * Note: the bad job will not be freed as it might be used later, so it is the
 * caller's responsibility to release it manually if it is no longer part of
 * the pending list.
 *
 */
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job, *tmp;

	kthread_park(sched->thread);

	/*
	 * Reinsert back the bad job here - now it's safe as
	 * drm_sched_get_cleanup_job cannot race against us and release the
	 * bad job at this point - we parked (waited for) any in progress
	 * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
	 * now until the scheduler thread is unparked.
	 */
	if (bad && bad->sched == sched)
		/*
		 * Add at the head of the queue to reflect it was the earliest
		 * job extracted.
		 */
		list_add(&bad->list, &sched->pending_list);

	/*
	 * Iterate the job list from later to earlier and either deactivate
	 * the jobs' HW callbacks or remove them from the pending list if they
	 * have already signaled.
	 * This iteration is thread safe as the sched thread is stopped.
	 */
	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
					 list) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->cb)) {
			atomic_dec(&sched->hw_rq_count);
		} else {
			/*
			 * remove job from pending_list.
			 * Locking here is for concurrent resume timeout
			 */
			spin_lock(&sched->job_list_lock);
			list_del_init(&s_job->list);
			spin_unlock(&sched->job_list_lock);

			/*
			 * Wait for job's HW fence callback to finish using s_job
			 * before releasing it.
			 *
			 * Job is still alive so fence refcount at least 1
			 */
			dma_fence_wait(&s_job->s_fence->finished, false);

			/*
			 * We must keep the bad job alive for later use during
			 * recovery by some of the drivers, but leave a hint
			 * that the guilty job must be released.
			 */
			if (bad != s_job)
				sched->ops->free_job(s_job);
			else
				sched->free_guilty = true;
		}
	}

	/*
	 * Stop the pending timer in flight as we rearm it in drm_sched_start.
	 * This prevents the in-progress timeout work from firing right after
	 * this TDR finishes and before the newly restarted jobs have had a
	 * chance to complete.
	 */
	cancel_delayed_work(&sched->work_tdr);
}

EXPORT_SYMBOL(drm_sched_stop);

/**
 * drm_sched_start - recover jobs after a reset
 *
 * @sched: scheduler instance
 * @full_recovery: proceed with complete sched restart
 *
 */
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
	struct drm_sched_job *s_job, *tmp;
	int r;

	/*
	 * Locking the list is not required here as the sched thread is parked
	 * so no new jobs are being inserted or removed. Also concurrent
	 * GPU recovers can't run in parallel.
	 */
	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct dma_fence *fence = s_job->s_fence->parent;

		atomic_inc(&sched->hw_rq_count);

		if (!full_recovery)
			continue;

		if (fence) {
			r = dma_fence_add_callback(fence, &s_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(s_job);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
		} else
			drm_sched_job_done(s_job);
	}

	if (full_recovery) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}

	kthread_unpark(sched->thread);
}
EXPORT_SYMBOL(drm_sched_start);

/**
 * drm_sched_resubmit_jobs - helper to relaunch jobs from the pending list
 *
 * @sched: scheduler instance
 *
 */
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
{
	drm_sched_resubmit_jobs_ext(sched, INT_MAX);
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);

/**
 * drm_sched_resubmit_jobs_ext - helper to relaunch a certain number of jobs from the pending list
 *
 * @sched: scheduler instance
 * @max: number of jobs to relaunch
 *
 */
void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max)
{
	struct drm_sched_job *s_job, *tmp;
	uint64_t guilty_context;
	bool found_guilty = false;
	struct dma_fence *fence;
	int i = 0;

	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		if (i >= max)
			break;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		dma_fence_put(s_job->s_fence->parent);
		fence = sched->ops->run_job(s_job);
		i++;

		if (IS_ERR_OR_NULL(fence)) {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			s_job->s_fence->parent = NULL;
		} else {
			s_job->s_fence->parent = fence;
		}
	}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs_ext);

/**
 * drm_sched_job_init - init a scheduler job
 *
 * @job: scheduler job to init
 * @entity: scheduler entity to use
 * @owner: job owner for debugging
 *
 * Refer to drm_sched_entity_push_job() documentation
 * for locking considerations.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner)
{
	struct drm_gpu_scheduler *sched;

	drm_sched_entity_select_rq(entity);
	if (!entity->rq)
		return -ENOENT;

	sched = entity->rq->sched;

	job->sched = sched;
	job->entity = entity;
	job->s_priority = entity->rq - sched->sched_rq;
	job->s_fence = drm_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;
	job->id = atomic64_inc_return(&sched->job_id_count);

	INIT_LIST_HEAD(&job->list);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);

/**
 * drm_sched_job_cleanup - clean up scheduler job resources
 *
 * @job: scheduler job to clean up
 */
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
	dma_fence_put(&job->s_fence->finished);
	job->s_fence = NULL;
}
EXPORT_SYMBOL(drm_sched_job_cleanup);

/**
 * drm_sched_ready - is the scheduler ready
 *
 * @sched: scheduler instance
 *
 * Return true if we can push more jobs to the hw, otherwise false.
 */
static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * drm_sched_wakeup - Wake up the scheduler when it is ready
 *
 * @sched: scheduler instance
 *
 */
void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
	if (drm_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_select_entity - Select next entity to process
 *
 * @sched: scheduler instance
 *
 * Returns the entity to process or NULL if none are found.
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *entity;
	int i;

	if (!drm_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

/**
 * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
 *
 * @sched: scheduler instance
 *
 * Returns the next finished job from the pending list (if there is one)
 * ready to be destroyed.
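 *
 * The caller takes ownership of the returned job and is expected to release
 * it through the driver's free_job() callback, as the scheduler thread does.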
 */
static struct drm_sched_job *
drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *job, *next;

	/*
	 * Don't destroy jobs while the timeout worker is running OR the thread
	 * is being parked and hence assumed not to touch pending_list
	 */
	if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !cancel_delayed_work(&sched->work_tdr)) ||
	    kthread_should_park())
		return NULL;

	spin_lock(&sched->job_list_lock);

	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
		/* remove job from pending_list */
		list_del_init(&job->list);
		/* make the scheduled timestamp more accurate */
		next = list_first_entry_or_null(&sched->pending_list,
						typeof(*next), list);
		if (next)
			next->s_fence->scheduled.timestamp =
				job->s_fence->finished.timestamp;

	} else {
		job = NULL;
		/* queue timeout for next job */
		drm_sched_start_timeout(sched);
	}

	spin_unlock(&sched->job_list_lock);

	return job;
}

/**
 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
 * @sched_list: list of drm_gpu_schedulers
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list
 *
 * Returns a pointer to the sched with the least load, or NULL if none of the
 * drm_gpu_schedulers are ready
 */
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list)
{
	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
	int i;
	unsigned int min_score = UINT_MAX, num_score;

	for (i = 0; i < num_sched_list; ++i) {
		sched = sched_list[i];

		if (!sched->ready) {
			DRM_WARN("scheduler %s is not ready, skipping",
				 sched->name);
			continue;
		}

		num_score = atomic_read(sched->score);
		if (num_score < min_score) {
			min_score = num_score;
			picked_sched = sched;
		}
	}

	return picked_sched;
}
EXPORT_SYMBOL(drm_sched_pick_best);

/**
 * drm_sched_blocked - check if the scheduler is blocked
 *
 * @sched: scheduler instance
 *
 * Returns true if blocked, otherwise false.
 */
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

/**
 * drm_sched_main - main scheduler thread
 *
 * @param: scheduler instance
 *
 * Returns 0.
 */
static int drm_sched_main(void *param)
{
	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
	int r;

#ifdef __linux__
	sched_set_fifo_low(current);
#endif

	while (!kthread_should_stop()) {
		struct drm_sched_entity *entity = NULL;
		struct drm_sched_fence *s_fence;
		struct drm_sched_job *sched_job;
		struct dma_fence *fence;
		struct drm_sched_job *cleanup_job = NULL;

		wait_event_interruptible(sched->wake_up_worker,
					 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
					 (!drm_sched_blocked(sched) &&
					  (entity = drm_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (cleanup_job) {
			sched->ops->free_job(cleanup_job);
			/* queue timeout for next job */
			drm_sched_start_timeout(sched);
		}

		if (!entity)
			continue;

		sched_job = drm_sched_entity_pop_job(entity);

		if (!sched_job) {
			complete(&entity->entity_idle);
			continue;
		}

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		drm_sched_job_begin(sched_job);

		trace_drm_run_job(sched_job, entity);
		fence = sched->ops->run_job(sched_job);
		complete(&entity->entity_idle);
		drm_sched_fence_scheduled(s_fence);

		if (!IS_ERR_OR_NULL(fence)) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &sched_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(sched_job);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			drm_sched_job_done(sched_job);
		}

		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * drm_sched_init - Init a gpu scheduler instance
 *
 * @sched: scheduler instance
 * @ops: backend operations for this scheduler
 * @hw_submission: number of hw submissions that can be in flight
 * @hang_limit: number of times to allow a job to hang before dropping it
 * @timeout: timeout value in jiffies for the scheduler
 * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
 *		used
 * @score: optional score atomic shared with other schedulers
 * @name: name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   unsigned hw_submission, unsigned hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name)
{
	int i, ret;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->timeout_wq = timeout_wq ? : system_wq;
	sched->hang_limit = hang_limit;
	sched->score = score ? score : &sched->_score;
	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
		drm_sched_rq_init(sched, &sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->pending_list);
	mtx_init(&sched->job_list_lock, IPL_NONE);
	atomic_set(&sched->hw_rq_count, 0);
	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
	atomic_set(&sched->_score, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		ret = PTR_ERR(sched->thread);
		sched->thread = NULL;
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return ret;
	}

	sched->ready = true;
	return 0;
}
EXPORT_SYMBOL(drm_sched_init);

/**
 * drm_sched_fini - Destroy a gpu scheduler
 *
 * @sched: scheduler instance
 *
 * Tears down and cleans up the scheduler.
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *s_entity;
	int i;

	if (sched->thread)
		kthread_stop(sched->thread);

	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		struct drm_sched_rq *rq = &sched->sched_rq[i];

		if (!rq)
			continue;

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list)
			/*
			 * Prevents reinsertion and marks job_queue as idle;
			 * it will be removed from the rq in
			 * drm_sched_entity_fini() eventually.
			 */
			s_entity->stopped = true;
		spin_unlock(&rq->lock);

	}

	/* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
	wake_up_all(&sched->job_scheduled);

	/* Confirm no work left behind accessing device structures */
	cancel_delayed_work_sync(&sched->work_tdr);

	sched->ready = false;
}
EXPORT_SYMBOL(drm_sched_fini);

/**
 * drm_sched_increase_karma_ext - Update sched_entity guilty flag
 *
 * @bad: The job guilty of time out
 * @type: type for increase/reset karma
 *
 */
void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type)
{
	int i;
	struct drm_sched_entity *tmp;
	struct drm_sched_entity *entity;
	struct drm_gpu_scheduler *sched = bad->sched;

	/* don't change @bad's karma if it's from the KERNEL RQ,
	 * because a GPU hang can sometimes corrupt kernel jobs (like VM
	 * updating jobs), but keep in mind that kernel jobs are always
	 * considered good.
	 */
	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		if (type == 0)
			atomic_set(&bad->karma, 0);
		else if (type == 1)
			atomic_inc(&bad->karma);

		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
		     i++) {
			struct drm_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context ==
				    entity->fence_context) {
					if (entity->guilty)
						atomic_set(entity->guilty, type);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_increase_karma_ext);
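
/*
 * Illustrative usage sketch (comment only, not compiled as part of this
 * file): how a driver typically wires up the API above. Everything prefixed
 * with "example_", as well as ring/ctx/job/owner and the submission and
 * timeout values, is hypothetical driver-side code. Only drm_sched_init(),
 * drm_sched_job_init() and the drm_sched_backend_ops callbacks used
 * throughout this file (run_job, timedout_job, free_job) come from the
 * scheduler API itself.
 *
 *	static const struct drm_sched_backend_ops example_sched_ops = {
 *		.run_job	= example_run_job,
 *		.timedout_job	= example_timedout_job,
 *		.free_job	= example_free_job,
 *	};
 *
 *	One scheduler per hardware run queue (see DOC: Overview):
 *
 *	ret = drm_sched_init(&ring->sched, &example_sched_ops,
 *			     hw_submission, hang_limit,
 *			     msecs_to_jiffies(timeout_ms), NULL,
 *			     NULL, ring->name);
 *
 *	Per-job setup before the job is pushed to an entity:
 *
 *	ret = drm_sched_job_init(&job->base, &ctx->entity, owner);
 */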