/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/**
 * DOC: Overview
 *
 * The GPU scheduler provides entities which allow userspace to push jobs
 * into software queues which are then scheduled on a hardware run queue.
 * The software queues have a priority among them. The scheduler selects the
 * entities from the run queue using a FIFO. The scheduler provides dependency
 * handling features among jobs. The driver is supposed to provide callback
 * functions for backend operations to the scheduler, such as submitting a job
 * to the hardware run queue, returning the dependencies of a job, etc.
 *
 * The organisation of the scheduler is the following:
 *
 * 1. Each hw run queue has one scheduler
 * 2. Each scheduler has multiple run queues with different priorities
 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
 * 3. Each scheduler run queue has a queue of entities to schedule
 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
 *    the hardware.
 *
 * The jobs in an entity are always scheduled in the order in which they were
 * pushed.
 */

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
#ifdef __linux__
#include <uapi/linux/sched/types.h>
#endif

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);

/**
 * drm_sched_rq_init - initialize a given run queue struct
 *
 * @sched: scheduler instance the run queue belongs to
 * @rq: scheduler run queue
 *
 * Initializes a scheduler runqueue.
 */
static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
			      struct drm_sched_rq *rq)
{
	mtx_init(&rq->lock, IPL_NONE);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
	rq->sched = sched;
}

/**
 * drm_sched_rq_add_entity - add an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Adds a scheduler entity to the run queue.
 */
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_remove_entity - remove an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Removes a scheduler entity from the run queue.
 */
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_select_entity - Select an entity which could provide a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Try to find a ready entity; returns NULL if none is found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	/* Continue the round-robin scan after the last selected entity. */
	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				reinit_completion(&entity->entity_idle);
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	/* Wrap around and scan again from the head of the list. */
	list_for_each_entry(entity, &rq->entities, list) {
		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * drm_sched_dependency_optimized - check if the dependency can be optimized
 *
 * @fence: the dependency fence
 * @entity: the entity which depends on the above fence
 *
 * Returns true if the dependency can be optimized and false otherwise.
 */
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_fence *s_fence;

	if (!fence || dma_fence_is_signaled(fence))
		return false;
	if (fence->context == entity->fence_context)
		return true;
	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched)
		return true;

	return false;
}
EXPORT_SYMBOL(drm_sched_dependency_optimized);

/**
 * drm_sched_start_timeout - start timeout for reset worker
 *
 * @sched: scheduler instance to start the worker for
 *
 * Start the timeout for the given scheduler.
 */
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !list_empty(&sched->ring_mirror_list))
		schedule_delayed_work(&sched->work_tdr, sched->timeout);
}

/**
 * drm_sched_fault - immediately start timeout handler
 *
 * @sched: scheduler where the timeout handling should be started.
 *
 * Start timeout handling immediately when the driver detects a hardware fault.
 */
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
	mod_delayed_work(system_wq, &sched->work_tdr, 0);
}
EXPORT_SYMBOL(drm_sched_fault);

/**
 * drm_sched_suspend_timeout - Suspend scheduler job timeout
 *
 * @sched: scheduler instance for which to suspend the timeout
 *
 * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrarily large value,
 * MAX_SCHEDULE_TIMEOUT in this case.
 *
 * Returns the remaining timeout.
 */
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
{
	unsigned long sched_timeout, now = jiffies;

#ifdef __linux__
	sched_timeout = sched->work_tdr.timer.expires;
#else
	sched_timeout = sched->work_tdr.to.to_time;
#endif

	/*
	 * Modify the timeout to an arbitrarily large value. This also prevents
	 * the timeout from being restarted when new submissions arrive.
	 */
	if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
			&& time_after(sched_timeout, now))
		return sched_timeout - now;
	else
		return sched->timeout;
}
EXPORT_SYMBOL(drm_sched_suspend_timeout);

/**
 * drm_sched_resume_timeout - Resume scheduler job timeout
 *
 * @sched: scheduler instance for which to resume the timeout
 * @remaining: remaining timeout
 *
 * Resume the delayed work timeout for the scheduler.
 */
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
		unsigned long remaining)
{
	spin_lock(&sched->job_list_lock);

	if (list_empty(&sched->ring_mirror_list))
		cancel_delayed_work(&sched->work_tdr);
	else
		mod_delayed_work(system_wq, &sched->work_tdr, remaining);

	spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);

static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}

static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_job *job;

	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);

	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
	spin_lock(&sched->job_list_lock);
	job = list_first_entry_or_null(&sched->ring_mirror_list,
				       struct drm_sched_job, node);

	if (job) {
		/*
		 * Remove the bad job so it cannot be freed by a concurrent
		 * drm_sched_get_cleanup_job. It will be reinserted after
		 * sched->thread is parked, at which point it is safe.
		 */
		list_del_init(&job->node);
		spin_unlock(&sched->job_list_lock);

		job->sched->ops->timedout_job(job);

		/*
		 * The guilty job did complete and hence needs to be manually
		 * removed. See the drm_sched_stop documentation.
		 */
		if (sched->free_guilty) {
			job->sched->ops->free_job(job);
			sched->free_guilty = false;
		}
	} else {
		spin_unlock(&sched->job_list_lock);
	}

	spin_lock(&sched->job_list_lock);
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}

/**
 * drm_sched_increase_karma - Update sched_entity guilty flag
 *
 * @bad: The job guilty of time out
 *
 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
 * limit of the scheduler then the respective sched entity is marked guilty and
 * jobs from it will not be scheduled further.
 */
void drm_sched_increase_karma(struct drm_sched_job *bad)
{
	int i;
	struct drm_sched_entity *tmp;
	struct drm_sched_entity *entity;
	struct drm_gpu_scheduler *sched = bad->sched;

	/* Don't increase @bad's karma if it's from the KERNEL run queue:
	 * a GPU hang can leave kernel jobs (like VM updating jobs) corrupted,
	 * but kernel jobs are always considered good.
	 */
	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		atomic_inc(&bad->karma);
		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
		     i++) {
			struct drm_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context ==
				    entity->fence_context) {
					if (atomic_read(&bad->karma) >
					    bad->sched->hang_limit)
						if (entity->guilty)
							atomic_set(entity->guilty, 1);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_increase_karma);

/**
 * drm_sched_stop - stop the scheduler
 *
 * @sched: scheduler instance
 * @bad: job which caused the time out
 *
 * Stops the scheduler and also removes and frees all completed jobs.
 * Note: the bad job will not be freed, as it might be used later, so it is
 * the caller's responsibility to release it manually if it is no longer part
 * of the mirror list.
 */
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job, *tmp;

	kthread_park(sched->thread);

	/*
	 * Reinsert the bad job here - now it's safe as
	 * drm_sched_get_cleanup_job cannot race against us and release the
	 * bad job at this point - we parked (waited for) any in progress
	 * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
	 * now until the scheduler thread is unparked.
	 */
	if (bad && bad->sched == sched)
		/*
		 * Add at the head of the queue to reflect it was the earliest
		 * job extracted.
		 */
		list_add(&bad->node, &sched->ring_mirror_list);

	/*
	 * Iterate the job list from later to earlier one and either deactivate
	 * their HW callbacks or remove them from the mirror list if they have
	 * already signaled.
	 * This iteration is thread safe as the sched thread is stopped.
	 */
	list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list, node) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->cb)) {
			atomic_dec(&sched->hw_rq_count);
		} else {
			/*
			 * Remove the job from ring_mirror_list.
			 * Locking here is for concurrent resume timeout.
			 */
			spin_lock(&sched->job_list_lock);
			list_del_init(&s_job->node);
			spin_unlock(&sched->job_list_lock);

			/*
			 * Wait for the job's HW fence callback to finish using
			 * s_job before releasing it.
			 *
			 * The job is still alive, so the fence refcount is at
			 * least 1.
			 */
			dma_fence_wait(&s_job->s_fence->finished, false);

			/*
			 * We must keep the bad job alive for later use during
			 * recovery by some of the drivers but leave a hint
			 * that the guilty job must be released.
			 */
			if (bad != s_job)
				sched->ops->free_job(s_job);
			else
				sched->free_guilty = true;
		}
	}

	/*
	 * Stop the pending timer in flight as we rearm it in drm_sched_start.
	 * This avoids the pending timeout work in progress firing right away
	 * after this TDR finished and before the newly restarted jobs had a
	 * chance to complete.
	 */
	cancel_delayed_work(&sched->work_tdr);
}
EXPORT_SYMBOL(drm_sched_stop);

/**
 * drm_sched_start - recover jobs after a reset
 *
 * @sched: scheduler instance
 * @full_recovery: proceed with complete sched restart
 */
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
	struct drm_sched_job *s_job, *tmp;
	int r;

	/*
	 * Locking the list is not required here as the sched thread is parked
	 * so no new jobs are being inserted or removed. Also concurrent
	 * GPU recoveries can't run in parallel.
	 */
	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct dma_fence *fence = s_job->s_fence->parent;

		atomic_inc(&sched->hw_rq_count);

		if (!full_recovery)
			continue;

		if (fence) {
			r = dma_fence_add_callback(fence, &s_job->cb,
						   drm_sched_process_job);
			if (r == -ENOENT)
				drm_sched_process_job(fence, &s_job->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
		} else
			drm_sched_process_job(NULL, &s_job->cb);
	}

	if (full_recovery) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}

	kthread_unpark(sched->thread);
}
EXPORT_SYMBOL(drm_sched_start);

/**
 * drm_sched_resubmit_jobs - helper to relaunch jobs from the mirror list
 *
 * @sched: scheduler instance
 */
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job, *tmp;
	uint64_t guilty_context;
	bool found_guilty = false;
	struct dma_fence *fence;

	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		dma_fence_put(s_job->s_fence->parent);
		fence = sched->ops->run_job(s_job);

		if (IS_ERR_OR_NULL(fence)) {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			s_job->s_fence->parent = NULL;
		} else {
			s_job->s_fence->parent = fence;
		}
	}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);

/**
 * drm_sched_job_init - init a scheduler job
 *
 * @job: scheduler job to init
 * @entity: scheduler entity to use
 * @owner: job owner for debugging
 *
 * Refer to drm_sched_entity_push_job() documentation
 * for locking considerations.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner)
{
	struct drm_gpu_scheduler *sched;

	drm_sched_entity_select_rq(entity);
	if (!entity->rq)
		return -ENOENT;

	sched = entity->rq->sched;

	job->sched = sched;
	job->entity = entity;
	job->s_priority = entity->rq - sched->sched_rq;
	job->s_fence = drm_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;
	job->id = atomic64_inc_return(&sched->job_id_count);

	INIT_LIST_HEAD(&job->node);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);

/**
 * drm_sched_job_cleanup - clean up scheduler job resources
 *
 * @job: scheduler job to clean up
 */
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
	dma_fence_put(&job->s_fence->finished);
	job->s_fence = NULL;
}
EXPORT_SYMBOL(drm_sched_job_cleanup);

/**
 * drm_sched_ready - is the scheduler ready
 *
 * @sched: scheduler instance
 *
 * Return true if we can push more jobs to the hw, otherwise false.
 */
static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * drm_sched_wakeup - Wake up the scheduler when it is ready
 *
 * @sched: scheduler instance
 */
void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
	if (drm_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_select_entity - Select next entity to process
 *
 * @sched: scheduler instance
 *
 * Returns the entity to process or NULL if none are found.
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *entity;
	int i;

	if (!drm_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

/**
 * drm_sched_process_job - process a job
 *
 * @f: fence
 * @cb: fence callbacks
 *
 * Called after the job has finished execution.
 */
static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
	struct drm_sched_fence *s_fence = s_job->s_fence;
	struct drm_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	atomic_dec(&sched->num_jobs);

	trace_drm_sched_process_job(s_fence);

	dma_fence_get(&s_fence->finished);
	drm_sched_fence_finished(s_fence);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
 *
 * @sched: scheduler instance
 *
 * Returns the next finished job from the mirror list (if there is one)
 * that is ready to be destroyed.
 */
static struct drm_sched_job *
drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *job;

	/*
	 * Don't destroy jobs while the timeout worker is running OR while the
	 * thread is being parked and hence assumed not to touch
	 * ring_mirror_list.
	 */
	if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !cancel_delayed_work(&sched->work_tdr)) ||
	    kthread_should_park())
		return NULL;

	spin_lock(&sched->job_list_lock);

	job = list_first_entry_or_null(&sched->ring_mirror_list,
				       struct drm_sched_job, node);

	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
		/* remove job from ring_mirror_list */
		list_del_init(&job->node);
	} else {
		job = NULL;
		/* queue timeout for next job */
		drm_sched_start_timeout(sched);
	}

	spin_unlock(&sched->job_list_lock);

	return job;
}

/**
 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
 * @sched_list: list of drm_gpu_schedulers
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list
 *
 * Returns a pointer to the sched with the least load, or NULL if none of the
 * drm_gpu_schedulers are ready.
 */
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list)
{
	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
	int i;
	unsigned int min_jobs = UINT_MAX, num_jobs;

	for (i = 0; i < num_sched_list; ++i) {
		sched = sched_list[i];

		if (!sched->ready) {
			DRM_WARN("scheduler %s is not ready, skipping",
				 sched->name);
			continue;
		}

		num_jobs = atomic_read(&sched->num_jobs);
		if (num_jobs < min_jobs) {
			min_jobs = num_jobs;
			picked_sched = sched;
		}
	}

	return picked_sched;
}
EXPORT_SYMBOL(drm_sched_pick_best);

/**
 * drm_sched_blocked - check if the scheduler is blocked
 *
 * @sched: scheduler instance
 *
 * Returns true if blocked, otherwise false.
 */
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

/**
 * drm_sched_main - main scheduler thread
 *
 * @param: scheduler instance
 *
 * Returns 0.
 */
static int drm_sched_main(void *param)
{
#ifdef __linux__
	struct sched_param sparam = {.sched_priority = 1};
#endif
	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
	int r;

#ifdef __linux__
	sched_setscheduler(current, SCHED_FIFO, &sparam);
#endif

	while (!kthread_should_stop()) {
		struct drm_sched_entity *entity = NULL;
		struct drm_sched_fence *s_fence;
		struct drm_sched_job *sched_job;
		struct dma_fence *fence;
		struct drm_sched_job *cleanup_job = NULL;

		wait_event_interruptible(sched->wake_up_worker,
					 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
					 (!drm_sched_blocked(sched) &&
					  (entity = drm_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (cleanup_job) {
			sched->ops->free_job(cleanup_job);
			/* queue timeout for next job */
			drm_sched_start_timeout(sched);
		}

		if (!entity)
			continue;

		sched_job = drm_sched_entity_pop_job(entity);

		complete(&entity->entity_idle);

		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		drm_sched_job_begin(sched_job);

		trace_drm_run_job(sched_job, entity);
		fence = sched->ops->run_job(sched_job);
		drm_sched_fence_scheduled(s_fence);

		if (!IS_ERR_OR_NULL(fence)) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &sched_job->cb,
						   drm_sched_process_job);
			if (r == -ENOENT)
				drm_sched_process_job(fence, &sched_job->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			drm_sched_process_job(NULL, &sched_job->cb);
		}

		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * drm_sched_init - Init a gpu scheduler instance
 *
 * @sched: scheduler instance
 * @ops: backend operations for this scheduler
 * @hw_submission: number of hw submissions that can be in flight
 * @hang_limit: number of times to allow a job to hang before dropping it
 * @timeout: timeout value in jiffies for the scheduler
 * @name: name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   unsigned hw_submission,
		   unsigned hang_limit,
		   long timeout,
		   const char *name)
{
	int i, ret;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->hang_limit = hang_limit;
	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
		drm_sched_rq_init(sched, &sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->ring_mirror_list);
	mtx_init(&sched->job_list_lock, IPL_NONE);
	atomic_set(&sched->hw_rq_count, 0);
	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
	atomic_set(&sched->num_jobs, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		ret = PTR_ERR(sched->thread);
		sched->thread = NULL;
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return ret;
	}

	sched->ready = true;
	return 0;
}
EXPORT_SYMBOL(drm_sched_init);

/**
 * drm_sched_fini - Destroy a gpu scheduler
 *
 * @sched: scheduler instance
 *
 * Tears down and cleans up the scheduler.
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);

	sched->ready = false;
}
EXPORT_SYMBOL(drm_sched_fini);