/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/**
 * DOC: Overview
 *
 * The GPU scheduler provides entities which allow userspace to push jobs
 * into software queues which are then scheduled on a hardware run queue.
 * The software queues have a priority among them. The scheduler selects the
 * entities from the run queue using a FIFO. The scheduler provides dependency
 * handling features among jobs. The driver is supposed to provide callback
 * functions for backend operations to the scheduler, like submitting a job to
 * the hardware run queue, returning the dependencies of a job, etc.
 *
 * The organisation of the scheduler is the following:
 *
 * 1. Each hw run queue has one scheduler
 * 2. Each scheduler has multiple run queues with different priorities
 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
 * 3. Each scheduler run queue has a queue of entities to schedule
 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
 *    the hardware.
 *
 * The jobs in an entity are always scheduled in the order in which they were
 * pushed.
 *
 * Note that once a job has been taken from the entity's queue and pushed to
 * the hardware, i.e. the pending queue, the entity must not be referenced
 * any more through the job's entity pointer.
 */
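/*
 * Example: a minimal driver-side backend. This is only an illustrative
 * sketch, not code from any real driver: the "foo_*" names, the foo_job and
 * foo_ring structures and the foo_hw_submit() helper are assumptions made
 * for the example. It shows the three callbacks a driver is expected to
 * provide through &drm_sched_backend_ops.
 *
 *	struct foo_job {
 *		struct drm_sched_job base;
 *		struct dma_fence *hw_fence;	// fence signalled by the HW
 *	};
 *
 *	static struct dma_fence *foo_run_job(struct drm_sched_job *sched_job)
 *	{
 *		struct foo_job *job = container_of(sched_job, struct foo_job, base);
 *
 *		// push the job to the hardware ring and return the HW fence
 *		return foo_hw_submit(job);
 *	}
 *
 *	static enum drm_gpu_sched_stat
 *	foo_timedout_job(struct drm_sched_job *sched_job)
 *	{
 *		// recover the hardware, see drm_sched_stop()/drm_sched_start()
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 *
 *	static void foo_free_job(struct drm_sched_job *sched_job)
 *	{
 *		struct foo_job *job = container_of(sched_job, struct foo_job, base);
 *
 *		drm_sched_job_cleanup(sched_job);
 *		kfree(job);
 *	}
 *
 *	static const struct drm_sched_backend_ops foo_sched_ops = {
 *		.run_job = foo_run_job,
 *		.timedout_job = foo_timedout_job,
 *		.free_job = foo_free_job,
 *	};
 */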
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/dma-resv.h>
#ifdef __linux__
#include <uapi/linux/sched/types.h>
#endif

#include <drm/drm_print.h>
#include <drm/drm_gem.h>
#include <drm/drm_syncobj.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

int drm_sched_policy = DRM_SCHED_POLICY_FIFO;

/**
 * DOC: sched_policy (int)
 * Used to override the default entity scheduling policy in a run queue.
 */
MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
module_param_named(sched_policy, drm_sched_policy, int, 0444);

static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
							    const struct rb_node *b)
{
	struct drm_sched_entity *ent_a = rb_entry((a), struct drm_sched_entity, rb_tree_node);
	struct drm_sched_entity *ent_b = rb_entry((b), struct drm_sched_entity, rb_tree_node);

	return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
}

static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq = entity->rq;

	if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
		rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
		RB_CLEAR_NODE(&entity->rb_tree_node);
	}
}

void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts)
{
	/*
	 * Both locks need to be grabbed, one to protect from entity->rq change
	 * for entity from within concurrent drm_sched_entity_select_rq and the
	 * other to update the rb tree structure.
	 */
	spin_lock(&entity->rq_lock);
	spin_lock(&entity->rq->lock);

	drm_sched_rq_remove_fifo_locked(entity);

	entity->oldest_job_waiting = ts;

	rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
		      drm_sched_entity_compare_before);

	spin_unlock(&entity->rq->lock);
	spin_unlock(&entity->rq_lock);
}

/**
 * drm_sched_rq_init - initialize a given run queue struct
 *
 * @sched: scheduler instance to associate with this run queue
 * @rq: scheduler run queue
 *
 * Initializes a scheduler runqueue.
 */
static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
			      struct drm_sched_rq *rq)
{
	mtx_init(&rq->lock, IPL_NONE);
	INIT_LIST_HEAD(&rq->entities);
	rq->rb_tree_root = RB_ROOT_CACHED;
	rq->current_entity = NULL;
	rq->sched = sched;
}

/**
 * drm_sched_rq_add_entity - add an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Adds a scheduler entity to the run queue.
 */
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);

	atomic_inc(rq->sched->score);
	list_add_tail(&entity->list, &rq->entities);

	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_remove_entity - remove an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Removes a scheduler entity from the run queue.
 */
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);

	atomic_dec(rq->sched->score);
	list_del_init(&entity->list);

	if (rq->current_entity == entity)
		rq->current_entity = NULL;

	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
		drm_sched_rq_remove_fifo_locked(entity);

	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				reinit_completion(&entity->entity_idle);
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Find oldest waiting ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
{
	struct rb_node *rb;

	spin_lock(&rq->lock);
	for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
		struct drm_sched_entity *entity;

		entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			break;
		}
	}
	spin_unlock(&rq->lock);

	return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
}

/**
 * drm_sched_job_done - complete a job
 * @s_job: pointer to the job which is done
 *
 * Finish the job's fence and wake up the worker thread.
 */
static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
{
	struct drm_sched_fence *s_fence = s_job->s_fence;
	struct drm_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	atomic_dec(sched->score);

	trace_drm_sched_process_job(s_fence);

	dma_fence_get(&s_fence->finished);
	drm_sched_fence_finished(s_fence, result);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_job_done_cb - the callback for a done job
 * @f: fence
 * @cb: fence callbacks
 */
static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);

	drm_sched_job_done(s_job, f->error);
}

/**
 * drm_sched_start_timeout - start timeout for reset worker
 *
 * @sched: scheduler instance to start the worker for
 *
 * Start the timeout for the given scheduler.
 */
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !list_empty(&sched->pending_list))
		queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
}

/**
 * drm_sched_fault - immediately start timeout handler
 *
 * @sched: scheduler where the timeout handling should be started.
 *
 * Start timeout handling immediately when the driver detects a hardware fault.
 */
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
	if (sched->timeout_wq)
		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
}
EXPORT_SYMBOL(drm_sched_fault);
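/*
 * Example: kicking off timeout handling from fault handling code. A hedged
 * sketch only; the foo_fault_irq() handler and the foo_ring structure that
 * embeds a scheduler instance are assumptions, not part of this file.
 *
 *	static irqreturn_t foo_fault_irq(int irq, void *data)
 *	{
 *		struct foo_ring *ring = data;
 *
 *		// the hardware reported a fault, don't wait for the job
 *		// timeout to expire before starting recovery
 *		drm_sched_fault(&ring->sched);
 *		return IRQ_HANDLED;
 *	}
 */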
/**
 * drm_sched_suspend_timeout - Suspend scheduler job timeout
 *
 * @sched: scheduler instance for which to suspend the timeout
 *
 * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrary large value,
 * MAX_SCHEDULE_TIMEOUT in this case.
 *
 * Returns the timeout remaining
 *
 */
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
{
	unsigned long sched_timeout, now = jiffies;

#ifdef __linux__
	sched_timeout = sched->work_tdr.timer.expires;
#else
	sched_timeout = sched->work_tdr.to.to_time;
#endif

	/*
	 * Modify the timeout to an arbitrarily large value. This also prevents
	 * the timeout from being restarted when new submissions arrive.
	 */
	if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
			&& time_after(sched_timeout, now))
		return sched_timeout - now;
	else
		return sched->timeout;
}
EXPORT_SYMBOL(drm_sched_suspend_timeout);

/**
 * drm_sched_resume_timeout - Resume scheduler job timeout
 *
 * @sched: scheduler instance for which to resume the timeout
 * @remaining: remaining timeout
 *
 * Resume the delayed work timeout for the scheduler.
 */
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining)
{
	spin_lock(&sched->job_list_lock);

	if (list_empty(&sched->pending_list))
		cancel_delayed_work(&sched->work_tdr);
	else
		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);

	spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);
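/*
 * Example: pairing the two calls above around a window in which the hardware
 * is intentionally not making progress (for instance while the driver takes
 * a lengthy register dump). A hedged sketch; the foo_pause_hw() and
 * foo_resume_hw() helpers are assumptions made for the example.
 *
 *	unsigned long remaining;
 *
 *	remaining = drm_sched_suspend_timeout(&ring->sched);
 *	foo_pause_hw(ring);
 *	// ... work that must not trigger a spurious job timeout ...
 *	foo_resume_hw(ring);
 *	drm_sched_resume_timeout(&ring->sched, remaining);
 */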
static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->list, &sched->pending_list);
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}

static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_job *job;
	enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;

	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);

	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
	spin_lock(&sched->job_list_lock);
	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job) {
		/*
		 * Remove the bad job so it cannot be freed by concurrent
		 * drm_sched_cleanup_jobs. It will be reinserted after
		 * sched->thread is parked, at which point it's safe.
		 */
		list_del_init(&job->list);
		spin_unlock(&sched->job_list_lock);

		status = job->sched->ops->timedout_job(job);

		/*
		 * Guilty job did complete and hence needs to be manually removed.
		 * See drm_sched_stop doc.
		 */
		if (sched->free_guilty) {
			job->sched->ops->free_job(job);
			sched->free_guilty = false;
		}
	} else {
		spin_unlock(&sched->job_list_lock);
	}

	if (status != DRM_GPU_SCHED_STAT_ENODEV) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}
}

/**
 * drm_sched_stop - stop the scheduler
 *
 * @sched: scheduler instance
 * @bad: job which caused the time out
 *
 * Stop the scheduler and also removes and frees all completed jobs.
 * Note: the bad job will not be freed as it might be used later, so it is the
 * caller's responsibility to release it manually if it is not part of the
 * pending list any more.
 *
 */
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job, *tmp;

	kthread_park(sched->thread);

	/*
	 * Reinsert back the bad job here - now it's safe as
	 * drm_sched_get_cleanup_job cannot race against us and release the
	 * bad job at this point - we parked (waited for) any in progress
	 * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
	 * now until the scheduler thread is unparked.
	 */
	if (bad && bad->sched == sched)
		/*
		 * Add at the head of the queue to reflect it was the earliest
		 * job extracted.
		 */
		list_add(&bad->list, &sched->pending_list);

	/*
	 * Iterate the job list from later to earlier one and either deactivate
	 * their HW callbacks or remove them from the pending list if they have
	 * already signaled.
	 * This iteration is thread safe as the sched thread is stopped.
	 */
	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
					 list) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_dec(&sched->hw_rq_count);
		} else {
			/*
			 * remove job from pending_list.
			 * Locking here is for concurrent resume timeout
			 */
			spin_lock(&sched->job_list_lock);
			list_del_init(&s_job->list);
			spin_unlock(&sched->job_list_lock);

			/*
			 * Wait for job's HW fence callback to finish using s_job
			 * before releasing it.
			 *
			 * Job is still alive so the fence refcount is at least 1
			 */
			dma_fence_wait(&s_job->s_fence->finished, false);

			/*
			 * We must keep the bad job alive for later use during
			 * recovery by some of the drivers but leave a hint
			 * that the guilty job must be released.
			 */
			if (bad != s_job)
				sched->ops->free_job(s_job);
			else
				sched->free_guilty = true;
		}
	}

	/*
	 * Stop the pending timer in flight as we rearm it in drm_sched_start.
	 * This avoids the pending timeout work in progress firing right away
	 * after this TDR finished and before the newly restarted jobs had a
	 * chance to complete.
	 */
	cancel_delayed_work(&sched->work_tdr);
}

EXPORT_SYMBOL(drm_sched_stop);

/**
 * drm_sched_start - recover jobs after a reset
 *
 * @sched: scheduler instance
 * @full_recovery: proceed with complete sched restart
 *
 */
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
	struct drm_sched_job *s_job, *tmp;
	int r;

	/*
	 * Locking the list is not required here as the sched thread is parked
	 * so no new jobs are being inserted or removed. Also concurrent
	 * GPU recoveries can't run in parallel.
	 */
	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct dma_fence *fence = s_job->s_fence->parent;

		atomic_inc(&sched->hw_rq_count);

		if (!full_recovery)
			continue;

		if (fence) {
			r = dma_fence_add_callback(fence, &s_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(s_job, fence->error);
			else if (r)
				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
					      r);
		} else
			drm_sched_job_done(s_job, -ECANCELED);
	}

	if (full_recovery) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}

	kthread_unpark(sched->thread);
}
EXPORT_SYMBOL(drm_sched_start);
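/*
 * Example: a typical timeout/reset ("TDR") sequence in a driver's
 * &drm_sched_backend_ops.timedout_job callback, combining drm_sched_stop(),
 * drm_sched_increase_karma(), a driver specific reset, job resubmission and
 * drm_sched_start(). A hedged sketch loosely following what several drivers
 * do; foo_ring, to_foo_ring() and foo_hw_reset() are assumptions made for
 * the example.
 *
 *	static enum drm_gpu_sched_stat
 *	foo_timedout_job(struct drm_sched_job *sched_job)
 *	{
 *		struct foo_ring *ring = to_foo_ring(sched_job->sched);
 *
 *		drm_sched_stop(&ring->sched, sched_job);
 *		drm_sched_increase_karma(sched_job);
 *
 *		foo_hw_reset(ring);
 *
 *		// re-push unfinished jobs to the hardware (legacy approach,
 *		// see the drm_sched_resubmit_jobs() comment below)
 *		drm_sched_resubmit_jobs(&ring->sched);
 *		drm_sched_start(&ring->sched, true);
 *
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 */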
/**
 * drm_sched_resubmit_jobs - Deprecated, don't use in new code!
 *
 * @sched: scheduler instance
 *
 * Re-submitting jobs was a concept AMD came up with as a cheap way to
 * implement recovery after a job timeout.
 *
 * This turned out to not work very well. First of all there are many
 * problems with the dma_fence implementation and requirements. Either the
 * implementation is risking deadlocks with core memory management or violating
 * documented implementation details of the dma_fence object.
 *
 * Drivers can still save and restore their state for recovery operations, but
 * we shouldn't make this a general scheduler feature around the dma_fence
 * interface.
 */
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job, *tmp;
	uint64_t guilty_context;
	bool found_guilty = false;
	struct dma_fence *fence;

	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		fence = sched->ops->run_job(s_job);

		if (IS_ERR_OR_NULL(fence)) {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			s_job->s_fence->parent = NULL;
		} else {

			s_job->s_fence->parent = dma_fence_get(fence);

			/* Drop for original kref_init */
			dma_fence_put(fence);
		}
	}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);

/**
 * drm_sched_job_init - init a scheduler job
 * @job: scheduler job to init
 * @entity: scheduler entity to use
 * @owner: job owner for debugging
 *
 * Refer to drm_sched_entity_push_job() documentation
 * for locking considerations.
 *
 * Drivers must make sure to call drm_sched_job_cleanup() if this function
 * returns successfully, even when @job is aborted before drm_sched_job_arm()
 * is called.
 *
 * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
 * has died, which can mean that there's no valid runqueue for an @entity.
 * This function returns -ENOENT in this case (which probably should be -EIO as
 * a more meaningful return value).
 *
 * Returns 0 for success, negative error code otherwise.
 */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner)
{
	if (!entity->rq)
		return -ENOENT;

	/*
	 * We don't know for sure how the user has allocated the job. Thus,
	 * zero the struct so that unallowed (i.e., too early) usage of
	 * pointers that this function does not set is guaranteed to lead to
	 * a NULL pointer exception instead of UB.
	 */
	memset(job, 0, sizeof(*job));

	job->entity = entity;
	job->s_fence = drm_sched_fence_alloc(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;

	INIT_LIST_HEAD(&job->list);

	xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);

/**
 * drm_sched_job_arm - arm a scheduler job for execution
 * @job: scheduler job to arm
 *
 * This arms a scheduler job for execution. Specifically it initializes the
 * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
 * or other places that need to track the completion of this job.
 *
 * Refer to drm_sched_entity_push_job() documentation for locking
 * considerations.
 *
 * This can only be called if drm_sched_job_init() succeeded.
 */
void drm_sched_job_arm(struct drm_sched_job *job)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_entity *entity = job->entity;

	BUG_ON(!entity);
	drm_sched_entity_select_rq(entity);
	sched = entity->rq->sched;

	job->sched = sched;
	job->s_priority = entity->rq - sched->sched_rq;
	job->id = atomic64_inc_return(&sched->job_id_count);

	drm_sched_fence_init(job->s_fence, job->entity);
}
EXPORT_SYMBOL(drm_sched_job_arm);
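/*
 * Example: the usual driver submission flow built from the functions above
 * and drm_sched_entity_push_job(). A hedged sketch; the foo_job structure,
 * the in_fence argument and the error label are assumptions made for the
 * example, not part of this file.
 *
 *	static int foo_submit(struct foo_job *job, struct drm_sched_entity *entity,
 *			      struct dma_fence *in_fence, void *owner)
 *	{
 *		int ret;
 *
 *		ret = drm_sched_job_init(&job->base, entity, owner);
 *		if (ret)
 *			return ret;
 *
 *		// optional explicit dependency, consumed even on error
 *		ret = drm_sched_job_add_dependency(&job->base, in_fence);
 *		if (ret)
 *			goto err_cleanup;
 *
 *		drm_sched_job_arm(&job->base);
 *		drm_sched_entity_push_job(&job->base);
 *		return 0;
 *
 *	err_cleanup:
 *		// aborted before drm_sched_job_arm(), cleanup is mandatory
 *		drm_sched_job_cleanup(&job->base);
 *		return ret;
 *	}
 */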
/**
 * drm_sched_job_add_dependency - adds the fence as a job dependency
 * @job: scheduler job to add the dependencies to
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Note that @fence is consumed in both the success and error cases.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(&job->dependencies, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(&job->dependencies, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_sched_job_add_dependency);

/**
 * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency
 * @job: scheduler job to add the dependencies to
 * @file: drm file private pointer
 * @handle: syncobj handle to lookup
 * @point: timeline point
 *
 * This adds the fence matching the given syncobj to @job.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
					 struct drm_file *file,
					 u32 handle,
					 u32 point)
{
	struct dma_fence *fence;
	int ret;

	ret = drm_syncobj_find_fence(file, handle, point, 0, &fence);
	if (ret)
		return ret;

	return drm_sched_job_add_dependency(job, fence);
}
EXPORT_SYMBOL(drm_sched_job_add_syncobj_dependency);
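/*
 * Example: resolving a user supplied "in" syncobj in an IOCTL and turning it
 * into a job dependency. A hedged sketch; the args structure and its field
 * names are assumptions made for the example.
 *
 *	ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv,
 *						   args->in_syncobj,
 *						   args->in_syncobj_point);
 *	if (ret)
 *		goto err_cleanup;
 */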
/**
 * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
 * @job: scheduler job to add the dependencies to
 * @resv: the dma_resv object to get the fences from
 * @usage: the dma_resv_usage to use to filter the fences
 *
 * This adds all fences matching the given usage from @resv to @job.
 * Must be called with the @resv lock held.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
					struct dma_resv *resv,
					enum dma_resv_usage usage)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int ret;

	dma_resv_assert_held(resv);

	dma_resv_for_each_fence(&cursor, resv, usage, fence) {
		/* Make sure to grab an additional ref on the added fence */
		dma_fence_get(fence);
		ret = drm_sched_job_add_dependency(job, fence);
		if (ret) {
			dma_fence_put(fence);
			return ret;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies);

/**
 * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
 *   dependencies
 * @job: scheduler job to add the dependencies to
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write)
{
	return drm_sched_job_add_resv_dependencies(job, obj->resv,
						   dma_resv_usage_rw(write));
}
EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
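/*
 * Example: pulling in implicit fences from all buffer objects of a submission
 * while their reservations are locked. A hedged sketch; the bos[]/num_bos
 * variables and the write flag are assumptions made for the example.
 *
 *	ret = drm_gem_lock_reservations(bos, num_bos, &acquire_ctx);
 *	if (ret)
 *		goto err_cleanup;
 *
 *	for (i = 0; i < num_bos; i++) {
 *		ret = drm_sched_job_add_implicit_dependencies(&job->base,
 *							      bos[i], write);
 *		if (ret)
 *			goto err_unlock;
 *	}
 */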
/**
 * drm_sched_job_cleanup - clean up scheduler job resources
 * @job: scheduler job to clean up
 *
 * Cleans up the resources allocated with drm_sched_job_init().
 *
 * Drivers should call this from their error unwind code if @job is aborted
 * before drm_sched_job_arm() is called.
 *
 * After that point of no return @job is committed to be executed by the
 * scheduler, and this function should be called from the
 * &drm_sched_backend_ops.free_job callback.
 */
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
	struct dma_fence *fence;
	unsigned long index;

	if (kref_read(&job->s_fence->finished.refcount)) {
		/* drm_sched_job_arm() has been called */
		dma_fence_put(&job->s_fence->finished);
	} else {
		/* aborted job before committing to run it */
		drm_sched_fence_free(job->s_fence);
	}

	job->s_fence = NULL;

	xa_for_each(&job->dependencies, index, fence) {
		dma_fence_put(fence);
	}
	xa_destroy(&job->dependencies);

}
EXPORT_SYMBOL(drm_sched_job_cleanup);

/**
 * drm_sched_can_queue -- Can we queue more to the hardware?
 * @sched: scheduler instance
 *
 * Return true if we can push more jobs to the hw, otherwise false.
 */
static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * drm_sched_wakeup_if_can_queue - Wake up the scheduler
 * @sched: scheduler instance
 *
 * Wake up the scheduler if we can queue jobs.
 */
void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched)
{
	if (drm_sched_can_queue(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_select_entity - Select next entity to process
 *
 * @sched: scheduler instance
 *
 * Returns the entity to process or NULL if none are found.
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *entity;
	int i;

	if (!drm_sched_can_queue(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
			drm_sched_rq_select_entity_fifo(&sched->sched_rq[i]) :
			drm_sched_rq_select_entity_rr(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

/**
 * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
 *
 * @sched: scheduler instance
 *
 * Returns the next finished job from the pending list (if there is one)
 * ready for it to be destroyed.
 */
static struct drm_sched_job *
drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *job, *next;

	spin_lock(&sched->job_list_lock);

	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
		/* remove job from pending_list */
		list_del_init(&job->list);

		/* cancel this job's TO timer */
		cancel_delayed_work(&sched->work_tdr);
		/* make the scheduled timestamp more accurate */
		next = list_first_entry_or_null(&sched->pending_list,
						typeof(*next), list);

		if (next) {
			next->s_fence->scheduled.timestamp =
				dma_fence_timestamp(&job->s_fence->finished);
			/* start TO timer for next job */
			drm_sched_start_timeout(sched);
		}
	} else {
		job = NULL;
	}

	spin_unlock(&sched->job_list_lock);

	return job;
}

/**
 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
 * @sched_list: list of drm_gpu_schedulers
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list
 *
 * Returns pointer of the sched with the least load or NULL if none of the
 * drm_gpu_schedulers are ready
 */
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list)
{
	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
	int i;
	unsigned int min_score = UINT_MAX, num_score;

	for (i = 0; i < num_sched_list; ++i) {
		sched = sched_list[i];

		if (!sched->ready) {
			DRM_WARN("scheduler %s is not ready, skipping",
				 sched->name);
			continue;
		}

		num_score = atomic_read(sched->score);
		if (num_score < min_score) {
			min_score = num_score;
			picked_sched = sched;
		}
	}

	return picked_sched;
}
EXPORT_SYMBOL(drm_sched_pick_best);
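/*
 * Example: using drm_sched_pick_best() to spread entities across several
 * identical hardware rings by picking the least loaded scheduler at entity
 * creation time. A hedged sketch; the sched_list[] array and the chosen
 * priority are assumptions made for the example.
 *
 *	struct drm_gpu_scheduler *sched;
 *
 *	sched = drm_sched_pick_best(sched_list, num_sched_list);
 *	if (!sched)
 *		return -ENODEV;
 *
 *	ret = drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
 *				    &sched, 1, NULL);
 */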
/**
 * drm_sched_blocked - check if the scheduler is blocked
 *
 * @sched: scheduler instance
 *
 * Returns true if blocked, otherwise false.
 */
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

/**
 * drm_sched_main - main scheduler thread
 *
 * @param: scheduler instance
 *
 * Returns 0.
 */
static int drm_sched_main(void *param)
{
	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
	int r;

#ifdef __linux__
	sched_set_fifo_low(current);
#endif

	while (!kthread_should_stop()) {
		struct drm_sched_entity *entity = NULL;
		struct drm_sched_fence *s_fence;
		struct drm_sched_job *sched_job;
		struct dma_fence *fence;
		struct drm_sched_job *cleanup_job = NULL;

		wait_event_interruptible(sched->wake_up_worker,
					 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
					 (!drm_sched_blocked(sched) &&
					  (entity = drm_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (cleanup_job)
			sched->ops->free_job(cleanup_job);

		if (!entity)
			continue;

		sched_job = drm_sched_entity_pop_job(entity);

		if (!sched_job) {
			complete_all(&entity->entity_idle);
			continue;
		}

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		drm_sched_job_begin(sched_job);

		trace_drm_run_job(sched_job, entity);
		fence = sched->ops->run_job(sched_job);
		complete_all(&entity->entity_idle);
		drm_sched_fence_scheduled(s_fence, fence);

		if (!IS_ERR_OR_NULL(fence)) {
			/* Drop for original kref_init of the fence */
			dma_fence_put(fence);

			r = dma_fence_add_callback(fence, &sched_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(sched_job, fence->error);
			else if (r)
				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
					      r);
		} else {
			drm_sched_job_done(sched_job, IS_ERR(fence) ?
					   PTR_ERR(fence) : 0);
		}

		wake_up(&sched->job_scheduled);
	}
	return 0;
}
/**
 * drm_sched_init - Init a gpu scheduler instance
 *
 * @sched: scheduler instance
 * @ops: backend operations for this scheduler
 * @hw_submission: number of hw submissions that can be in flight
 * @hang_limit: number of times to allow a job to hang before dropping it
 * @timeout: timeout value in jiffies for the scheduler
 * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
 *		used
 * @score: optional score atomic shared with other schedulers
 * @name: name used for debugging
 * @dev: target &struct device
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   unsigned hw_submission, unsigned hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name, struct device *dev)
{
	int i, ret;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->timeout_wq = timeout_wq ? : system_wq;
	sched->hang_limit = hang_limit;
	sched->score = score ? score : &sched->_score;
	sched->dev = dev;
	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
		drm_sched_rq_init(sched, &sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->pending_list);
	mtx_init(&sched->job_list_lock, IPL_NONE);
	atomic_set(&sched->hw_rq_count, 0);
	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
	atomic_set(&sched->_score, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		ret = PTR_ERR(sched->thread);
		sched->thread = NULL;
		DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
		return ret;
	}

	sched->ready = true;
	return 0;
}
EXPORT_SYMBOL(drm_sched_init);
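/*
 * Example: setting up one scheduler per hardware ring at driver init time,
 * using the backend ops sketched near the top of this file. A hedged sketch;
 * the foo_ring structure, the queue depth of 64, the hang limit of 3 and the
 * 500 ms timeout are arbitrary assumptions made for the example, and dev is
 * the driver's &struct device.
 *
 *	ret = drm_sched_init(&ring->sched, &foo_sched_ops,
 *			     64,			// hw_submission
 *			     3,				// hang_limit
 *			     msecs_to_jiffies(500),	// timeout
 *			     NULL,			// timeout_wq: use system_wq
 *			     NULL,			// score: private to this sched
 *			     ring->name, dev);
 *	if (ret)
 *		return ret;
 */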
/**
 * drm_sched_fini - Destroy a gpu scheduler
 *
 * @sched: scheduler instance
 *
 * Tears down and cleans up the scheduler.
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *s_entity;
	int i;

	if (sched->thread)
		kthread_stop(sched->thread);

	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		struct drm_sched_rq *rq = &sched->sched_rq[i];

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list)
			/*
			 * Prevents reinsertion and marks job_queue as idle,
			 * it will be removed from the rq in
			 * drm_sched_entity_fini() eventually
			 */
			s_entity->stopped = true;
		spin_unlock(&rq->lock);

	}

	/* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
	wake_up_all(&sched->job_scheduled);

	/* Confirm no work left behind accessing device structures */
	cancel_delayed_work_sync(&sched->work_tdr);

	sched->ready = false;
}
EXPORT_SYMBOL(drm_sched_fini);

/**
 * drm_sched_increase_karma - Update sched_entity guilty flag
 *
 * @bad: The job guilty of time out
 *
 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
 * limit of the scheduler then the respective sched entity is marked guilty and
 * jobs from it will not be scheduled further.
 */
void drm_sched_increase_karma(struct drm_sched_job *bad)
{
	int i;
	struct drm_sched_entity *tmp;
	struct drm_sched_entity *entity;
	struct drm_gpu_scheduler *sched = bad->sched;

	/* don't change @bad's karma if it's from KERNEL RQ,
	 * because a GPU hang can sometimes corrupt kernel jobs (like VM
	 * updating jobs), but keep in mind that kernel jobs are always
	 * considered good.
	 */
	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		atomic_inc(&bad->karma);

		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
		     i++) {
			struct drm_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context ==
				    entity->fence_context) {
					if (entity->guilty)
						atomic_set(entity->guilty, 1);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_increase_karma);