/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)	\
	container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting to a HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *           entity can be submitted
 * @num_sched_list: number of drm scheds in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note that the &sched_list must have at least one element to schedule the entity.
 *
 * For changing @priority later on at runtime see
 * drm_sched_entity_set_priority(). For changing the set of schedulers
 * @sched_list at runtime see drm_sched_entity_modify_sched().
 *
 * An entity is cleaned up by calling drm_sched_entity_fini(). See also
 * drm_sched_entity_destroy().
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
	RB_CLEAR_NODE(&entity->rb_tree_node);

	if (num_sched_list)
		entity->rq = &sched_list[0]->sched_rq[entity->priority];

	init_completion(&entity->entity_idle);

	/* We start in an idle state. */
	complete_all(&entity->entity_idle);

	mtx_init(&entity->rq_lock, IPL_NONE);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
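/*
 * Illustrative use of drm_sched_entity_init() (a minimal sketch, not code
 * from this file): a driver typically embeds the entity in its per-context
 * state and initializes it against the scheduler of the ring it targets.
 * The "my_ctx" and "ring_sched" names below are hypothetical.
 *
 *	struct drm_gpu_scheduler *sched_list[] = { ring_sched };
 *	int ret;
 *
 *	ret = drm_sched_entity_init(&my_ctx->entity,
 *				    DRM_SCHED_PRIORITY_NORMAL,
 *				    sched_list, ARRAY_SIZE(sched_list),
 *				    NULL);
 *	if (ret)
 *		return ret;
 */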
/**
 * drm_sched_entity_modify_sched - Modify sched of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *		 existing entity->sched_list
 * @num_sched_list: number of drm scheds in sched_list
 *
 * Note that this must be called under the same common lock for @entity as
 * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
 * guarantee through some other means that this is never called while new jobs
 * can be pushed to @entity.
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	spin_lock(&entity->rq_lock);
	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);

static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0 ||
	    entity->stopped)
		return true;

	return false;
}

/* Return true if entity could provide a job. */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * drm_sched_entity_error - return error of last scheduled job
 * @entity: scheduler entity to check
 *
 * Opportunistically return the error of the last scheduled job. Result can
 * change any time when new jobs are pushed to the hw.
 */
int drm_sched_entity_error(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	int r;

	rcu_read_lock();
	fence = rcu_dereference(entity->last_scheduled);
	r = fence ? fence->error : 0;
	rcu_read_unlock();

	return r;
}
EXPORT_SYMBOL(drm_sched_entity_error);
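/*
 * Illustrative check of drm_sched_entity_error() (a sketch, not code from
 * this file): before accepting new work, a driver can bail out on a context
 * whose last job failed. The "my_ctx" name is hypothetical.
 *
 *	int err = drm_sched_entity_error(&my_ctx->entity);
 *
 *	if (err)
 *		return err;
 *
 * A guilty context typically reports -ECANCELED here (see
 * drm_sched_entity_pop_job() below), a killed one -ESRCH.
 */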
static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
	struct drm_sched_job *job = container_of(wrk, typeof(*job), work);

	drm_sched_fence_finished(job->s_fence, -ESRCH);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/* Signal the scheduler finished fence when the entity in question is killed. */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);
	unsigned long index;

	dma_fence_put(f);

	/* Wait for all dependencies to avoid data corruptions */
	xa_for_each(&job->dependencies, index, f) {
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (s_fence && f == &s_fence->scheduled) {
			/* The dependencies array had a reference on the scheduled
			 * fence, and the finished fence refcount might have
			 * dropped to zero. Use dma_fence_get_rcu() so we get
			 * a NULL fence in that case.
			 */
			f = dma_fence_get_rcu(&s_fence->finished);

			/* Now that we have a reference on the finished fence,
			 * we can release the reference the dependencies array
			 * had on the scheduled fence.
			 */
			dma_fence_put(&s_fence->scheduled);
		}

		xa_erase(&job->dependencies, index);
		if (f && !dma_fence_add_callback(f, &job->finish_cb,
						 drm_sched_entity_kill_jobs_cb))
			return;

		dma_fence_put(f);
	}

	INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
	schedule_work(&job->work);
}

/* Remove the entity from the scheduler and kill all pending jobs */
static void drm_sched_entity_kill(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	struct dma_fence *prev;

	if (!entity->rq)
		return;

	spin_lock(&entity->rq_lock);
	entity->stopped = true;
	drm_sched_rq_remove_entity(entity->rq, entity);
	spin_unlock(&entity->rq_lock);

	/* Make sure this entity is not used by the scheduler at the moment */
	wait_for_completion(&entity->entity_idle);

	/* The entity is guaranteed to not be used by the scheduler */
	prev = rcu_dereference_check(entity->last_scheduled, true);
	dma_fence_get(prev);
	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		dma_fence_get(&s_fence->finished);
		if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
						    drm_sched_entity_kill_jobs_cb))
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);

		prev = &s_fence->finished;
	}
	dma_fence_put(prev);
}
/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait, in jiffies, for the job queue to become empty
 *
 * The first half of a split drm_sched_entity_fini(): waits for the entity to
 * become idle, removes it from the runqueue and returns an error when the
 * process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
#ifdef __linux__
	struct task_struct *last_user;
#else
	struct process *last_user, *curpr;
#endif
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini; consume existing
	 * queued IBs or discard them on SIGKILL.
	 */
#ifdef __linux__
	if (current->flags & PF_EXITING) {
#else
	curpr = curproc->p_p;
	if (curpr->ps_flags & PS_EXITING) {
#endif
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process, disable enqueueing of any more IBs right now */
#ifdef __linux__
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
#else
	last_user = cmpxchg(&entity->last_user, curpr, NULL);
	if ((!last_user || last_user == curproc->p_p) &&
	    (curpr->ps_flags & PS_EXITING) &&
	    (curpr->ps_xsig == SIGKILL))
#endif
		drm_sched_entity_kill(entity);

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Cleans up @entity which has been initialized by drm_sched_entity_init().
 *
 * If there are potentially jobs still in flight or getting newly queued,
 * drm_sched_entity_flush() must be called first. This function then goes over
 * the entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	/*
	 * If consumption of existing IBs wasn't completed, forcefully remove
	 * them here. Also makes sure that the scheduler won't touch this entity
	 * any more.
	 */
	drm_sched_entity_kill(entity);

	if (entity->dependency) {
		dma_fence_remove_callback(entity->dependency, &entity->cb);
		dma_fence_put(entity->dependency);
		entity->dependency = NULL;
	}

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
}
EXPORT_SYMBOL(drm_sched_entity_fini);
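/*
 * Illustrative teardown (a sketch, not code from this file): drivers that
 * want a custom timeout call the two steps by hand; drm_sched_entity_destroy()
 * below wraps exactly this pair. The "my_ctx" name is hypothetical.
 *
 *	drm_sched_entity_flush(&my_ctx->entity, msecs_to_jiffies(500));
 *	drm_sched_entity_fini(&my_ctx->entity);
 */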
349 */ 350 void drm_sched_entity_destroy(struct drm_sched_entity *entity) 351 { 352 drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY); 353 drm_sched_entity_fini(entity); 354 } 355 EXPORT_SYMBOL(drm_sched_entity_destroy); 356 357 /* drm_sched_entity_clear_dep - callback to clear the entities dependency */ 358 static void drm_sched_entity_clear_dep(struct dma_fence *f, 359 struct dma_fence_cb *cb) 360 { 361 struct drm_sched_entity *entity = 362 container_of(cb, struct drm_sched_entity, cb); 363 364 entity->dependency = NULL; 365 dma_fence_put(f); 366 } 367 368 /* 369 * drm_sched_entity_clear_dep - callback to clear the entities dependency and 370 * wake up scheduler 371 */ 372 static void drm_sched_entity_wakeup(struct dma_fence *f, 373 struct dma_fence_cb *cb) 374 { 375 struct drm_sched_entity *entity = 376 container_of(cb, struct drm_sched_entity, cb); 377 378 drm_sched_entity_clear_dep(f, cb); 379 drm_sched_wakeup_if_can_queue(entity->rq->sched); 380 } 381 382 /** 383 * drm_sched_entity_set_priority - Sets priority of the entity 384 * 385 * @entity: scheduler entity 386 * @priority: scheduler priority 387 * 388 * Update the priority of runqueus used for the entity. 389 */ 390 void drm_sched_entity_set_priority(struct drm_sched_entity *entity, 391 enum drm_sched_priority priority) 392 { 393 spin_lock(&entity->rq_lock); 394 entity->priority = priority; 395 spin_unlock(&entity->rq_lock); 396 } 397 EXPORT_SYMBOL(drm_sched_entity_set_priority); 398 399 /* 400 * Add a callback to the current dependency of the entity to wake up the 401 * scheduler when the entity becomes available. 402 */ 403 static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) 404 { 405 struct drm_gpu_scheduler *sched = entity->rq->sched; 406 struct dma_fence *fence = entity->dependency; 407 struct drm_sched_fence *s_fence; 408 409 if (fence->context == entity->fence_context || 410 fence->context == entity->fence_context + 1) { 411 /* 412 * Fence is a scheduled/finished fence from a job 413 * which belongs to the same entity, we can ignore 414 * fences from ourself 415 */ 416 dma_fence_put(entity->dependency); 417 return false; 418 } 419 420 s_fence = to_drm_sched_fence(fence); 421 if (!fence->error && s_fence && s_fence->sched == sched && 422 !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) { 423 424 /* 425 * Fence is from the same scheduler, only need to wait for 426 * it to be scheduled 427 */ 428 fence = dma_fence_get(&s_fence->scheduled); 429 dma_fence_put(entity->dependency); 430 entity->dependency = fence; 431 if (!dma_fence_add_callback(fence, &entity->cb, 432 drm_sched_entity_clear_dep)) 433 return true; 434 435 /* Ignore it when it is already scheduled */ 436 dma_fence_put(fence); 437 return false; 438 } 439 440 if (!dma_fence_add_callback(entity->dependency, &entity->cb, 441 drm_sched_entity_wakeup)) 442 return true; 443 444 dma_fence_put(entity->dependency); 445 return false; 446 } 447 448 static struct dma_fence * 449 drm_sched_job_dependency(struct drm_sched_job *job, 450 struct drm_sched_entity *entity) 451 { 452 struct dma_fence *f; 453 454 /* We keep the fence around, so we can iterate over all dependencies 455 * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled 456 * before killing the job. 
457 */ 458 f = xa_load(&job->dependencies, job->last_dependency); 459 if (f) { 460 job->last_dependency++; 461 return dma_fence_get(f); 462 } 463 464 if (job->sched->ops->prepare_job) 465 return job->sched->ops->prepare_job(job, entity); 466 467 return NULL; 468 } 469 470 struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) 471 { 472 struct drm_sched_job *sched_job; 473 474 sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue)); 475 if (!sched_job) 476 return NULL; 477 478 while ((entity->dependency = 479 drm_sched_job_dependency(sched_job, entity))) { 480 trace_drm_sched_job_wait_dep(sched_job, entity->dependency); 481 482 if (drm_sched_entity_add_dependency_cb(entity)) 483 return NULL; 484 } 485 486 /* skip jobs from entity that marked guilty */ 487 if (entity->guilty && atomic_read(entity->guilty)) 488 dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED); 489 490 dma_fence_put(rcu_dereference_check(entity->last_scheduled, true)); 491 rcu_assign_pointer(entity->last_scheduled, 492 dma_fence_get(&sched_job->s_fence->finished)); 493 494 /* 495 * If the queue is empty we allow drm_sched_entity_select_rq() to 496 * locklessly access ->last_scheduled. This only works if we set the 497 * pointer before we dequeue and if we a write barrier here. 498 */ 499 smp_wmb(); 500 501 spsc_queue_pop(&entity->job_queue); 502 503 /* 504 * Update the entity's location in the min heap according to 505 * the timestamp of the next job, if any. 506 */ 507 if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) { 508 struct drm_sched_job *next; 509 510 next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue)); 511 if (next) 512 drm_sched_rq_update_fifo(entity, next->submit_ts); 513 } 514 515 /* Jobs and entities might have different lifecycles. Since we're 516 * removing the job from the entities queue, set the jobs entity pointer 517 * to NULL to prevent any future access of the entity through this job. 518 */ 519 sched_job->entity = NULL; 520 521 return sched_job; 522 } 523 524 void drm_sched_entity_select_rq(struct drm_sched_entity *entity) 525 { 526 struct dma_fence *fence; 527 struct drm_gpu_scheduler *sched; 528 struct drm_sched_rq *rq; 529 530 /* single possible engine and already selected */ 531 if (!entity->sched_list) 532 return; 533 534 /* queue non-empty, stay on the same engine */ 535 if (spsc_queue_count(&entity->job_queue)) 536 return; 537 538 /* 539 * Only when the queue is empty are we guaranteed that the scheduler 540 * thread cannot change ->last_scheduled. To enforce ordering we need 541 * a read barrier here. See drm_sched_entity_pop_job() for the other 542 * side. 543 */ 544 smp_rmb(); 545 546 fence = rcu_dereference_check(entity->last_scheduled, true); 547 548 /* stay on the same engine if the previous job hasn't finished */ 549 if (fence && !dma_fence_is_signaled(fence)) 550 return; 551 552 spin_lock(&entity->rq_lock); 553 sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list); 554 rq = sched ? 
/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 * @sched_job: job to submit
 *
 * Note: To guarantee that the order of insertion to queue matches the job's
 * fence sequence number this function should be called with drm_sched_job_arm()
 * under common lock for the struct drm_sched_entity that was set up for
 * @sched_job in drm_sched_job_init().
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
	struct drm_sched_entity *entity = sched_job->entity;
	bool first;
	ktime_t submit_ts;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(entity->rq->sched->score);
#ifdef __linux__
	WRITE_ONCE(entity->last_user, current->group_leader);
#else
	WRITE_ONCE(entity->last_user, curproc->p_p);
#endif

	/*
	 * After the sched_job is pushed into the entity queue, it may be
	 * completed and freed up at any time. We can no longer access it.
	 * Make sure to set the submit_ts first, to avoid a race.
	 */
	sched_job->submit_ts = submit_ts = ktime_get();
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}

		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);

		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
			drm_sched_rq_update_fifo(entity, submit_ts);

		drm_sched_wakeup_if_can_queue(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
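/*
 * Illustrative submission flow (a sketch, not code from this file; the exact
 * drm_sched_job_init() arguments differ between kernel versions): the job is
 * initialized against the entity, then armed and pushed under one common
 * lock, per the note above. The "my_job", "my_ctx" and "ctx_lock" names are
 * hypothetical.
 *
 *	ret = drm_sched_job_init(&my_job->base, &my_ctx->entity, my_ctx);
 *	if (ret)
 *		return ret;
 *
 *	mutex_lock(&my_ctx->ctx_lock);
 *	drm_sched_job_arm(&my_job->base);
 *	drm_sched_entity_push_job(&my_job->base);
 *	mutex_unlock(&my_ctx->ctx_lock);
 */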