Lines matching defs:s_job

263  * @s_job: pointer to the job which is done
267 static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
269 struct drm_sched_fence *s_fence = s_job->s_fence;
290 struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
292 drm_sched_job_done(s_job, f->error);
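Taken together, lines 263-292 are the completion path: the driver's HW fence fires a dma_fence callback, container_of() recovers the job from the cb member embedded in struct drm_sched_job, and the fence's error code is forwarded. A minimal sketch of how the two pieces fit; the body of drm_sched_job_done() is not shown in the matches and is an assumption reconstructed from standard scheduler-fence conventions:

	/* Sketch only: this body is assumed, not taken from the matches. */
	static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
	{
		struct drm_sched_fence *s_fence = s_job->s_fence;

		/* Signal the finished fence, propagating 'result' as its
		 * error, then let the scheduler pick up the next job. */
		drm_sched_fence_finished(s_fence, result);
		wake_up_interruptible(&s_fence->sched->wake_up_worker);
	}

	/* Callback installed on the HW (parent) fence; 'cb' is embedded in
	 * struct drm_sched_job, so container_of() recovers the job. */
	static void drm_sched_job_done_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
	{
		struct drm_sched_job *s_job =
			container_of(cb, struct drm_sched_job, cb);

		drm_sched_job_done(s_job, f->error);
	}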
379 static void drm_sched_job_begin(struct drm_sched_job *s_job)
381 struct drm_gpu_scheduler *sched = s_job->sched;
384 list_add_tail(&s_job->list, &sched->pending_list);
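Line 384 is where a freshly pushed job becomes visible to timeout handling: it joins the scheduler's pending_list. A plausible reconstruction of the whole function; the job_list_lock locking and the drm_sched_start_timeout() call are assumptions inferred from how pending_list is used elsewhere in the scheduler:

	static void drm_sched_job_begin(struct drm_sched_job *s_job)
	{
		struct drm_gpu_scheduler *sched = s_job->sched;

		/* Assumed: pending_list is protected by job_list_lock. */
		spin_lock(&sched->job_list_lock);
		list_add_tail(&s_job->list, &sched->pending_list);
		/* Assumed: arm the TDR timer now that a job is in flight. */
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}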
446 struct drm_sched_job *s_job, *tmp;
470 list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
472 if (s_job->s_fence->parent &&
473 dma_fence_remove_callback(s_job->s_fence->parent,
474 &s_job->cb)) {
475 dma_fence_put(s_job->s_fence->parent);
476 s_job->s_fence->parent = NULL;
484 list_del_init(&s_job->list);
488 * Wait for job's HW fence callback to finish using s_job
493 dma_fence_wait(&s_job->s_fence->finished, false);
500 if (bad != s_job)
501 sched->ops->free_job(s_job);
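Lines 446-501 are the core of the stop path: pending_list is walked in reverse and each job's HW-fence callback is torn down. If dma_fence_remove_callback() succeeds, the job never completed, so the parent fence reference is dropped; if it fails, the callback already ran (or is running), so the code must wait on the finished fence before the job may be freed. A condensed sketch of that walk in a hypothetical helper; the list locking and counter bookkeeping of the real function are omitted:

	/* Sketch of the drm_sched_stop() pending-list walk; 'bad' is the
	 * timed-out job the caller must keep alive for recovery. */
	static void sketch_stop_walk(struct drm_gpu_scheduler *sched,
				     struct drm_sched_job *bad)
	{
		struct drm_sched_job *s_job, *tmp;

		list_for_each_entry_safe_reverse(s_job, tmp,
						 &sched->pending_list, list) {
			if (s_job->s_fence->parent &&
			    dma_fence_remove_callback(s_job->s_fence->parent,
						      &s_job->cb)) {
				/* Callback removed before it fired: the HW
				 * never completed this job. Drop the HW fence
				 * reference; resubmission installs a new one. */
				dma_fence_put(s_job->s_fence->parent);
				s_job->s_fence->parent = NULL;
			} else {
				/* The callback already fired, so the job is
				 * done. Unlink it, wait for the callback to
				 * stop using s_job, then free everything
				 * except 'bad'. */
				list_del_init(&s_job->list);
				dma_fence_wait(&s_job->s_fence->finished, false);
				if (bad != s_job)
					sched->ops->free_job(s_job);
			}
		}
	}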
527 struct drm_sched_job *s_job, *tmp;
535 list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
536 struct dma_fence *fence = s_job->s_fence->parent;
544 r = dma_fence_add_callback(fence, &s_job->cb,
547 drm_sched_job_done(s_job, fence->error);
552 drm_sched_job_done(s_job, -ECANCELED);
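Lines 527-552 are the mirror image on restart: each surviving pending job gets its completion callback re-installed on the (possibly new) HW fence. dma_fence_add_callback() returning -ENOENT means the fence already signaled, so the job is completed immediately with the fence's error; a job with no parent fence was never resubmitted and can only be cancelled. A condensed sketch in a hypothetical helper, assuming the drm_sched_job_done_cb from lines 290-292 is the callback being installed:

	/* Sketch of the drm_sched_start() requeue loop. */
	static void sketch_start_requeue(struct drm_gpu_scheduler *sched)
	{
		struct drm_sched_job *s_job, *tmp;
		int r;

		list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
			struct dma_fence *fence = s_job->s_fence->parent;

			if (fence) {
				r = dma_fence_add_callback(fence, &s_job->cb,
							   drm_sched_job_done_cb);
				if (r == -ENOENT)
					/* Already signaled: complete it now. */
					drm_sched_job_done(s_job, fence->error);
			} else {
				/* Never resubmitted after the reset. */
				drm_sched_job_done(s_job, -ECANCELED);
			}
		}
	}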
584 struct drm_sched_job *s_job, *tmp;
589 list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
590 struct drm_sched_fence *s_fence = s_job->s_fence;
592 if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
594 guilty_context = s_job->s_fence->scheduled.context;
597 if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
600 fence = sched->ops->run_job(s_job);
606 s_job->s_fence->parent = NULL;
609 s_job->s_fence->parent = dma_fence_get(fence);
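Lines 584-609 implement resubmission with guilty-context tracking: the first job whose karma exceeds hang_limit marks its fence context as guilty, every job from that context has its finished fence errored rather than being meaningfully replayed, and each job is handed back through ops->run_job(), whose returned HW fence becomes the new s_fence->parent feeding the callback machinery above. A condensed sketch; the dma_fence_set_error() call and the IS_ERR_OR_NULL() handling are assumptions filled in around the matched lines:

	/* Sketch of the drm_sched_resubmit_jobs() loop. */
	static void sketch_resubmit(struct drm_gpu_scheduler *sched)
	{
		struct drm_sched_job *s_job, *tmp;
		bool found_guilty = false;
		uint64_t guilty_context = 0;
		struct dma_fence *fence;

		list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
			struct drm_sched_fence *s_fence = s_job->s_fence;

			/* First job over the hang limit names the guilty
			 * context. */
			if (!found_guilty &&
			    atomic_read(&s_job->karma) > sched->hang_limit) {
				found_guilty = true;
				guilty_context = s_fence->scheduled.context;
			}

			/* Jobs from the guilty context are cancelled. */
			if (found_guilty &&
			    s_fence->scheduled.context == guilty_context)
				dma_fence_set_error(&s_fence->finished,
						    -ECANCELED);

			/* Hand the job back to the driver; keep the new HW
			 * fence as the parent for the completion callback. */
			fence = sched->ops->run_job(s_job);
			if (IS_ERR_OR_NULL(fence)) {
				s_fence->parent = NULL;
			} else {
				s_fence->parent = dma_fence_get(fence);
				dma_fence_put(fence); /* drop run_job's ref */
			}
		}
	}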