Lines Matching defs:job

32  * backend operations to the scheduler, like submitting a job to the hardware run queue,
33 * returning the dependencies of a job, etc.
46 * Note that once a job has been taken from the entity's queue and pushed to the
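
These hooks live in struct drm_sched_backend_ops. A minimal sketch of the driver side, assuming a hypothetical foo_ driver with a to_foo_job() container_of() helper; foo_hw_submit(), which returns the hardware fence, is likewise made up:

#include <drm/gpu_scheduler.h>
#include <linux/dma-fence.h>

static struct dma_fence *foo_run_job(struct drm_sched_job *sched_job)
{
        struct foo_job *job = to_foo_job(sched_job);

        /* Push the job to the hardware run queue; the returned fence
         * signals once the hardware has finished executing it. */
        return foo_hw_submit(job);
}

static const struct drm_sched_backend_ops foo_sched_ops = {
        .run_job      = foo_run_job,
        .timedout_job = foo_timedout_job, /* sketched further below */
        .free_job     = foo_free_job,     /* sketched further below */
};
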
190 * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
234 * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
262 * drm_sched_job_done - complete a job
263 * @s_job: pointer to the job which is done
265 * Finish the job's fence and wake up the worker thread.
284 * drm_sched_job_done_cb - the callback for a done job
324 * drm_sched_suspend_timeout - Suspend scheduler job timeout
358 * drm_sched_resume_timeout - Resume scheduler job timeout
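
drm_sched_suspend_timeout() returns the time left on the timer, so a driver that power-gates an engine can park the timeout and later re-arm it with the remaining budget. A hedged sketch; fdev and its fields are hypothetical:

/* Engine is about to be suspended: park the timeout timer. */
fdev->tdr_remaining = drm_sched_suspend_timeout(&fdev->sched);

/* ... engine powered down, jobs can neither progress nor time out ... */

/* Engine is back up: resume the timeout with whatever was left of it. */
drm_sched_resume_timeout(&fdev->sched, fdev->tdr_remaining);
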
392 struct drm_sched_job *job;
399 job = list_first_entry_or_null(&sched->pending_list,
402 if (job) {
404 * Remove the bad job so it cannot be freed by concurrent
408 list_del_init(&job->list);
411 status = job->sched->ops->timedout_job(job);
414 * The guilty job did complete and hence needs to be manually removed
418 job->sched->ops->free_job(job);
436 * @bad: job which caused the time out
439 * Note: the bad job will not be freed, as it might be used later, and so it's
451 * Reinsert the bad job here - now it's safe, as
453 * bad job at this point - we parked (waited for) any in-progress
460 * job extracted.
465 * Iterate the job list from the latest to the earliest and either deactivate
480 * remove job from pending_list.
488 * Wait for job's HW fence callback to finish using s_job
496 * We must keep the bad job alive for later use during
498 * that the guilty job must be released.
571 * recovery after a job timeout.
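
Putting these pieces together, a hedged sketch of a driver's timedout_job hook built on the stop/resubmit/start sequence above; foo_hw_reset() is an assumption, and the exact drm_sched_start() signature depends on the kernel version this listing was taken from:

static enum drm_gpu_sched_stat foo_timedout_job(struct drm_sched_job *sched_job)
{
        struct drm_gpu_scheduler *sched = sched_job->sched;

        /* The hardware may have signaled the job just as the timer fired. */
        if (sched_job->s_fence->parent &&
            dma_fence_is_signaled(sched_job->s_fence->parent))
                return DRM_GPU_SCHED_STAT_NOMINAL;

        drm_sched_stop(sched, sched_job);    /* park the scheduler, extract @bad */
        drm_sched_increase_karma(sched_job); /* count this hang against the job */
        foo_hw_reset(sched);                 /* driver-specific engine reset */
        drm_sched_resubmit_jobs(sched);      /* re-queue the surviving jobs */
        drm_sched_start(sched, true);        /* restart with full recovery */

        return DRM_GPU_SCHED_STAT_NOMINAL;
}
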
619 * drm_sched_job_init - init a scheduler job
620 * @job: scheduler job to init
622 * @owner: job owner for debugging
628 * successfully, even when @job is aborted before drm_sched_job_arm() is called.
637 int drm_sched_job_init(struct drm_sched_job *job,
650 memset(job, 0, sizeof(*job));
652 job->entity = entity;
653 job->s_fence = drm_sched_fence_alloc(entity, owner);
654 if (!job->s_fence)
657 INIT_LIST_HEAD(&job->list);
659 xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);
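
A hedged sketch of the allocation half of a submit path, assuming a hypothetical struct foo_job embedding struct drm_sched_job as its base member, with ctx and fpriv standing in for the driver's context and DRM file private:

struct foo_job *job;
int ret;

job = kzalloc(sizeof(*job), GFP_KERNEL);
if (!job)
        return -ENOMEM;

ret = drm_sched_job_init(&job->base, &ctx->entity, fpriv);
if (ret) {
        kfree(job);     /* init failed, no drm_sched_job_cleanup() needed */
        return ret;
}

/* From here until drm_sched_job_arm(), any error unwind must call
 * drm_sched_job_cleanup(&job->base) before freeing the job. */
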
666 * drm_sched_job_arm - arm a scheduler job for execution
667 * @job: scheduler job to arm
669 * This arms a scheduler job for execution. Specifically, it initializes the
670 * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
671 * or other places that need to track the completion of this job.
678 void drm_sched_job_arm(struct drm_sched_job *job)
681 struct drm_sched_entity *entity = job->entity;
687 job->sched = sched;
688 job->s_priority = entity->rq - sched->sched_rq;
689 job->id = atomic64_inc_return(&sched->job_id_count);
691 drm_sched_fence_init(job->s_fence, job->entity);
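
Arming is the point of no return. A sketch of the tail of the submission path; note that the drm_sched_entity_push_job() kerneldoc asks for arm and push to happen under a common lock so fence seqnos match queue order, and dma_resv_add_fence() requires the object's reservation lock to be held:

drm_sched_job_arm(&job->base);

/* job->base.s_fence->finished exists now and may be published,
 * e.g. in a buffer object's reservation. */
dma_resv_add_fence(obj->resv, &job->base.s_fence->finished,
                   DMA_RESV_USAGE_WRITE);

drm_sched_entity_push_job(&job->base);
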
696 * drm_sched_job_add_dependency - adds the fence as a job dependency
697 * @job: scheduler job to add the dependencies to
705 int drm_sched_job_add_dependency(struct drm_sched_job *job,
720 xa_for_each(&job->dependencies, index, entry) {
726 xa_store(&job->dependencies, index, fence, GFP_KERNEL);
733 ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
742 * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency
743 * @job: scheduler job to add the dependencies to
748 * This adds the fence matching the given syncobj to @job.
753 int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
765 return drm_sched_job_add_dependency(job, fence);
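
A sketch of seeding the dependency array from an explicit in-fence and a syncobj wait; args->in_fence_fd, args->wait_handle and args->wait_point are made-up uAPI fields. Note that drm_sched_job_add_dependency() consumes the fence reference:

struct dma_fence *in_fence;
int ret;

in_fence = sync_file_get_fence(args->in_fence_fd);
if (in_fence) {
        /* The reference obtained above is handed over to the job. */
        ret = drm_sched_job_add_dependency(&job->base, in_fence);
        if (ret)
                goto err_cleanup;
}

ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv,
                                           args->wait_handle,
                                           args->wait_point);
if (ret)
        goto err_cleanup;
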
770 * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
771 * @job: scheduler job to add the dependencies to
775 * This adds all fences matching the given usage from @resv to @job.
781 int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
794 ret = drm_sched_job_add_dependency(job, fence);
805 * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
807 * @job: scheduler job to add the dependencies to
809 * @write: whether the job might write the object (so we need to depend on
813 * GEM objects used in the job but before updating the reservations with your
819 int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
823 return drm_sched_job_add_resv_dependencies(job, obj->resv,
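
As the matched line above shows, the implicit variant is a thin wrapper passing obj->resv and the usage derived from @write down to drm_sched_job_add_resv_dependencies(). A sketch of the usual loop over the job's buffer objects; num_bos, bos and bo_write are hypothetical bookkeeping:

for (i = 0; i < job->num_bos; i++) {
        ret = drm_sched_job_add_implicit_dependencies(&job->base,
                                                      job->bos[i],
                                                      job->bo_write[i]);
        if (ret)
                goto err_cleanup;
}
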
829 * drm_sched_job_cleanup - clean up scheduler job resources
830 * @job: scheduler job to clean up
834 * Drivers should call this from their error unwind code if @job is aborted
837 * After that point of no return, @job is committed to be executed by the
841 void drm_sched_job_cleanup(struct drm_sched_job *job)
846 if (kref_read(&job->s_fence->finished.refcount)) {
848 dma_fence_put(&job->s_fence->finished);
850 /* aborted job before committing to run it */
851 drm_sched_fence_free(job->s_fence);
854 job->s_fence = NULL;
856 xa_for_each(&job->dependencies, index, fence) {
859 xa_destroy(&job->dependencies);
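
Tying the two phases together, a hedged unwind sketch for the pre-arm path; once the job is armed, cleanup instead happens from the driver's free_job() hook, sketched after the next group of lines:

err_cleanup:
        /* Valid only before drm_sched_job_arm(); after arming, the
         * scheduler owns the job and releases it through free_job(). */
        drm_sched_job_cleanup(&job->base);
        kfree(job);
        return ret;
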
917 * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
921 * Returns the next finished job from the pending list (if there is one)
927 struct drm_sched_job *job, *next;
931 job = list_first_entry_or_null(&sched->pending_list,
934 if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
935 /* remove job from pending_list */
936 list_del_init(&job->list);
938 /* cancel this job's timeout timer */
946 dma_fence_timestamp(&job->s_fence->finished);
947 /* start the timeout timer for the next job */
951 job = NULL;
956 return job;
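
The job returned here is handed to the driver's free_job() hook, which is the second legal caller of drm_sched_job_cleanup(). A sketch under the same foo_ assumptions as above:

static void foo_free_job(struct drm_sched_job *sched_job)
{
        struct foo_job *job = to_foo_job(sched_job);

        /* Drops the scheduler fences and the dependency array. */
        drm_sched_job_cleanup(sched_job);
        kfree(job);
}
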
1091 * @hang_limit: number of times to allow a job to hang before dropping it
1185 * @bad: The job guilty of the timeout
1187 * Increment on every hang caused by the 'bad' job. If this exceeds the hang