Lines Matching defs:job (the hits below are from the amdgpu driver's job handling code, amdgpu_job.c; the leading number on each match is its source line number)
37 struct amdgpu_job *job = to_amdgpu_job(s_job);
47 /* Effectively the job is aborted as the device is gone */
55 amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
61 amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
63 job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
78 r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
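The matches above are from amdgpu_job_timedout(), amdgpu's .timedout_job hook for the DRM GPU scheduler: it gives up early when the device is already gone, attempts a per-ring soft recovery first, logs the offending process via amdgpu_vm_get_task_info(), and only then escalates to a full reset through amdgpu_device_gpu_recover(). A minimal sketch of the same callback shape for a hypothetical driver follows; the my_* names are invented, only the drm_sched types and status codes are real.

#include <drm/gpu_scheduler.h>

/* Hypothetical driver: try a cheap per-ring recovery before a full reset. */
static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *s_job)
{
        struct my_job *job = container_of(s_job, struct my_job, base);

        if (!my_device_is_present(job->dev))       /* invented helper */
                return DRM_GPU_SCHED_STAT_ENODEV;  /* device unplugged, abort */

        if (my_ring_soft_recover(job))             /* invented helper */
                return DRM_GPU_SCHED_STAT_NOMINAL; /* job salvaged in place */

        my_device_reset(job->dev);                 /* invented helper */
        return DRM_GPU_SCHED_STAT_NOMINAL;
}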
95 unsigned int num_ibs, struct amdgpu_job **job)
100 *job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL);
101 if (!*job)
108 (*job)->base.sched = &adev->rings[0]->sched;
109 (*job)->vm = vm;
111 amdgpu_sync_create(&(*job)->explicit_sync);
112 (*job)->generation = amdgpu_vm_generation(adev, vm);
113 (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;
118 return drm_sched_job_init(&(*job)->base, entity, owner);
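These matches are from amdgpu_job_alloc(): the job and its trailing IB array come from a single kzalloc() sized with struct_size(), the job inherits the VM's generation counter and an invalid page-directory address by default, and drm_sched_job_init() is only called when the caller supplies a scheduler entity. A hedged caller sketch, assuming the upstream signature amdgpu_job_alloc(adev, vm, entity, owner, num_ibs, &job); adev, vm, entity and owner stand for whatever context the caller already holds.

        struct amdgpu_job *job;
        int r;

        /* one job with room for two IBs, bound to the caller's entity */
        r = amdgpu_job_alloc(adev, vm, entity, owner, 2, &job);
        if (r)
                return r;
        /* fill job->ibs[0..1], then hand the job over with amdgpu_job_submit() */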
124 struct amdgpu_job **job)
128 r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job);
132 (*job)->num_ibs = 1;
133 r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
136 drm_sched_job_cleanup(&(*job)->base);
137 kfree(*job);
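amdgpu_job_alloc_with_ib() is the convenience path for kernel-internal work: it allocates a single-IB job without a VM and immediately backs that IB with pool memory through amdgpu_ib_get(), unwinding the scheduler job if the IB allocation fails. A hedged usage sketch, assuming the upstream signature; adev and entity are placeholders, while AMDGPU_IB_POOL_DELAYED and AMDGPU_FENCE_OWNER_UNDEFINED are existing amdgpu constants.

        struct amdgpu_job *job;
        int r;

        /* 64 dwords of IB space from the delayed pool, no VM attached */
        r = amdgpu_job_alloc_with_ib(adev, entity, AMDGPU_FENCE_OWNER_UNDEFINED,
                                     64 * 4, AMDGPU_IB_POOL_DELAYED, &job);
        if (r)
                return r;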
143 void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
147 job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
148 job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
151 job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
152 job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
155 job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
156 job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
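amdgpu_job_set_resources() records the GDS, GWS and OA buffer objects on the job, storing their GPU offsets and sizes right-shifted by PAGE_SHIFT so the ring emit code can program them later. A hedged sketch of how the command-submission path calls it; the bo_list field names are from memory and may differ between kernel versions.

        /* any of the three buffers may be NULL if userspace did not request it */
        amdgpu_job_set_resources(job, bo_list->gds_obj, bo_list->gws_obj,
                                 bo_list->oa_obj);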
160 void amdgpu_job_free_resources(struct amdgpu_job *job)
166 if (job->base.s_fence && job->base.s_fence->finished.ops)
167 f = &job->base.s_fence->finished;
168 else if (job->hw_fence.ops)
169 f = &job->hw_fence;
173 for (i = 0; i < job->num_ibs; ++i)
174 amdgpu_ib_free(NULL, &job->ibs[i], f);
179 struct amdgpu_job *job = to_amdgpu_job(s_job);
183 amdgpu_sync_free(&job->explicit_sync);
186 if (!job->hw_fence.ops)
187 kfree(job);
189 dma_fence_put(&job->hw_fence);
192 void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
197 WARN_ON(job->gang_submit);
203 if (job != leader)
205 job->gang_submit = fence;
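amdgpu_job_set_gang_leader() points a job at its gang leader's scheduled fence so that every member of a gang only starts once the leader has been scheduled; the leader itself skips the extra fence reference (job != leader) to avoid a circular dependency. A hedged setup sketch; the loop shape and variable names are illustrative, not lifted from amdgpu_cs.c.

        /* every gang member, including the leader, records the leader's fence */
        for (i = 0; i < num_jobs; i++)
                amdgpu_job_set_gang_leader(jobs[i], gang_leader);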
208 void amdgpu_job_free(struct amdgpu_job *job)
210 if (job->base.entity)
211 drm_sched_job_cleanup(&job->base);
213 amdgpu_job_free_resources(job);
214 amdgpu_sync_free(&job->explicit_sync);
215 if (job->gang_submit != &job->base.s_fence->scheduled)
216 dma_fence_put(job->gang_submit);
218 if (!job->hw_fence.ops)
219 kfree(job);
221 dma_fence_put(&job->hw_fence);
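amdgpu_job_free() is the teardown path for a job that was never pushed to the scheduler: it cleans up the drm_sched job if an entity was attached, releases the IBs and the explicit sync container, drops the gang_submit reference unless the job is its own gang leader, and finally frees the structure, either directly or by putting the last hw_fence reference. A hedged error-path sketch; my_emit_commands() is an invented helper, the rest follows the earlier allocation example.

        struct amdgpu_job *job;
        int r;

        r = amdgpu_job_alloc_with_ib(adev, entity, AMDGPU_FENCE_OWNER_UNDEFINED,
                                     16 * 4, AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;

        r = my_emit_commands(job);      /* invented helper that fills job->ibs[0] */
        if (r) {
                amdgpu_job_free(job);   /* the job never reached the scheduler */
                return r;
        }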
224 struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
228 drm_sched_job_arm(&job->base);
229 f = dma_fence_get(&job->base.s_fence->finished);
230 amdgpu_job_free_resources(job);
231 drm_sched_entity_push_job(&job->base);
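amdgpu_job_submit() is the normal hand-off: it arms the scheduler job, takes a reference on the finished fence (which it returns), releases the CPU-side IB bookkeeping early and pushes the job to its entity. After this call the job belongs to the scheduler and must not be passed to amdgpu_job_free(). A hedged caller sketch:

        struct dma_fence *fence;

        fence = amdgpu_job_submit(job);  /* returns a referenced finished fence */
        dma_fence_wait(fence, false);    /* or store it for later use */
        dma_fence_put(fence);            /* the caller owns exactly one reference */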
236 int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
241 job->base.sched = &ring->sched;
242 r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, fence);
247 amdgpu_job_free(job);
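amdgpu_job_submit_direct() bypasses the scheduler and emits the IBs straight to the ring with amdgpu_ib_schedule(); on success the job structure is freed immediately and the caller keeps only the returned hardware fence, while on failure the caller still owns the job. A hedged caller sketch; ring stands for a ring the caller already owns (e.g. a VCN/JPEG ring during init).

        struct dma_fence *f = NULL;
        int r;

        r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err_free_job;       /* caller must amdgpu_job_free(job) here */

        dma_fence_wait(f, false);
        dma_fence_put(f);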
256 struct amdgpu_job *job = to_amdgpu_job(sched_job);
264 if (!fence && job->gang_submit)
265 fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
267 while (!fence && job->vm && !job->vmid) {
268 r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
278 dma_fence_set_error(&job->base.s_fence->finished, r);
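These matches are from amdgpu_job_prepare_job(), amdgpu's .prepare_job scheduler callback: it hands back, one fence per call, whatever the job still has to wait for (sync dependencies, the gang switch fence, and finally the fence produced while grabbing a VMID) and returns NULL once the job is ready; on error it marks the finished fence with dma_fence_set_error(). A minimal sketch of the callback shape for a hypothetical driver; the my_* names are invented, the prototype matches drm/gpu_scheduler.h.

/* Hypothetical driver: return one outstanding dependency per call,
 * NULL once the job can run. */
static struct dma_fence *
my_prepare_job(struct drm_sched_job *sched_job,
               struct drm_sched_entity *s_entity)
{
        struct my_job *job = container_of(sched_job, struct my_job, base);

        return my_next_dependency(job);  /* invented helper */
}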
287 struct amdgpu_job *job;
290 job = to_amdgpu_job(sched_job);
291 finished = &job->base.s_fence->finished;
293 trace_amdgpu_sched_run_job(job);
295 /* Skip job if VRAM is lost and never resubmit gangs */
296 if (job->generation != amdgpu_vm_generation(adev, job->vm) ||
297 (job->job_run_counter && job->gang_submit))
304 r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
312 job->job_run_counter++;
313 amdgpu_job_free_resources(job);
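The last block is from amdgpu_job_run(), the .run_job callback: it skips jobs whose VM generation no longer matches (VRAM was lost) and never resubmits gang submissions, otherwise it emits the IBs with amdgpu_ib_schedule(), bumps job_run_counter, releases the IB memory and returns the hardware fence. In the same file the callbacks are wired into the scheduler roughly as below; the initializer order is from memory.

const struct drm_sched_backend_ops amdgpu_sched_ops = {
        .prepare_job = amdgpu_job_prepare_job,
        .run_job = amdgpu_job_run,
        .timedout_job = amdgpu_job_timedout,
        .free_job = amdgpu_job_free_cb
};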