Lines matching refs:req

77 struct drm_i915_gem_request *req = to_request(fence); in i915_fence_release() local
85 i915_sw_fence_fini(&req->submit); in i915_fence_release()
87 kmem_cache_free(req->i915->requests, req); in i915_fence_release()
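The first hits are in i915_fence_release(): when the last reference to a request's dma_fence is dropped, the embedded software fence is finalized and the request is returned to the per-device slab cache. A minimal user-space sketch of the pattern, assuming a simplified fence and request (container_of() recovers the containing request from the embedded fence, and free() stands in for kmem_cache_free()):

/* Illustrative sketch, not the kernel code: a fence release callback
 * recovering its containing request, as i915_fence_release() does. */
#include <stdlib.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct fence {
        void (*release)(struct fence *f);
};

struct request {
        int seqno;          /* stand-in for the request's bookkeeping */
        struct fence fence; /* embedded fence, as in drm_i915_gem_request */
};

static void request_release(struct fence *f)
{
        struct request *req = container_of(f, struct request, fence);
        printf("releasing request %d\n", req->seqno);
        free(req);          /* the kernel uses kmem_cache_free(requests, req) */
}

int main(void)
{
        struct request *req = malloc(sizeof(*req));
        req->seqno = 1;
        req->fence.release = request_release;
        req->fence.release(&req->fence); /* last fence reference dropped */
        return 0;
}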
427 void i915_gem_request_retire_upto(struct drm_i915_gem_request *req) in i915_gem_request_retire_upto() argument
429 struct intel_engine_cs *engine = req->engine; in i915_gem_request_retire_upto()
432 lockdep_assert_held(&req->i915->drm.struct_mutex); in i915_gem_request_retire_upto()
433 GEM_BUG_ON(!i915_gem_request_completed(req)); in i915_gem_request_retire_upto()
435 if (list_empty(&req->link)) in i915_gem_request_retire_upto()
443 } while (tmp != req); in i915_gem_request_retire_upto()
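i915_gem_request_retire_upto() retires every request on the engine's timeline from the oldest up to and including the given one; it asserts that struct_mutex is held and that the target request has completed, and returns early if the request is no longer linked. A sketch of that retire-upto loop under those assumptions, with a plain singly linked list standing in for the kernel's timeline list:

/* Sketch: pop requests from the head of a timeline list until the
 * target request (inclusive) has been retired. Types are illustrative. */
#include <stdio.h>

struct request {
        int seqno;
        struct request *next;
};

static struct request *timeline;   /* oldest request first */

static void retire(struct request *req)
{
        printf("retired %d\n", req->seqno);
}

static void retire_upto(struct request *req)
{
        struct request *tmp;

        if (!timeline)      /* analogue of the list_empty(&req->link) check */
                return;
        do {
                tmp = timeline;
                timeline = tmp->next;
                retire(tmp);
        } while (tmp != req);
}

int main(void)
{
        struct request r3 = { 3, NULL };
        struct request r2 = { 2, &r3 };
        struct request r1 = { 1, &r2 };
        timeline = &r1;
        retire_upto(&r2);   /* retires 1 and 2, leaves 3 pending */
        return 0;
}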
593 struct drm_i915_gem_request *req; in i915_gem_request_alloc() local
626 req = list_first_entry_or_null(&engine->timeline->requests, in i915_gem_request_alloc()
627 typeof(*req), link); in i915_gem_request_alloc()
628 if (req && i915_gem_request_completed(req)) in i915_gem_request_alloc()
629 i915_gem_request_retire(req); in i915_gem_request_alloc()
659 req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL); in i915_gem_request_alloc()
660 if (!req) { in i915_gem_request_alloc()
665 req->timeline = i915_gem_context_lookup_timeline(ctx, engine); in i915_gem_request_alloc()
666 GEM_BUG_ON(req->timeline == engine->timeline); in i915_gem_request_alloc()
668 lockinit(&req->lock, "i915_rl", 0, 0); in i915_gem_request_alloc()
669 dma_fence_init(&req->fence, in i915_gem_request_alloc()
671 &req->lock, in i915_gem_request_alloc()
672 req->timeline->fence_context, in i915_gem_request_alloc()
673 timeline_get_seqno(req->timeline)); in i915_gem_request_alloc()
676 i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify); in i915_gem_request_alloc()
677 init_waitqueue_head(&req->execute); in i915_gem_request_alloc()
679 i915_priotree_init(&req->priotree); in i915_gem_request_alloc()
681 INIT_LIST_HEAD(&req->active_list); in i915_gem_request_alloc()
682 req->i915 = dev_priv; in i915_gem_request_alloc()
683 req->engine = engine; in i915_gem_request_alloc()
684 req->ctx = ctx; in i915_gem_request_alloc()
685 req->ring = ring; in i915_gem_request_alloc()
688 req->global_seqno = 0; in i915_gem_request_alloc()
689 req->file_priv = NULL; in i915_gem_request_alloc()
690 req->batch = NULL; in i915_gem_request_alloc()
691 req->capture_list = NULL; in i915_gem_request_alloc()
692 req->waitboost = false; in i915_gem_request_alloc()
701 req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST; in i915_gem_request_alloc()
702 GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz); in i915_gem_request_alloc()
704 ret = engine->request_alloc(req); in i915_gem_request_alloc()
713 req->head = req->ring->emit; in i915_gem_request_alloc()
716 GEM_BUG_ON(req->timeline->seqno != req->fence.seqno); in i915_gem_request_alloc()
717 return req; in i915_gem_request_alloc()
721 GEM_BUG_ON(!list_empty(&req->active_list)); in i915_gem_request_alloc()
722 GEM_BUG_ON(!list_empty(&req->priotree.signalers_list)); in i915_gem_request_alloc()
723 GEM_BUG_ON(!list_empty(&req->priotree.waiters_list)); in i915_gem_request_alloc()
725 kmem_cache_free(dev_priv->requests, req); in i915_gem_request_alloc()
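i915_gem_request_alloc() first opportunistically retires the oldest request on the engine timeline if it has completed, then allocates a request from the slab cache, initializes its fence against the context timeline's next seqno along with the submit fence, priotree, and bookkeeping fields, reserves ring space for the final breadcrumb, and calls the engine's request_alloc() hook; on failure it unwinds and frees the request. A compressed sketch of that allocate-initialize-or-unwind shape, with all names illustrative:

/* Sketch of the i915_gem_request_alloc() control flow; not the driver. */
#include <stdlib.h>
#include <errno.h>

struct request {
        unsigned int seqno;
        unsigned int reserved_space;
};

/* Hypothetical stand-in for the engine->request_alloc() hook: may fail. */
static int engine_request_alloc(struct request *req)
{
        return req->reserved_space ? 0 : -ENOSPC;
}

static struct request *request_alloc(unsigned int next_seqno)
{
        struct request *req = calloc(1, sizeof(*req)); /* kmem_cache_alloc */
        if (!req)
                return NULL;

        req->seqno = next_seqno;        /* timeline_get_seqno() analogue */
        req->reserved_space = 32;       /* MIN_SPACE_FOR_ADD_REQUEST stand-in */

        if (engine_request_alloc(req)) {
                free(req);              /* unwind: kmem_cache_free() */
                return NULL;
        }
        return req;
}

int main(void)
{
        struct request *req = request_alloc(1);
        free(req);
        return 0;
}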
789 i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req, in i915_gem_request_await_dma_fence() argument
821 if (fence->context == req->fence.context) in i915_gem_request_await_dma_fence()
825 if (fence->context != req->i915->mm.unordered_timeline && in i915_gem_request_await_dma_fence()
826 intel_timeline_sync_is_later(req->timeline, fence)) in i915_gem_request_await_dma_fence()
830 ret = i915_gem_request_await_request(req, in i915_gem_request_await_dma_fence()
833 ret = i915_sw_fence_await_dma_fence(&req->submit, fence, in i915_gem_request_await_dma_fence()
840 if (fence->context != req->i915->mm.unordered_timeline) in i915_gem_request_await_dma_fence()
841 intel_timeline_sync_set(req->timeline, fence); in i915_gem_request_await_dma_fence()
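i915_gem_request_await_dma_fence() avoids redundant waits: a fence on the request's own timeline is already ordered, and a fence on a foreign timeline is skipped when the timeline's sync map already records an equal or later seqno for that context; otherwise the wait is queued and the sync point recorded afterwards. A user-space sketch of that deduplication idea, with a small fixed-size cache standing in for intel_timeline_sync_is_later()/intel_timeline_sync_set():

/* Sketch of fence-await deduplication; the cache shape is illustrative. */
#include <stdbool.h>
#include <stdio.h>

struct fence { unsigned ctx; unsigned seqno; };

#define MY_CONTEXT 1u

static struct { unsigned ctx; unsigned seqno; } sync_cache[4];

static bool sync_is_later(const struct fence *f)
{
        for (unsigned i = 0; i < 4; i++)
                if (sync_cache[i].ctx == f->ctx)
                        return sync_cache[i].seqno >= f->seqno;
        return false;
}

static void sync_set(const struct fence *f)
{
        for (unsigned i = 0; i < 4; i++)
                if (sync_cache[i].ctx == f->ctx || !sync_cache[i].ctx) {
                        sync_cache[i].ctx = f->ctx;
                        sync_cache[i].seqno = f->seqno;
                        return;
                }
}

static void await_fence(const struct fence *f)
{
        if (f->ctx == MY_CONTEXT)
                return;                 /* same timeline: already ordered */
        if (sync_is_later(f))
                return;                 /* an equal/later fence was awaited */
        printf("waiting on ctx %u seqno %u\n", f->ctx, f->seqno);
        sync_set(f);                    /* remember the sync point */
}

int main(void)
{
        struct fence a = { 2, 5 }, b = { 2, 3 };
        await_fence(&a);                /* queues a wait */
        await_fence(&b);                /* skipped: seqno 5 already awaited */
        return 0;
}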
1040 static bool __i915_spin_request(const struct drm_i915_gem_request *req, in __i915_spin_request() argument
1043 struct intel_engine_cs *engine = req->engine; in __i915_spin_request()
1076 return seqno == i915_gem_request_global_seqno(req); in __i915_spin_request()
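__i915_spin_request() busy-waits briefly on the bet that an almost-complete request will finish sooner than an interrupt-driven sleep can be set up, and it re-reads the request's global seqno so the spin aborts if the request is unsubmitted or preempted meanwhile. A bounded-spin sketch under those assumptions (the timeout source and completion check are illustrative):

/* Sketch: spin for at most timeout_us before the caller falls back to
 * sleeping, in the spirit of __i915_spin_request(). */
#include <stdbool.h>
#include <time.h>

static volatile unsigned hw_seqno;      /* stand-in for the HWSP seqno */

static bool seqno_passed(unsigned a, unsigned b)
{
        return (int)(a - b) >= 0;       /* wrap-safe, as i915_seqno_passed() */
}

static bool spin_request(unsigned seqno, unsigned timeout_us)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        long deadline = ts.tv_sec * 1000000L + ts.tv_nsec / 1000 + timeout_us;

        do {
                if (seqno_passed(hw_seqno, seqno))
                        return true;    /* completed while spinning */
                clock_gettime(CLOCK_MONOTONIC, &ts);
        } while (ts.tv_sec * 1000000L + ts.tv_nsec / 1000 < deadline);

        return false;                   /* caller falls back to sleeping */
}

int main(void)
{
        hw_seqno = 10;
        return spin_request(9, 5) ? 0 : 1;
}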
1127 long i915_wait_request(struct drm_i915_gem_request *req, in i915_wait_request() argument
1133 wait_queue_head_t *errq = &req->i915->gpu_error.wait_queue; in i915_wait_request()
1141 !!lockdep_is_held(&req->i915->drm.struct_mutex) != in i915_wait_request()
1146 if (i915_gem_request_completed(req)) in i915_wait_request()
1152 trace_i915_gem_request_wait_begin(req, flags); in i915_wait_request()
1154 add_wait_queue(&req->execute, &exec); in i915_wait_request()
1158 intel_wait_init(&wait, req); in i915_wait_request()
1163 if (intel_wait_update_request(&wait, req)) in i915_wait_request()
1167 __i915_wait_request_check_and_reset(req)) in i915_wait_request()
1184 GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit)); in i915_wait_request()
1187 if (__i915_spin_request(req, wait.seqno, state, 5)) in i915_wait_request()
1191 if (intel_engine_add_wait(req->engine, &wait)) in i915_wait_request()
1199 __i915_wait_request_check_and_reset(req); in i915_wait_request()
1215 intel_wait_check_request(&wait, req)) in i915_wait_request()
1226 if (__i915_request_irq_complete(req)) in i915_wait_request()
1240 __i915_wait_request_check_and_reset(req)) in i915_wait_request()
1244 if (__i915_spin_request(req, wait.seqno, state, 2)) in i915_wait_request()
1247 if (!intel_wait_check_request(&wait, req)) { in i915_wait_request()
1248 intel_engine_remove_wait(req->engine, &wait); in i915_wait_request()
1253 intel_engine_remove_wait(req->engine, &wait); in i915_wait_request()
1258 remove_wait_queue(&req->execute, &exec); in i915_wait_request()
1259 trace_i915_gem_request_wait_end(req); in i915_wait_request()
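i915_wait_request() ties these pieces together: check for completion, register on the execute waitqueue, spin briefly, add the waiter to the engine's breadcrumb tree and arm the interrupt, then sleep and re-validate the wait on every wakeup (including after a GPU reset) until the request completes or the timeout expires. A pthread-based sketch of that overall spin-then-sleep shape, with a condition variable standing in for the kernel's waitqueues and breadcrumb machinery:

/* User-space sketch only: the unsynchronized spin-phase read of
 * hw_seqno is a deliberate simplification of the HWSP polling. */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t irq = PTHREAD_COND_INITIALIZER;
static volatile unsigned hw_seqno;

static bool completed(unsigned seqno)
{
        return (int)(hw_seqno - seqno) >= 0;
}

static void wait_request(unsigned seqno)
{
        /* Opportunistic spin: cheap if the request is nearly done. */
        for (int i = 0; i < 1000; i++)
                if (completed(seqno))
                        return;

        /* Slow path: sleep until signalled, re-checking on each wakeup
         * (spurious wakeups and reset handling make the loop mandatory). */
        pthread_mutex_lock(&lock);
        while (!completed(seqno))
                pthread_cond_wait(&irq, &lock);
        pthread_mutex_unlock(&lock);
}

static void *signal_thread(void *arg)
{
        pthread_mutex_lock(&lock);
        hw_seqno = 1;                   /* "breadcrumb" written by the GPU */
        pthread_cond_broadcast(&irq);
        pthread_mutex_unlock(&lock);
        return arg;
}

int main(void)
{
        pthread_t t;
        pthread_create(&t, NULL, signal_thread, NULL);
        wait_request(1);
        pthread_join(t, NULL);
        return 0;
}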