/*	$NetBSD: i915_request.h,v 1.4 2021/12/19 11:36:17 riastradh Exp $	*/

/*
 * Copyright © 2008-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_REQUEST_H
#define I915_REQUEST_H

#include <linux/dma-fence.h>
#include <linux/lockdep.h>

#include "gem/i915_gem_context_types.h"
#include "gt/intel_context_types.h"
#include "gt/intel_engine_types.h"
#include "gt/intel_timeline_types.h"

#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"

#include <uapi/drm/i915_drm.h>

struct drm_file;
struct drm_i915_gem_object;
struct i915_request;

struct i915_capture_list {
	struct i915_capture_list *next;
	struct i915_vma *vma;
};

#define RQ_TRACE(rq, fmt, ...) do {					\
	const struct i915_request *rq__ = (rq);				\
	ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt,	\
		     rq__->fence.context, rq__->fence.seqno,		\
		     hwsp_seqno(rq__), ##__VA_ARGS__);			\
} while (0)
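
/*
 * Usage sketch (illustrative; 'prio' below is a hypothetical local):
 * RQ_TRACE() emits an engine-tagged debug line carrying the request's
 * fence context, its fence seqno and the breadcrumb currently visible
 * in the HWSP, with the caller's format string appended, e.g.
 *
 *	RQ_TRACE(rq, "submitted\n");
 *	RQ_TRACE(rq, "boosted prio to %d\n", prio);
 *
 * It compiles away along with ENGINE_TRACE() when the driver's GEM
 * tracing is not configured in.
 */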

enum {
	/*
	 * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
	 *
	 * Set by __i915_request_submit() on handing over to HW, and cleared
	 * by __i915_request_unsubmit() if we preempt this request.
	 *
	 * Finally cleared for consistency on retiring the request, when
	 * we know the HW is no longer running this request.
	 *
	 * See i915_request_is_active()
	 */
	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,

	/*
	 * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
	 *
	 * Using the scheduler, when a request is ready for execution it is put
	 * into the priority queue, and removed from that queue when transferred
	 * to the HW runlists. We want to track its membership within the
	 * priority queue so that we can easily check before rescheduling.
	 *
	 * See i915_request_in_priority_queue()
	 */
	I915_FENCE_FLAG_PQUEUE,

	/*
	 * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
	 *
	 * Internal bookkeeping used by the breadcrumb code to track when
	 * a request is on the various signal_list.
	 */
	I915_FENCE_FLAG_SIGNAL,

	/*
	 * I915_FENCE_FLAG_HOLD - this request is currently on hold
	 *
	 * This request has been suspended, pending an ongoing investigation.
	 */
	I915_FENCE_FLAG_HOLD,

	/*
	 * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
	 *
	 * The execution of some requests should not be interrupted. This is
	 * a sensitive operation as it makes the request super important,
	 * blocking other higher priority work. Abuse of this flag will
	 * lead to quality of service issues.
	 */
	I915_FENCE_FLAG_NOPREEMPT,

	/*
	 * I915_FENCE_FLAG_SENTINEL - this request should be last in the queue
	 *
	 * A high priority sentinel request may be submitted to clear the
	 * submission queue. As it will be the only request in-flight, upon
	 * execution all other active requests will have been preempted and
	 * unsubmitted. This preemptive pulse is used to re-evaluate the
	 * in-flight requests, particularly in cases where an active context
	 * is banned and those active requests need to be cancelled.
	 */
	I915_FENCE_FLAG_SENTINEL,

	/*
	 * I915_FENCE_FLAG_BOOST - upclock the gpu for this request
	 *
	 * Some requests are more important than others! In particular, a
	 * request that the user is waiting on is typically required for
	 * interactive latency, which we want to minimise by upclocking the
	 * GPU. Here we track such boost requests on a per-request basis.
	 */
	I915_FENCE_FLAG_BOOST,
};

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct i915_request {
	struct dma_fence fence;
	spinlock_t lock;

	/** The i915 device on which this request was generated */
	struct drm_i915_private *i915;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct intel_engine_cs *engine;
	struct intel_context *context;
	struct intel_ring *ring;
	struct intel_timeline __rcu *timeline;
	struct list_head signal_link;

	/*
	 * The rcu epoch of when this request was allocated. Used to judiciously
	 * apply backpressure on future allocations to ensure that under
	 * mempressure there are sufficient RCU ticks for us to reclaim our
	 * RCU-protected slabs.
	 */
	unsigned long rcustate;

	/*
	 * We pin the timeline->mutex while constructing the request to
	 * ensure that no caller accidentally drops it during construction.
	 * The timeline->mutex must be held to ensure that only this caller
	 * can use the ring and manipulate the associated timeline during
	 * construction.
	 */
	struct pin_cookie cookie;

	/*
	 * Fences for the various phases in the request's lifetime.
	 *
	 * The submit fence is used to await upon all of the request's
	 * dependencies. When it is signaled, the request is ready to run.
	 * It is used by the driver to then queue the request for execution.
	 */
	struct i915_sw_fence submit;
	union {
#ifdef __NetBSD__		/* XXX */
		struct i915_sw_fence_waiter submitq;
#else
		wait_queue_entry_t submitq;
#endif
		struct i915_sw_dma_fence_cb dmaq;
		struct i915_request_duration_cb {
			struct dma_fence_cb cb;
			ktime_t emitted;
		} duration;
	};
	struct list_head execute_cb;
	struct i915_sw_fence semaphore;

	/*
	 * A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
	 * submit fence is signaled (it waits for all external events as well
	 * as our own requests), the scheduler still needs to know the
	 * dependency tree for the lifetime of the request (from execbuf
	 * to retirement), i.e. bidirectional dependency information for the
	 * request not tied to individual fences.
	 */
	struct i915_sched_node sched;
	struct i915_dependency dep;
	intel_engine_mask_t execution_mask;

	/*
	 * A convenience pointer to the current breadcrumb value stored in
	 * the HW status page (or our timeline's local equivalent). The full
	 * path would be rq->hw_context->ring->timeline->hwsp_seqno.
	 */
	const u32 *hwsp_seqno;

	/*
	 * If we need to access the timeline's seqno for this request in
	 * another request, we need to keep a read reference to this associated
	 * cacheline, so that we do not free and recycle it before the foreign
	 * observers have completed. Hence, we keep a pointer to the cacheline
	 * inside the timeline's HWSP vma, but it is only valid while this
	 * request has not completed and guarded by the timeline mutex.
	 */
	struct intel_timeline_cacheline __rcu *hwsp_cacheline;

	/** Position in the ring of the start of the request */
	u32 head;

	/** Position in the ring of the start of the user packets */
	u32 infix;

	/**
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ring of the end of the whole request */
	u32 tail;

	/** Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/** Preallocated space in the ring for emitting the request */
	u32 reserved_space;

	/** Batch buffer related to this request if any (used for
	 * error state dump only).
	 */
	struct i915_vma *batch;
	/**
	 * Additional buffers requested by userspace to be captured upon
	 * a GPU hang. The vma/obj on this list are protected by their
	 * active reference - all objects on this list must also be
	 * on the active_list (of their final request).
	 */
	struct i915_capture_list *capture_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** timeline->request entry for this request */
	struct list_head link;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_link;

	I915_SELFTEST_DECLARE(struct {
		struct list_head link;
		unsigned long delay;
	} mock;)
};

#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}
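
/*
 * Downcast sketch (illustrative): dma_fence_is_i915() guards the
 * container_of() performed by to_request() below, e.g. when handed a
 * possibly-foreign struct dma_fence:
 *
 *	if (dma_fence_is_i915(fence))
 *		rq = to_request(fence);
 */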

struct i915_request * __must_check
__i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);

struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
			  const struct i915_sched_attr *attr);

bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq);

static inline struct i915_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct i915_request, fence);
}

static inline struct i915_request *
i915_request_get(struct i915_request *rq)
{
	return to_request(dma_fence_get(&rq->fence));
}

static inline struct i915_request *
i915_request_get_rcu(struct i915_request *rq)
{
	return to_request(dma_fence_get_rcu(&rq->fence));
}
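
/*
 * Lockless-lookup sketch (illustrative; 'slot' is a hypothetical
 * RCU-protected pointer, not part of this header). As the struct
 * comment above warns, a request may be recycled by the slab while a
 * lockless reader inspects it, so the reader must take a reference via
 * i915_request_get_rcu() (which returns NULL if the refcount already
 * dropped to zero) and then revalidate its pointer:
 *
 *	rcu_read_lock();
 *	rq = rcu_dereference(slot);
 *	if (rq)
 *		rq = i915_request_get_rcu(rq);
 *	rcu_read_unlock();
 *	if (rq && rq != rcu_access_pointer(slot)) {
 *		i915_request_put(rq);	(lost the race; retry)
 *		rq = NULL;
 *	}
 */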

static inline void
i915_request_put(struct i915_request *rq)
{
	dma_fence_put(&rq->fence);
}
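
/*
 * Lifetime sketch (illustrative): once i915_request_add() is called the
 * request may be retired (and freed) at any moment, so a caller that
 * wants to keep using it afterwards, e.g. to wait on it, takes its own
 * reference first:
 *
 *	rq = i915_request_create(ce);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	i915_request_get(rq);
 *	i915_request_add(rq);
 *	...wait on or inspect rq...
 *	i915_request_put(rq);
 */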

int i915_request_await_object(struct i915_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
				 struct dma_fence *fence);
int i915_request_await_execution(struct i915_request *rq,
				 struct dma_fence *fence,
				 void (*hook)(struct i915_request *rq,
					      struct dma_fence *signal));

void i915_request_add(struct i915_request *rq);

bool __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);

void i915_request_skip(struct i915_request *request, int error);

void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);

/* Note: part of the intel_breadcrumbs family */
bool i915_request_enable_breadcrumb(struct i915_request *request);
void i915_request_cancel_breadcrumb(struct i915_request *request);

long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
	__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE	BIT(0)
#define I915_WAIT_PRIORITY	BIT(1) /* small priority bump for the request */
#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
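
/*
 * Wait sketch (illustrative): i915_request_wait() returns the remaining
 * timeout in jiffies if the request completed, or a negative error code
 * such as -ETIME on timeout or -ERESTARTSYS when interrupted:
 *
 *	long ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *				     MAX_SCHEDULE_TIMEOUT);
 *	if (ret < 0)
 *		return ret;
 */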

static inline bool i915_request_signaled(const struct i915_request *rq)
{
	/* The request may live longer than its HWSP, so check flags first! */
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
}

static inline bool i915_request_is_active(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}

static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}

/**
 * Returns true if seq1 is later than, or the same as, seq2.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
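
/*
 * The unsigned subtraction followed by the signed comparison keeps the
 * test correct across u32 wraparound, provided the two seqnos are
 * within 2^31 of each other. Worked examples:
 *
 *	i915_seqno_passed(2, 1)          -> (s32)0x00000001 >= 0 -> true
 *	i915_seqno_passed(1, 0xfffffffe) -> (s32)0x00000003 >= 0 -> true
 *	i915_seqno_passed(0xfffffffe, 1) -> (s32)0xfffffffd <  0 -> false
 */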

static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
	return READ_ONCE(*rq->hwsp_seqno);
}

/**
 * hwsp_seqno - the current breadcrumb value in the HW status page
 * @rq: the request, to chase the relevant HW status page
 *
 * The emphasis in naming here is that hwsp_seqno() is not a property of the
 * request, but an indication of the current HW state (associated with this
 * request). Its value will change as the GPU executes more requests.
 *
 * Returns the current breadcrumb value in the associated HW status page (or
 * the local timeline's equivalent) for this request. The request itself
 * has the associated breadcrumb value of rq->fence.seqno; once the HW
 * status page shows that breadcrumb or later, this request is complete.
 */
static inline u32 hwsp_seqno(const struct i915_request *rq)
{
	u32 seqno;

	rcu_read_lock(); /* the HWSP may be freed at runtime */
	seqno = __hwsp_seqno(rq);
	rcu_read_unlock();

	return seqno;
}

static inline bool __i915_request_has_started(const struct i915_request *rq)
{
	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
}

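/*
 * The "- 1" above tests whether the breadcrumb preceding ours is
 * already visible in the HWSP, i.e. whether the previous request on
 * this timeline has been signaled, which is the point at which this
 * request nominally starts.
 */
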
/**
 * i915_request_started - check if the request has begun being executed
 * @rq: the request
 *
 * If the timeline is not using initial breadcrumbs, a request is
 * considered started if the previous request on its timeline (i.e.
 * context) has been signaled.
 *
 * If the timeline is using semaphores, it will also be emitting an
 * "initial breadcrumb" after the semaphores are complete and just before
 * it begins executing the user payload. A request can therefore be active
 * on the HW and not yet started as it is still busywaiting on its
 * dependencies (via HW semaphores).
 *
 * If the request has started, its dependencies will have been signaled
 * (either by fences or by semaphores) and it will have begun processing
 * the user payload.
 *
 * However, even if a request has started, it may have been preempted and
 * so no longer active, or it may have already completed.
 *
 * See also i915_request_is_active().
 *
 * Returns true if the request has begun executing the user payload, or
 * has completed.
 */
static inline bool i915_request_started(const struct i915_request *rq)
{
	if (i915_request_signaled(rq))
		return true;

	/* Remember: started but may have since been preempted! */
	return __i915_request_has_started(rq);
}

/**
 * i915_request_is_running - check if the request may actually be executing
 * @rq: the request
 *
 * Returns true if the request is currently submitted to hardware and has
 * passed its start point (i.e. the context is set up and it is not
 * busywaiting). Note that it may no longer be running by the time the
 * function returns!
 */
static inline bool i915_request_is_running(const struct i915_request *rq)
{
	if (!i915_request_is_active(rq))
		return false;

	return __i915_request_has_started(rq);
}

/**
 * i915_request_is_ready - check if the request is ready for execution
 * @rq: the request
 *
 * Upon construction, the request is instructed to wait upon various
 * signals before it is ready to be executed by the HW. That is, we do
 * not want to start execution and read data before it is written. In practice,
 * this is controlled with a mixture of interrupts and semaphores. Once
 * the submit fence is completed, the backend scheduler will place the
 * request into its queue and from there submit it for execution. So we
 * can detect when a request is eligible for execution (and is under control
 * of the scheduler) by querying where it is in any of the scheduler's lists.
 *
 * Returns true if the request is ready for execution (it may be inflight),
 * false otherwise.
 */
static inline bool i915_request_is_ready(const struct i915_request *rq)
{
	return !list_empty(&rq->sched.link);
}

static inline bool i915_request_completed(const struct i915_request *rq)
{
	if (i915_request_signaled(rq))
		return true;

	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
}

static inline void i915_request_mark_complete(struct i915_request *rq)
{
	rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
}
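
/*
 * After i915_request_mark_complete(), hwsp_seqno(rq) reads the request's
 * own fence.seqno, so i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno)
 * is trivially true and i915_request_completed() reports completion
 * without dereferencing a HW status page that may already be gone.
 */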

static inline bool i915_request_has_waitboost(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
}

static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
{
	/* Preemption should only be disabled very rarely */
	return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
}

static inline bool i915_request_has_sentinel(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
}

static inline bool i915_request_on_hold(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
}

static inline void i915_request_set_hold(struct i915_request *rq)
{
	set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline void i915_request_clear_hold(struct i915_request *rq)
{
	clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline struct intel_timeline *
i915_request_timeline(struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->timeline,
					 lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
}

static inline struct i915_gem_context *
i915_request_gem_context(struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->context->gem_context, true);
}

static inline struct intel_timeline *
i915_request_active_timeline(struct i915_request *rq)
{
	/*
	 * When in use during submission, we are protected by a guarantee that
	 * the context/timeline is pinned and must remain pinned until after
	 * this submission.
	 */
	return rcu_dereference_protected(rq->timeline,
					 lockdep_is_held(&rq->engine->active.lock));
}

#endif /* I915_REQUEST_H */