/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_GEM_REQUEST_H
#define I915_GEM_REQUEST_H

#include <linux/fence.h>

#include "i915_gem.h"

struct intel_wait {
	struct rb_node node;
	struct task_struct *tsk;
	u32 seqno;
};

struct intel_signal_node {
	struct rb_node node;
	struct intel_wait wait;
};

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct drm_i915_gem_request {
	struct fence fence;
	spinlock_t lock;

	/** On which ring this request was generated */
	struct drm_i915_private *i915;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_gem_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct i915_gem_context *ctx;
	struct intel_engine_cs *engine;
	struct intel_ring *ring;
	struct intel_signal_node signaling;

	/** GEM sequence number associated with the previous request;
	 * when the HWS breadcrumb is equal to this, the GPU is processing
	 * this request.
	 */
	u32 previous_seqno;

	/** Position in the ringbuffer of the start of the request */
	u32 head;

	/**
	 * Position in the ringbuffer of the start of the postfix.
	 * This is required to calculate the maximum available ringbuffer
	 * space without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ringbuffer of the end of the whole request */
	u32 tail;

	/** Preallocated space in the ringbuffer for emitting the request */
	u32 reserved_space;

	/**
	 * Context related to the previous request.
	 * As the contexts are accessed by the hardware until the switch is
	 * completed to a new context, the hardware may still be writing
	 * to the context object after the breadcrumb is visible. We must
	 * not unpin/unbind/prune that object whilst still active and so
	 * we keep the previous context pinned until the following (this)
	 * request is retired.
	 */
	struct i915_gem_context *previous_context;

	/** Batch buffer related to this request, if any (used for
	 * error state dump only).
	 */
	struct drm_i915_gem_object *batch_obj;
	struct list_head active_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** engine->request_list entry for this request */
	struct list_head link;

	/** ring->request_list entry for this request */
	struct list_head ring_link;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;

	/** process identifier submitting this request */
	pid_t pid;

	/**
	 * The ELSP only accepts two elements at a time, so we queue
	 * context/tail pairs on a given queue (ring->execlist_queue) until the
	 * hardware is available. The queue serves a double purpose: we also use
	 * it to keep track of the up to 2 contexts currently in the hardware
	 * (usually one in execution and the other queued up by the GPU). We
	 * only remove elements from the head of the queue when the hardware
	 * informs us that an element has been completed.
	 *
	 * All accesses to the queue are mediated by a spinlock
	 * (ring->execlist_lock).
	 */

	/** Execlist link in the submission queue. */
	struct list_head execlist_link;

	/** Number of times this request has been sent to the ELSP (execlists) */
	int elsp_submitted;

	/** Execlists context hardware id. */
	unsigned int ctx_hw_id;
};

extern const struct fence_ops i915_fence_ops;

static inline bool fence_is_i915(struct fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct drm_i915_gem_request * __must_check
i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx);
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
				   struct drm_file *file);
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);

static inline u32
i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
{
	return req ? req->fence.seqno : 0;
}

static inline struct intel_engine_cs *
i915_gem_request_get_engine(struct drm_i915_gem_request *req)
{
	return req ? req->engine : NULL;
}

static inline struct drm_i915_gem_request *
to_request(struct fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
	GEM_BUG_ON(fence && !fence_is_i915(fence));
	return container_of(fence, struct drm_i915_gem_request, fence);
}

static inline struct drm_i915_gem_request *
i915_gem_request_get(struct drm_i915_gem_request *req)
{
	return to_request(fence_get(&req->fence));
}

static inline struct drm_i915_gem_request *
i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
{
	return to_request(fence_get_rcu(&req->fence));
}

static inline void
i915_gem_request_put(struct drm_i915_gem_request *req)
{
	fence_put(&req->fence);
}

static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
					   struct drm_i915_gem_request *src)
{
	if (src)
		i915_gem_request_get(src);

	if (*pdst)
		i915_gem_request_put(*pdst);

	*pdst = src;
}
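
/*
 * Example (illustrative sketch only, not part of the driver API): using
 * i915_gem_request_assign() to replace a cached request pointer. The helper
 * takes a reference on the new request before dropping the old one, so the
 * update is safe even if @src and *@pdst happen to be the same request. The
 * structure and function names below are hypothetical.
 *
 *	struct hypothetical_cache {
 *		struct drm_i915_gem_request *last_req;
 *	};
 *
 *	static void cache_track(struct hypothetical_cache *c,
 *				struct drm_i915_gem_request *req)
 *	{
 *		i915_gem_request_assign(&c->last_req, req);
 *	}
 *
 *	static void cache_drop(struct hypothetical_cache *c)
 *	{
 *		i915_gem_request_assign(&c->last_req, NULL);
 *	}
 */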

void __i915_add_request(struct drm_i915_gem_request *req,
			struct drm_i915_gem_object *batch_obj,
			bool flush_caches);
#define i915_add_request(req) \
	__i915_add_request(req, NULL, true)
#define i915_add_request_no_flush(req) \
	__i915_add_request(req, NULL, false)

struct intel_rps_client;
#define NO_WAITBOOST ERR_PTR(-1)
#define IS_RPS_CLIENT(p) (!IS_ERR(p))
#define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))

int i915_wait_request(struct drm_i915_gem_request *req,
		      bool interruptible,
		      s64 *timeout,
		      struct intel_rps_client *rps)
	__attribute__((nonnull(1)));
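
/*
 * Example (illustrative only): a simple blocking, interruptible wait on a
 * request. A NULL timeout waits indefinitely, and NO_WAITBOOST skips
 * charging an RPS client for a waitboost; see i915_wait_request() for the
 * exact argument semantics. The wrapper name is hypothetical.
 *
 *	static int wait_for_req(struct drm_i915_gem_request *req)
 *	{
 *		return i915_wait_request(req, true, NULL, NO_WAITBOOST);
 *	}
 */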

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);

/**
 * Returns true if seq1 is later than seq2.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
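
/*
 * Worked example: the unsigned subtraction cast to s32 keeps the comparison
 * correct across seqno wraparound. With seq1 == 2 and seq2 == 0xfffffffe,
 * (s32)(2 - 0xfffffffe) == 4 >= 0, so seq1 is reported as later even though
 * it is numerically smaller; with the arguments swapped,
 * (s32)(0xfffffffe - 2) == -4 < 0. This only holds while the two seqnos are
 * within 2^31 of each other.
 */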

static inline bool
i915_gem_request_started(const struct drm_i915_gem_request *req)
{
	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
				 req->previous_seqno);
}

static inline bool
i915_gem_request_completed(const struct drm_i915_gem_request *req)
{
	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
				 req->fence.seqno);
}

bool __i915_spin_request(const struct drm_i915_gem_request *request,
			 int state, unsigned long timeout_us);
static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
				     int state, unsigned long timeout_us)
{
	return (i915_gem_request_started(request) &&
		__i915_spin_request(request, state, timeout_us));
}

/* We treat requests as fences. This is not to be confused with our
 * "fence registers" but refers to pipeline synchronisation objects a la
 * GL_ARB_sync. We use the fences to synchronize access from the CPU with
 * activity on the GPU, for example, we should not rewrite an object's PTEs
 * whilst the GPU is reading them. We also track fences at a higher level to
 * provide implicit synchronisation around GEM objects, e.g. set-domain will
 * wait for outstanding GPU rendering before marking the object ready for CPU
 * access, or a pageflip will wait until the GPU is complete before showing
 * the frame on the scanout.
 *
 * In order to use a fence, the object must track the fence it needs to
 * serialise with. For example, GEM objects want to track both read and
 * write access so that we can perform concurrent read operations between
 * the CPU and GPU engines, as well as waiting for all rendering to
 * complete, or waiting for the last GPU user of a "fence register". The
 * object then embeds a #i915_gem_active to track the most recent (in
 * retirement order) request relevant for the desired mode of access.
 * The #i915_gem_active is updated with i915_gem_active_set() to track the
 * most recent fence request, typically this is done as part of
 * i915_vma_move_to_active().
 *
 * When the request tracked by the #i915_gem_active completes (is retired),
 * the tracker signals the completion to the owner through a callback as
 * well as marking itself as idle (i915_gem_active.request == NULL). The
 * owner can then perform any action, such as delayed freeing of an active
 * resource including itself.
 */
struct i915_gem_active;

typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
				   struct drm_i915_gem_request *);

struct i915_gem_active {
	struct drm_i915_gem_request __rcu *request;
	struct list_head link;
	i915_gem_retire_fn retire;
};

void i915_gem_retire_noop(struct i915_gem_active *,
			  struct drm_i915_gem_request *request);

/**
 * init_request_active - prepares the activity tracker for use
 * @active - the active tracker
 * @retire - a callback invoked when the tracker is retired (becomes idle),
 *           can be NULL
 *
 * init_request_active() prepares the embedded @active struct for use as
 * an activity tracker, that is, for tracking the last known active request
 * associated with it. When the last request becomes idle (is retired after
 * completion), the optional callback @retire is invoked.
 */
static inline void
init_request_active(struct i915_gem_active *active,
		    i915_gem_retire_fn retire)
{
	INIT_LIST_HEAD(&active->link);
	active->retire = retire ?: i915_gem_retire_noop;
}

/**
 * i915_gem_active_set - updates the tracker to watch the current request
 * @active - the active tracker
 * @request - the request to watch
 *
 * i915_gem_active_set() watches the given @request for completion. Whilst
 * that @request is busy, the @active reports busy. When that @request is
 * retired, the @active tracker is updated to report idle.
 */
static inline void
i915_gem_active_set(struct i915_gem_active *active,
		    struct drm_i915_gem_request *request)
{
	list_move(&active->link, &request->active_list);
	rcu_assign_pointer(active->request, request);
}
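
/*
 * Putting the tracker pieces together, a minimal lifecycle sketch
 * (illustrative only; the owning structure, callback and lock names below
 * are hypothetical, and real users typically update the tracker from
 * i915_vma_move_to_active() whilst holding struct_mutex):
 *
 *	struct hypothetical_owner {
 *		struct i915_gem_active busy;
 *	};
 *
 *	static void owner_retire(struct i915_gem_active *active,
 *				 struct drm_i915_gem_request *request)
 *	{
 *		(invoked once the tracked request has been retired)
 *	}
 *
 *	setup, once:
 *		init_request_active(&owner->busy, owner_retire);
 *	on each new use by a request, under struct_mutex:
 *		i915_gem_active_set(&owner->busy, request);
 *	to wait for and retire the tracked request explicitly:
 *		ret = i915_gem_active_retire(&owner->busy, &dev->struct_mutex);
 */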

static inline struct drm_i915_gem_request *
__i915_gem_active_peek(const struct i915_gem_active *active)
{
	/* Inside the error capture (running with the driver in an unknown
	 * state), we want to bend the rules slightly (a lot).
	 *
	 * Work is in progress to make it safer, in the meantime this keeps
	 * the known issue from spamming the logs.
	 */
	return rcu_dereference_protected(active->request, 1);
}

/**
 * i915_gem_active_raw - return the active request
 * @active - the active tracker
 *
 * i915_gem_active_raw() returns the current request being tracked, or NULL.
 * It does not obtain a reference on the request for the caller, so the caller
 * must hold struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_raw(const struct i915_gem_active *active, struct lock *mutex)
{
	return rcu_dereference_protected(active->request,
					 lockdep_is_held(mutex));
}

/**
 * i915_gem_active_peek - report the active request being monitored
 * @active - the active tracker
 *
 * i915_gem_active_peek() returns the current request being tracked if
 * still active, or NULL. It does not obtain a reference on the request
 * for the caller, so the caller must hold struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_peek(const struct i915_gem_active *active, struct lock *mutex)
{
	struct drm_i915_gem_request *request;

	request = i915_gem_active_raw(active, mutex);
	if (!request || i915_gem_request_completed(request))
		return NULL;

	return request;
}

/**
 * i915_gem_active_get - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get() returns a reference to the active request, or NULL
 * if the active tracker is idle. The caller must hold struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_get(const struct i915_gem_active *active, struct lock *mutex)
{
	return i915_gem_request_get(i915_gem_active_peek(active, mutex));
}

/**
 * __i915_gem_active_get_rcu - return a reference to the active request
 * @active - the active tracker
 *
 * __i915_gem_active_get_rcu() returns a reference to the active request,
 * or NULL if the active tracker is idle. The caller must hold the RCU read
 * lock, but the returned pointer is safe to use outside of RCU.
 */
static inline struct drm_i915_gem_request *
__i915_gem_active_get_rcu(const struct i915_gem_active *active)
{
	/* Performing a lockless retrieval of the active request is super
	 * tricky. SLAB_DESTROY_BY_RCU merely guarantees that the backing
	 * slab of request objects will not be freed whilst we hold the
	 * RCU read lock. It does not guarantee that the request itself
	 * will not be freed and then *reused*. Viz,
	 *
	 * Thread A			Thread B
	 *
	 * req = active.request
	 *				retire(req) -> free(req);
	 *				(req is now first on the slab freelist)
	 *				active.request = NULL
	 *
	 *				req = new submission on a new object
	 * ref(req)
	 *
	 * To prevent the request from being reused whilst the caller
	 * uses it, we take a reference like normal. Whilst acquiring
	 * the reference we check that it is not in a destroyed state
	 * (refcnt == 0). That prevents the request being reallocated
	 * whilst the caller holds on to it. To check that the request
	 * was not reallocated as we acquired the reference we have to
	 * check that our request remains the active request across
	 * the lookup, in the same manner as a seqlock. The visibility
	 * of the pointer versus the reference counting is controlled
	 * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
	 *
	 * In the middle of all that, we inspect whether the request is
	 * complete. Retiring is lazy so the request may be completed long
	 * before the active tracker is updated. Querying whether the
	 * request is complete is far cheaper (as it involves no locked
	 * instructions setting cachelines to exclusive) than acquiring
	 * the reference, so we do it first. The RCU read lock ensures the
	 * pointer dereference is valid, but does not ensure that either
	 * the seqno or the HWS is the right one! However, if the request
	 * was reallocated, that means the active tracker's request was
	 * complete. If the new request is also complete, then both are
	 * and we can just report the active tracker is idle. If the new
	 * request is incomplete, then we acquire a reference on it and
	 * check that it remained the active request.
	 *
	 * It is then imperative that we do not zero the request on
	 * reallocation, so that we can chase the dangling pointers!
	 * See i915_gem_request_alloc().
	 */
	do {
		struct drm_i915_gem_request *request;

		request = rcu_dereference(active->request);
		if (!request || i915_gem_request_completed(request))
			return NULL;

		request = i915_gem_request_get_rcu(request);

		/* What stops the following rcu_access_pointer() from occurring
		 * before the above i915_gem_request_get_rcu()? If we were
		 * to read the value before pausing to get the reference to
		 * the request, we may not notice a change in the active
		 * tracker.
		 *
		 * The rcu_access_pointer() is a mere compiler barrier, which
		 * means both the CPU and compiler are free to perform the
		 * memory read without constraint. The compiler only has to
		 * ensure that any operations after the rcu_access_pointer()
		 * occur afterwards in program order. This means the read may
		 * be performed earlier by an out-of-order CPU, or adventurous
		 * compiler.
		 *
		 * The atomic operation at the heart of
		 * i915_gem_request_get_rcu(), see fence_get_rcu(), is
		 * atomic_inc_not_zero() which is only a full memory barrier
		 * when successful. That is, if i915_gem_request_get_rcu()
		 * returns the request (and so with the reference count
		 * incremented) then the following read for rcu_access_pointer()
		 * must occur after the atomic operation and so confirm
		 * that this request is the one currently being tracked.
		 *
		 * The corresponding write barrier is part of
		 * rcu_assign_pointer().
		 */
		if (!request || request == rcu_access_pointer(active->request))
			return rcu_pointer_handoff(request);

		i915_gem_request_put(request);
	} while (1);
}

/**
 * i915_gem_active_get_unlocked - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get_unlocked() returns a reference to the active request,
 * or NULL if the active tracker is idle. The reference is obtained under RCU,
 * so no locking is required by the caller.
 *
 * The reference should be freed with i915_gem_request_put().
 */
static inline struct drm_i915_gem_request *
i915_gem_active_get_unlocked(const struct i915_gem_active *active)
{
	struct drm_i915_gem_request *request;

	rcu_read_lock();
	request = __i915_gem_active_get_rcu(active);
	rcu_read_unlock();

	return request;
}
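
/*
 * Example (illustrative only): checking whether a tracker is busy without
 * holding struct_mutex. The reference returned by
 * i915_gem_active_get_unlocked() must be released with
 * i915_gem_request_put(); the helper name below is hypothetical.
 *
 *	static bool hypothetical_is_busy(const struct i915_gem_active *active)
 *	{
 *		struct drm_i915_gem_request *req;
 *		bool busy = false;
 *
 *		req = i915_gem_active_get_unlocked(active);
 *		if (req) {
 *			busy = true;
 *			i915_gem_request_put(req);
 *		}
 *
 *		return busy;
 *	}
 */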

/**
 * i915_gem_active_isset - report whether the active tracker is assigned
 * @active - the active tracker
 *
 * i915_gem_active_isset() returns true if the active tracker is currently
 * assigned to a request. Due to the lazy retiring, that request may be idle
 * and this may report stale information.
 */
static inline bool
i915_gem_active_isset(const struct i915_gem_active *active)
{
	return rcu_access_pointer(active->request);
}

/**
 * i915_gem_active_is_idle - report whether the active tracker is idle
 * @active - the active tracker
 *
 * i915_gem_active_is_idle() returns true if the active tracker is currently
 * unassigned or if the request is complete (but not yet retired). Requires
 * the caller to hold struct_mutex (but that can be relaxed if desired).
 */
static inline bool
i915_gem_active_is_idle(const struct i915_gem_active *active,
			struct lock *mutex)
{
	return !i915_gem_active_peek(active, mutex);
}

/**
 * i915_gem_active_wait - waits until the request is completed
 * @active - the active request on which to wait
 *
 * i915_gem_active_wait() waits until the request is completed before
 * returning. Note that it does not guarantee that the request is
 * retired first, see i915_gem_active_retire().
 *
 * i915_gem_active_wait() returns immediately if the active
 * request is already complete.
 */
static inline int __must_check
i915_gem_active_wait(const struct i915_gem_active *active, struct lock *mutex)
{
	struct drm_i915_gem_request *request;

	request = i915_gem_active_peek(active, mutex);
	if (!request)
		return 0;

	return i915_wait_request(request, true, NULL, NULL);
}

/**
 * i915_gem_active_wait_unlocked - waits until the request is completed
 * @active - the active request on which to wait
 * @interruptible - whether the wait can be woken by a userspace signal
 * @timeout - how long to wait at most
 * @rps - userspace client to charge for a waitboost
 *
 * i915_gem_active_wait_unlocked() waits until the request is completed before
 * returning, without requiring any locks to be held. Note that it does not
 * retire any requests before returning.
 *
 * This function relies on RCU in order to acquire the reference to the active
 * request without holding any locks. See __i915_gem_active_get_rcu() for the
 * gory details on how that is managed. Once the reference is acquired, we
 * can then wait upon the request, and afterwards release our reference,
 * free of any locking.
 *
 * This function wraps i915_wait_request(), see it for the full details on
 * the arguments.
 *
 * Returns 0 if successful, or a negative error code.
 */
static inline int
i915_gem_active_wait_unlocked(const struct i915_gem_active *active,
			      bool interruptible,
			      s64 *timeout,
			      struct intel_rps_client *rps)
{
	struct drm_i915_gem_request *request;
	int ret = 0;

	request = i915_gem_active_get_unlocked(active);
	if (request) {
		ret = i915_wait_request(request, interruptible, timeout, rps);
		i915_gem_request_put(request);
	}

	return ret;
}

/**
 * i915_gem_active_retire - waits until the request is retired
 * @active - the active request on which to wait
 *
 * i915_gem_active_retire() waits until the request is completed,
 * and then ensures that at least the retirement handler for this
 * @active tracker is called before returning. If the @active
 * tracker is idle, the function returns immediately.
 */
static inline int __must_check
i915_gem_active_retire(struct i915_gem_active *active,
		       struct lock *mutex)
{
	struct drm_i915_gem_request *request;
	int ret;

	request = i915_gem_active_raw(active, mutex);
	if (!request)
		return 0;

	ret = i915_wait_request(request, true, NULL, NULL);
	if (ret)
		return ret;

	list_del_init(&active->link);
	RCU_INIT_POINTER(active->request, NULL);

	active->retire(active, request);

	return 0;
}

/* Convenience functions for peeking at state inside active's request whilst
 * guarded by the struct_mutex.
 */

static inline uint32_t
i915_gem_active_get_seqno(const struct i915_gem_active *active,
			  struct lock *mutex)
{
	return i915_gem_request_get_seqno(i915_gem_active_peek(active, mutex));
}

static inline struct intel_engine_cs *
i915_gem_active_get_engine(const struct i915_gem_active *active,
			   struct lock *mutex)
{
	return i915_gem_request_get_engine(i915_gem_active_peek(active, mutex));
}

#define for_each_active(mask, idx) \
	for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
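
/*
 * Example (illustrative only): walking the engines named by an active
 * bitmask. The loop consumes @mask, so pass a copy if the original value is
 * still needed; @idx receives the bit index of each set bit in turn. The
 * variable and helper names below are hypothetical.
 *
 *	unsigned int active_mask = obj_active_mask;
 *	int idx;
 *
 *	for_each_active(active_mask, idx)
 *		note_busy_engine(idx);
 */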

#endif /* I915_GEM_REQUEST_H */