xref: /openbsd-src/sys/dev/pci/drm/i915/i915_request.c (revision a5429850edcc9dd5646cc8ddb251ed22eba08b09)
1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/dma-fence-array.h>
26 #include <linux/dma-fence-chain.h>
27 #include <linux/irq_work.h>
28 #include <linux/prefetch.h>
29 #include <linux/sched.h>
30 #include <linux/sched/clock.h>
31 #include <linux/sched/signal.h>
32 #include <linux/sched/mm.h>
33 
34 #include "gem/i915_gem_context.h"
35 #include "gt/intel_breadcrumbs.h"
36 #include "gt/intel_context.h"
37 #include "gt/intel_engine.h"
38 #include "gt/intel_engine_heartbeat.h"
39 #include "gt/intel_gpu_commands.h"
40 #include "gt/intel_reset.h"
41 #include "gt/intel_ring.h"
42 #include "gt/intel_rps.h"
43 
44 #include "i915_active.h"
45 #include "i915_drv.h"
46 #include "i915_trace.h"
47 #include "intel_pm.h"
48 
49 struct execute_cb {
50 	struct irq_work work;
51 	struct i915_sw_fence *fence;
52 	struct i915_request *signal;
53 };
54 
55 static struct pool slab_requests;
56 static struct pool slab_execute_cbs;
57 
58 static const char *i915_fence_get_driver_name(struct dma_fence *fence)
59 {
60 	return dev_name(to_request(fence)->engine->i915->drm.dev);
61 }
62 
63 static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
64 {
65 	const struct i915_gem_context *ctx;
66 
67 	/*
68 	 * The timeline struct (as part of the ppgtt underneath a context)
69 	 * may be freed when the request is no longer in use by the GPU.
70 	 * We could extend the life of a context to beyond that of all
71 	 * fences, possibly keeping the hw resource around indefinitely,
72 	 * or we just give them a false name. Since
73 	 * dma_fence_ops.get_timeline_name is a debug feature, the occasional
74 	 * lie seems justifiable.
75 	 */
76 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
77 		return "signaled";
78 
79 	ctx = i915_request_gem_context(to_request(fence));
80 	if (!ctx)
81 		return "[" DRIVER_NAME "]";
82 
83 	return ctx->name;
84 }
85 
86 static bool i915_fence_signaled(struct dma_fence *fence)
87 {
88 	return i915_request_completed(to_request(fence));
89 }
90 
91 static bool i915_fence_enable_signaling(struct dma_fence *fence)
92 {
93 	return i915_request_enable_breadcrumb(to_request(fence));
94 }
95 
96 static signed long i915_fence_wait(struct dma_fence *fence,
97 				   bool interruptible,
98 				   signed long timeout)
99 {
100 	return i915_request_wait(to_request(fence),
101 				 interruptible | I915_WAIT_PRIORITY,
102 				 timeout);
103 }
104 
105 #ifdef __linux__
106 struct kmem_cache *i915_request_slab_cache(void)
107 {
108 	return slab_requests;
109 }
110 #else
111 struct pool *i915_request_slab_cache(void)
112 {
113 	return &slab_requests;
114 }
115 #endif
116 
117 static void i915_fence_release(struct dma_fence *fence)
118 {
119 	struct i915_request *rq = to_request(fence);
120 
121 	GEM_BUG_ON(rq->guc_prio != GUC_PRIO_INIT &&
122 		   rq->guc_prio != GUC_PRIO_FINI);
123 
124 	/*
125 	 * The request is put onto a RCU freelist (i.e. the address
126 	 * is immediately reused), mark the fences as being freed now.
127 	 * Otherwise the debugobjects for the fences are only marked as
128 	 * freed when the slab cache itself is freed, and so we would get
129 	 * caught trying to reuse dead objects.
130 	 */
131 	i915_sw_fence_fini(&rq->submit);
132 	i915_sw_fence_fini(&rq->semaphore);
133 
134 	/*
135 	 * Keep one request on each engine for reserved use under mempressure;
136 	 * do not use with virtual engines as this is really only needed for
137 	 * kernel contexts.
138 	 */
139 	if (!intel_engine_is_virtual(rq->engine) &&
140 	    !cmpxchg(&rq->engine->request_pool, NULL, rq)) {
141 		intel_context_put(rq->context);
142 		return;
143 	}
144 
145 	intel_context_put(rq->context);
146 
147 #ifdef __linux__
148 	kmem_cache_free(slab_requests, rq);
149 #else
150 	pool_put(&slab_requests, rq);
151 #endif
152 }
153 
154 const struct dma_fence_ops i915_fence_ops = {
155 	.get_driver_name = i915_fence_get_driver_name,
156 	.get_timeline_name = i915_fence_get_timeline_name,
157 	.enable_signaling = i915_fence_enable_signaling,
158 	.signaled = i915_fence_signaled,
159 	.wait = i915_fence_wait,
160 	.release = i915_fence_release,
161 };
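
/*
 * Illustrative sketch (not part of the driver): because every
 * i915_request embeds a dma_fence driven by the ops above, generic
 * dma-fence consumers can wait on GPU work without knowing about i915.
 * The request reference is assumed to be held by the caller.
 *
 *	long ret = dma_fence_wait_timeout(&rq->fence, true, HZ);
 *	if (ret < 0)
 *		return ret;	(wait interrupted)
 *	if (ret == 0)
 *		return -ETIME;	(timed out)
 *	(otherwise the fence signaled, via i915_fence_wait() above)
 */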
162 
163 static void irq_execute_cb(struct irq_work *wrk)
164 {
165 	struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
166 
167 	i915_sw_fence_complete(cb->fence);
168 #ifdef __linux__
169 	kmem_cache_free(slab_execute_cbs, cb);
170 #else
171 	pool_put(&slab_execute_cbs, cb);
172 #endif
173 }
174 
175 static __always_inline void
176 __notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk))
177 {
178 	struct execute_cb *cb, *cn;
179 
180 	if (llist_empty(&rq->execute_cb))
181 		return;
182 
183 	llist_for_each_entry_safe(cb, cn,
184 				  llist_del_all(&rq->execute_cb),
185 				  work.node.llist)
186 		fn(&cb->work);
187 }
188 
189 static void __notify_execute_cb_irq(struct i915_request *rq)
190 {
191 	__notify_execute_cb(rq, irq_work_queue);
192 }
193 
194 static bool irq_work_imm(struct irq_work *wrk)
195 {
196 #ifdef __linux__
197 	wrk->func(wrk);
198 #else
199 	wrk->task.t_func(wrk);
200 #endif
201 	return false;
202 }
203 
204 void i915_request_notify_execute_cb_imm(struct i915_request *rq)
205 {
206 	__notify_execute_cb(rq, irq_work_imm);
207 }
208 
209 static void free_capture_list(struct i915_request *request)
210 {
211 	struct i915_capture_list *capture;
212 
213 	capture = fetch_and_zero(&request->capture_list);
214 	while (capture) {
215 		struct i915_capture_list *next = capture->next;
216 
217 		kfree(capture);
218 		capture = next;
219 	}
220 }
221 
222 static void __i915_request_fill(struct i915_request *rq, u8 val)
223 {
224 	void *vaddr = rq->ring->vaddr;
225 	u32 head;
226 
227 	head = rq->infix;
228 	if (rq->postfix < head) {
229 		memset(vaddr + head, val, rq->ring->size - head);
230 		head = 0;
231 	}
232 	memset(vaddr + head, val, rq->postfix - head);
233 }
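
/*
 * Worked example of the wrap handling above: with rq->ring->size == 4096,
 * rq->infix == 3840 and rq->postfix == 256, the payload wraps past the
 * end of the ring. The first memset() fills bytes [3840, 4096), head is
 * reset to 0, and the second memset() fills [0, 256), poisoning the
 * whole request payload without writing out of bounds.
 */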
234 
235 /**
236  * i915_request_active_engine
237  * @rq: request to inspect
238  * @active: pointer in which to return the active engine
239  *
240  * Fills in the currently active engine at the @active pointer if the request
241  * is active and not yet completed.
242  *
243  * Returns true if the request was active, false otherwise.
244  */
245 bool
246 i915_request_active_engine(struct i915_request *rq,
247 			   struct intel_engine_cs **active)
248 {
249 	struct intel_engine_cs *engine, *locked;
250 	bool ret = false;
251 
252 	/*
253 	 * Serialise with __i915_request_submit() so that it sees whether
254 	 * the context is banned, or we know the request is already inflight.
255 	 *
256 	 * Note that rq->engine is unstable, and so we double
257 	 * check that we have acquired the lock on the final engine.
258 	 */
259 	locked = READ_ONCE(rq->engine);
260 	spin_lock_irq(&locked->sched_engine->lock);
261 	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
262 		spin_unlock(&locked->sched_engine->lock);
263 		locked = engine;
264 		spin_lock(&locked->sched_engine->lock);
265 	}
266 
267 	if (i915_request_is_active(rq)) {
268 		if (!__i915_request_is_complete(rq))
269 			*active = locked;
270 		ret = true;
271 	}
272 
273 	spin_unlock_irq(&locked->sched_engine->lock);
274 
275 	return ret;
276 }
277 
278 static void __rq_init_watchdog(struct i915_request *rq)
279 {
280 	rq->watchdog.timer.to_func = NULL;
281 }
282 
283 #ifdef __linux__
284 
285 static enum hrtimer_restart __rq_watchdog_expired(struct hrtimer *hrtimer)
286 {
287 	struct i915_request *rq =
288 		container_of(hrtimer, struct i915_request, watchdog.timer);
289 	struct intel_gt *gt = rq->engine->gt;
290 
291 	if (!i915_request_completed(rq)) {
292 		if (llist_add(&rq->watchdog.link, &gt->watchdog.list))
293 			schedule_work(&gt->watchdog.work);
294 	} else {
295 		i915_request_put(rq);
296 	}
297 
298 	return HRTIMER_NORESTART;
299 }
300 
301 #else
302 
303 static void
304 __rq_watchdog_expired(void *arg)
305 {
306 	struct i915_request *rq = (struct i915_request *)arg;
307 	struct intel_gt *gt = rq->engine->gt;
308 
309 	if (!i915_request_completed(rq)) {
310 		if (llist_add(&rq->watchdog.link, &gt->watchdog.list))
311 			schedule_work(&gt->watchdog.work);
312 	} else {
313 		i915_request_put(rq);
314 	}
315 }
316 
317 #endif
318 
319 static void __rq_arm_watchdog(struct i915_request *rq)
320 {
321 	struct i915_request_watchdog *wdg = &rq->watchdog;
322 	struct intel_context *ce = rq->context;
323 
324 	if (!ce->watchdog.timeout_us)
325 		return;
326 
327 	i915_request_get(rq);
328 
329 #ifdef __linux__
330 	hrtimer_init(&wdg->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
331 	wdg->timer.function = __rq_watchdog_expired;
332 	hrtimer_start_range_ns(&wdg->timer,
333 			       ns_to_ktime(ce->watchdog.timeout_us *
334 					   NSEC_PER_USEC),
335 			       NSEC_PER_MSEC,
336 			       HRTIMER_MODE_REL);
337 #else
338 	timeout_set(&wdg->timer, __rq_watchdog_expired, rq);
339 	timeout_add_msec(&wdg->timer, 1);
340 #endif
341 }
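
/*
 * Note on the non-Linux branch above: timeout_add_msec(&wdg->timer, 1)
 * arms the watchdog after a fixed 1ms rather than the context's
 * requested ce->watchdog.timeout_us. A closer mapping (an untested
 * assumption, not what this file currently does) might be:
 *
 *	timeout_add_usec(&wdg->timer, ce->watchdog.timeout_us);
 *
 * timeout(9) has no equivalent of hrtimer's slack range, so the
 * NSEC_PER_MSEC range hint in the Linux branch is simply dropped.
 */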
342 
343 static void __rq_cancel_watchdog(struct i915_request *rq)
344 {
345 	struct i915_request_watchdog *wdg = &rq->watchdog;
346 
347 	if (wdg->timer.to_func && hrtimer_try_to_cancel(&wdg->timer) > 0)
348 		i915_request_put(rq);
349 }
350 
351 bool i915_request_retire(struct i915_request *rq)
352 {
353 	if (!__i915_request_is_complete(rq))
354 		return false;
355 
356 	RQ_TRACE(rq, "\n");
357 
358 	GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
359 	trace_i915_request_retire(rq);
360 	i915_request_mark_complete(rq);
361 
362 	__rq_cancel_watchdog(rq);
363 
364 	/*
365 	 * We know the GPU must have read the request to have
366 	 * sent us the seqno + interrupt, so use the position
367 	 * of tail of the request to update the last known position
368 	 * of the GPU head.
369 	 *
370 	 * Note this requires that we are always called in request
371 	 * completion order.
372 	 */
373 	GEM_BUG_ON(!list_is_first(&rq->link,
374 				  &i915_request_timeline(rq)->requests));
375 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
376 		/* Poison before we release our space in the ring */
377 		__i915_request_fill(rq, POISON_FREE);
378 	rq->ring->head = rq->postfix;
379 
380 	if (!i915_request_signaled(rq)) {
381 		spin_lock_irq(&rq->lock);
382 		dma_fence_signal_locked(&rq->fence);
383 		spin_unlock_irq(&rq->lock);
384 	}
385 
386 	if (test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags))
387 		atomic_dec(&rq->engine->gt->rps.num_waiters);
388 
389 	/*
390 	 * We only loosely track inflight requests across preemption,
391 	 * and so we may find ourselves attempting to retire a _completed_
392 	 * request that we have removed from the HW and put back on a run
393 	 * queue.
394 	 *
395 	 * As we set I915_FENCE_FLAG_ACTIVE on the request, this should be
396 	 * after removing the breadcrumb and signaling it, so that we do not
397 	 * inadvertently attach the breadcrumb to a completed request.
398 	 */
399 	rq->engine->remove_active_request(rq);
400 	GEM_BUG_ON(!llist_empty(&rq->execute_cb));
401 
402 	__list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */
403 
404 	intel_context_exit(rq->context);
405 	intel_context_unpin(rq->context);
406 
407 	free_capture_list(rq);
408 	i915_sched_node_fini(&rq->sched);
409 	i915_request_put(rq);
410 
411 	return true;
412 }
413 
414 void i915_request_retire_upto(struct i915_request *rq)
415 {
416 	struct intel_timeline * const tl = i915_request_timeline(rq);
417 	struct i915_request *tmp;
418 
419 	RQ_TRACE(rq, "\n");
420 	GEM_BUG_ON(!__i915_request_is_complete(rq));
421 
422 	do {
423 		tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
424 		GEM_BUG_ON(!i915_request_completed(tmp));
425 	} while (i915_request_retire(tmp) && tmp != rq);
426 }
427 
428 static struct i915_request * const *
429 __engine_active(struct intel_engine_cs *engine)
430 {
431 	return READ_ONCE(engine->execlists.active);
432 }
433 
434 static bool __request_in_flight(const struct i915_request *signal)
435 {
436 	struct i915_request * const *port, *rq;
437 	bool inflight = false;
438 
439 	if (!i915_request_is_ready(signal))
440 		return false;
441 
442 	/*
443 	 * Even if we have unwound the request, it may still be on
444 	 * the GPU (preempt-to-busy). If that request is inside an
445 	 * unpreemptible critical section, it will not be removed. Some
446 	 * GPU functions may even be stuck waiting for the paired request
447 	 * (__await_execution) to be submitted and cannot be preempted
448 	 * until the bond is executing.
449 	 *
450 	 * As we know that there are always preemption points between
451 	 * requests, we know that only the currently executing request
452 	 * may be still active even though we have cleared the flag.
453 	 * However, we can't rely on our tracking of ELSP[0] to know
454 	 * which request is currently active and so may be stuck, as
455 	 * the tracking may be an event behind. Instead assume that
456 	 * if the context is still inflight, then it is still active
457 	 * even if the active flag has been cleared.
458 	 *
459 	 * To further complicate matters, if there is a pending promotion, the HW
460 	 * may either perform a context switch to the second inflight execlists,
461 	 * or it may switch to the pending set of execlists. In the case of the
462 	 * latter, it may send the ACK and we process the event copying the
463 	 * pending[] over the top of inflight[], _overwriting_ our *active. Since
464 	 * this implies the HW is arbitrating and not stuck in *active, we do
465 	 * not worry about complete accuracy, but we do require no read/write
466 	 * tearing of the pointer [the read of the pointer must be valid, even
467 	 * as the array is being overwritten, for which we require the writes
468 	 * to avoid tearing.]
469 	 *
470 	 * Note that the read of *execlists->active may race with the promotion
471 	 * of execlists->pending[] to execlists->inflight[], overwriting
472 	 * the value at *execlists->active. This is fine. The promotion implies
473 	 * that we received an ACK from the HW, and so the context is not
474 	 * stuck -- if we do not see ourselves in *active, the inflight status
475 	 * is valid. If instead we see ourselves being copied into *active,
476 	 * we are inflight and may signal the callback.
477 	 */
478 	if (!intel_context_inflight(signal->context))
479 		return false;
480 
481 	rcu_read_lock();
482 	for (port = __engine_active(signal->engine);
483 	     (rq = READ_ONCE(*port)); /* may race with promotion of pending[] */
484 	     port++) {
485 		if (rq->context == signal->context) {
486 			inflight = i915_seqno_passed(rq->fence.seqno,
487 						     signal->fence.seqno);
488 			break;
489 		}
490 	}
491 	rcu_read_unlock();
492 
493 	return inflight;
494 }
495 
496 static int
497 __await_execution(struct i915_request *rq,
498 		  struct i915_request *signal,
499 		  gfp_t gfp)
500 {
501 	struct execute_cb *cb;
502 
503 	if (i915_request_is_active(signal))
504 		return 0;
505 
506 #ifdef __linux__
507 	cb = kmem_cache_alloc(slab_execute_cbs, gfp);
508 #else
509 	cb = pool_get(&slab_execute_cbs,
510 	    (gfp & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK);
511 #endif
512 	if (!cb)
513 		return -ENOMEM;
514 
515 	cb->fence = &rq->submit;
516 	i915_sw_fence_await(cb->fence);
517 	init_irq_work(&cb->work, irq_execute_cb);
518 
519 	/*
520 	 * Register the callback first, then see if the signaler is already
521 	 * active. This ensures that if we race with the
522 	 * __notify_execute_cb from i915_request_submit() and we are not
523 	 * included in that list, we get a second bite of the cherry and
524 	 * execute it ourselves. After this point, a future
525 	 * i915_request_submit() will notify us.
526 	 *
527 	 * In i915_request_retire() we set the ACTIVE bit on a completed
528 	 * request (then flush the execute_cb). So by registering the
529 	 * callback first, then checking the ACTIVE bit, we serialise with
530 	 * the completed/retired request.
531 	 */
532 	if (llist_add(&cb->work.node.llist, &signal->execute_cb)) {
533 		if (i915_request_is_active(signal) ||
534 		    __request_in_flight(signal))
535 			i915_request_notify_execute_cb_imm(signal);
536 	}
537 
538 	return 0;
539 }
540 
541 static bool fatal_error(int error)
542 {
543 	switch (error) {
544 	case 0: /* not an error! */
545 	case -EAGAIN: /* innocent victim of a GT reset (__i915_request_reset) */
546 	case -ETIMEDOUT: /* waiting for Godot (timer_i915_sw_fence_wake) */
547 		return false;
548 	default:
549 		return true;
550 	}
551 }
552 
553 void __i915_request_skip(struct i915_request *rq)
554 {
555 	GEM_BUG_ON(!fatal_error(rq->fence.error));
556 
557 	if (rq->infix == rq->postfix)
558 		return;
559 
560 	RQ_TRACE(rq, "error: %d\n", rq->fence.error);
561 
562 	/*
563 	 * As this request likely depends on state from the lost
564 	 * context, clear out all the user operations leaving the
565 	 * breadcrumb at the end (so we get the fence notifications).
566 	 */
567 	__i915_request_fill(rq, 0);
568 	rq->infix = rq->postfix;
569 }
570 
571 bool i915_request_set_error_once(struct i915_request *rq, int error)
572 {
573 	int old;
574 
575 	GEM_BUG_ON(!IS_ERR_VALUE((long)error));
576 
577 	if (i915_request_signaled(rq))
578 		return false;
579 
580 	old = READ_ONCE(rq->fence.error);
581 	do {
582 		if (fatal_error(old))
583 			return false;
584 	} while (!try_cmpxchg(&rq->fence.error, &old, error));
585 
586 	return true;
587 }
588 
589 struct i915_request *i915_request_mark_eio(struct i915_request *rq)
590 {
591 	if (__i915_request_is_complete(rq))
592 		return NULL;
593 
594 	GEM_BUG_ON(i915_request_signaled(rq));
595 
596 	/* As soon as the request is completed, it may be retired */
597 	rq = i915_request_get(rq);
598 
599 	i915_request_set_error_once(rq, -EIO);
600 	i915_request_mark_complete(rq);
601 
602 	return rq;
603 }
604 
605 bool __i915_request_submit(struct i915_request *request)
606 {
607 	struct intel_engine_cs *engine = request->engine;
608 	bool result = false;
609 
610 	RQ_TRACE(request, "\n");
611 
612 	GEM_BUG_ON(!irqs_disabled());
613 	lockdep_assert_held(&engine->sched_engine->lock);
614 
615 	/*
616 	 * With the advent of preempt-to-busy, we frequently encounter
617 	 * requests that we have unsubmitted from HW, but left running
618 	 * until the next ack and so have completed in the meantime. On
619 	 * resubmission of that completed request, we can skip
620 	 * updating the payload, and execlists can even skip submitting
621 	 * the request.
622 	 *
623 	 * We must remove the request from the caller's priority queue,
624 	 * and the caller must only call us when the request is in their
625 	 * priority queue, under the sched_engine->lock. This ensures that the
626 	 * request has *not* yet been retired and we can safely move
627 	 * the request into the engine->active.list where it will be
628 	 * dropped upon retiring. (Otherwise, if we resubmit a *retired*
629 	 * request, this would be a horrible use-after-free.)
630 	 */
631 	if (__i915_request_is_complete(request)) {
632 		list_del_init(&request->sched.link);
633 		goto active;
634 	}
635 
636 	if (unlikely(intel_context_is_banned(request->context)))
637 		i915_request_set_error_once(request, -EIO);
638 
639 	if (unlikely(fatal_error(request->fence.error)))
640 		__i915_request_skip(request);
641 
642 	/*
643 	 * Are we using semaphores when the gpu is already saturated?
644 	 *
645 	 * Using semaphores incurs a cost in having the GPU poll a
646 	 * memory location, busywaiting for it to change. The continual
647 	 * memory reads can have a noticeable impact on the rest of the
648 	 * system with the extra bus traffic, stalling the cpu as it too
649 	 * tries to access memory across the bus (perf stat -e bus-cycles).
650 	 *
651 	 * If we installed a semaphore on this request and we only submit
652 	 * the request after the signaler completed, that indicates the
653 	 * system is overloaded and using semaphores at this time only
654 	 * increases the amount of work we are doing. If so, we disable
655 	 * further use of semaphores until we are idle again, whence we
656 	 * optimistically try again.
657 	 */
658 	if (request->sched.semaphores &&
659 	    i915_sw_fence_signaled(&request->semaphore))
660 		engine->saturated |= request->sched.semaphores;
661 
662 	engine->emit_fini_breadcrumb(request,
663 				     request->ring->vaddr + request->postfix);
664 
665 	trace_i915_request_execute(request);
666 	if (engine->bump_serial)
667 		engine->bump_serial(engine);
668 	else
669 		engine->serial++;
670 
671 	result = true;
672 
673 	GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
674 	engine->add_active_request(request);
675 active:
676 	clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
677 	set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
678 
679 	/*
680 	 * XXX Rollback bonded-execution on __i915_request_unsubmit()?
681 	 *
682 	 * In the future, perhaps when we have an active time-slicing scheduler,
683 	 * it will be interesting to unsubmit parallel execution and remove
684 	 * busywaits from the GPU until their master is restarted. This is
685 	 * quite hairy, we have to carefully rollback the fence and do a
686 	 * preempt-to-idle cycle on the target engine, all the while the
687 	 * master execute_cb may refire.
688 	 */
689 	__notify_execute_cb_irq(request);
690 
691 	/* We may be recursing from the signal callback of another i915 fence */
692 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
693 		i915_request_enable_breadcrumb(request);
694 
695 	return result;
696 }
697 
698 void i915_request_submit(struct i915_request *request)
699 {
700 	struct intel_engine_cs *engine = request->engine;
701 	unsigned long flags;
702 
703 	/* Will be called from irq-context when using foreign fences. */
704 	spin_lock_irqsave(&engine->sched_engine->lock, flags);
705 
706 	__i915_request_submit(request);
707 
708 	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
709 }
710 
711 void __i915_request_unsubmit(struct i915_request *request)
712 {
713 	struct intel_engine_cs *engine = request->engine;
714 
715 	/*
716 	 * Only unwind in reverse order, required so that the per-context list
717 	 * is kept in seqno/ring order.
718 	 */
719 	RQ_TRACE(request, "\n");
720 
721 	GEM_BUG_ON(!irqs_disabled());
722 	lockdep_assert_held(&engine->sched_engine->lock);
723 
724 	/*
725 	 * Before we remove this breadcrumb from the signal list, we have
726 	 * to ensure that a concurrent dma_fence_enable_signaling() does not
727 	 * attach itself. We first mark the request as no longer active and
728 	 * make sure that is visible to other cores, and then remove the
729 	 * breadcrumb if attached.
730 	 */
731 	GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
732 	clear_bit_unlock(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
733 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
734 		i915_request_cancel_breadcrumb(request);
735 
736 	/* We've already spun, don't charge on resubmitting. */
737 	if (request->sched.semaphores && __i915_request_has_started(request))
738 		request->sched.semaphores = 0;
739 
740 	/*
741 	 * We don't need to wake_up any waiters on request->execute, they
742 	 * will get woken by any other event or us re-adding this request
743 	 * to the engine timeline (__i915_request_submit()). The waiters
744 	 * should be quite adept at finding that the request now has a new
745 	 * global_seqno compared to the one they went to sleep on.
746 	 */
747 }
748 
749 void i915_request_unsubmit(struct i915_request *request)
750 {
751 	struct intel_engine_cs *engine = request->engine;
752 	unsigned long flags;
753 
754 	/* Will be called from irq-context when using foreign fences. */
755 	spin_lock_irqsave(&engine->sched_engine->lock, flags);
756 
757 	__i915_request_unsubmit(request);
758 
759 	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
760 }
761 
762 void i915_request_cancel(struct i915_request *rq, int error)
763 {
764 	if (!i915_request_set_error_once(rq, error))
765 		return;
766 
767 	set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
768 
769 	intel_context_cancel_request(rq->context, rq);
770 }
771 
772 static int __i915_sw_fence_call
773 submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
774 {
775 	struct i915_request *request =
776 		container_of(fence, typeof(*request), submit);
777 
778 	switch (state) {
779 	case FENCE_COMPLETE:
780 		trace_i915_request_submit(request);
781 
782 		if (unlikely(fence->error))
783 			i915_request_set_error_once(request, fence->error);
784 		else
785 			__rq_arm_watchdog(request);
786 
787 		/*
788 		 * We need to serialize use of the submit_request() callback
789 		 * with its hotplugging performed during an emergency
790 		 * i915_gem_set_wedged().  We use the RCU mechanism to mark the
791 		 * critical section in order to force i915_gem_set_wedged() to
792 		 * wait until the submit_request() is completed before
793 		 * proceeding.
794 		 */
795 		rcu_read_lock();
796 		request->engine->submit_request(request);
797 		rcu_read_unlock();
798 		break;
799 
800 	case FENCE_FREE:
801 		i915_request_put(request);
802 		break;
803 	}
804 
805 	return NOTIFY_DONE;
806 }
807 
808 static int __i915_sw_fence_call
809 semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
810 {
811 	struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);
812 
813 	switch (state) {
814 	case FENCE_COMPLETE:
815 		break;
816 
817 	case FENCE_FREE:
818 		i915_request_put(rq);
819 		break;
820 	}
821 
822 	return NOTIFY_DONE;
823 }
824 
825 static void retire_requests(struct intel_timeline *tl)
826 {
827 	struct i915_request *rq, *rn;
828 
829 	list_for_each_entry_safe(rq, rn, &tl->requests, link)
830 		if (!i915_request_retire(rq))
831 			break;
832 }
833 
834 static void __i915_request_ctor(void *);
835 
836 static noinline struct i915_request *
837 request_alloc_slow(struct intel_timeline *tl,
838 		   struct i915_request **rsvd,
839 		   gfp_t gfp)
840 {
841 	struct i915_request *rq;
842 
843 	/* If we cannot wait, dip into our reserves */
844 	if (!gfpflags_allow_blocking(gfp)) {
845 		rq = xchg(rsvd, NULL);
846 		if (!rq) /* Use the normal failure path for one final WARN */
847 			goto out;
848 
849 		return rq;
850 	}
851 
852 	if (list_empty(&tl->requests))
853 		goto out;
854 
855 	/* Move our oldest request to the slab-cache (if not in use!) */
856 	rq = list_first_entry(&tl->requests, typeof(*rq), link);
857 	i915_request_retire(rq);
858 
859 #ifdef __linux__
860 	rq = kmem_cache_alloc(slab_requests,
861 			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
862 #else
863 	rq = pool_get(&slab_requests,
864 	    (gfp & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK);
865 	if (rq)
866 		__i915_request_ctor(rq);
867 #endif
868 	if (rq)
869 		return rq;
870 
871 	/* Ratelimit ourselves to prevent oom from malicious clients */
872 	rq = list_last_entry(&tl->requests, typeof(*rq), link);
873 	cond_synchronize_rcu(rq->rcustate);
874 
875 	/* Retire our old requests in the hope that we free some */
876 	retire_requests(tl);
877 
878 out:
879 #ifdef __linux__
880 	return kmem_cache_alloc(slab_requests, gfp);
881 #else
882 	rq = pool_get(&slab_requests,
883 	    (gfp & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK);
884 	if (rq)
885 		__i915_request_ctor(rq);
886 	return rq;
887 #endif
888 }
889 
890 static void __i915_request_ctor(void *arg)
891 {
892 	struct i915_request *rq = arg;
893 
894 	/*
895 	 * witness does not understand spin_lock_nested()
896 	 * order reversal in i915 with this lock
897 	 */
898 	mtx_init_flags(&rq->lock, IPL_TTY, NULL, MTX_NOWITNESS);
899 	i915_sched_node_init(&rq->sched);
900 	i915_sw_fence_init(&rq->submit, submit_notify);
901 	i915_sw_fence_init(&rq->semaphore, semaphore_notify);
902 
903 	rq->capture_list = NULL;
904 
905 	init_llist_head(&rq->execute_cb);
906 }
907 
908 struct i915_request *
909 __i915_request_create(struct intel_context *ce, gfp_t gfp)
910 {
911 	struct intel_timeline *tl = ce->timeline;
912 	struct i915_request *rq;
913 	u32 seqno;
914 	int ret;
915 
916 	might_alloc(gfp);
917 
918 	/* Check that the caller provided an already pinned context */
919 	__intel_context_pin(ce);
920 
921 	/*
922 	 * Beware: Dragons be flying overhead.
923 	 *
924 	 * We use RCU to look up requests in flight. The lookups may
925 	 * race with the request being allocated from the slab freelist.
926 	 * That is the request we are writing to here, may be in the process
927 	 * of being read by __i915_active_request_get_rcu(). As such,
928 	 * we have to be very careful when overwriting the contents. During
929 	 * the RCU lookup, we chase the request->engine pointer,
930 	 * read the request->global_seqno and increment the reference count.
931 	 *
932 	 * The reference count is incremented atomically. If it is zero,
933 	 * the lookup knows the request is unallocated and complete. Otherwise,
934 	 * it is either still in use, or has been reallocated and reset
935 	 * with dma_fence_init(). This increment is safe for release as we
936 	 * check that the request we have a reference to matches the active
937 	 * request.
938 	 *
939 	 * Before we increment the refcount, we chase the request->engine
940 	 * pointer. We must not call kmem_cache_zalloc() or else we set
941 	 * that pointer to NULL and cause a crash during the lookup. If
942 	 * we see the request is completed (based on the value of the
943 	 * old engine and seqno), the lookup is complete and reports NULL.
944 	 * If we decide the request is not completed (new engine or seqno),
945 	 * then we grab a reference and double check that it is still the
946 	 * active request - which it won't be and restart the lookup.
947 	 *
948 	 * Do not use kmem_cache_zalloc() here!
949 	 */
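	/*
	 * Illustrative sketch (not part of this function) of the RCU
	 * lookup described above, simplified from the pattern used by
	 * __i915_active_request_get_rcu(); "slot" is a hypothetical
	 * stable pointer to the tracked request:
	 *
	 *	rcu_read_lock();
	 *	rq = READ_ONCE(*slot);
	 *	if (rq && !kref_get_unless_zero(&rq->fence.refcount))
	 *		rq = NULL;	(being reused, restart the lookup)
	 *	rcu_read_unlock();
	 *
	 * The embedded refcount must remain valid across reallocation,
	 * which is why the request slab is typesafe-by-RCU and we use
	 * plain (non-zeroing) allocation with explicit reinitialisation
	 * via dma_fence_init() below.
	 */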
950 #ifdef __linux__
951 	rq = kmem_cache_alloc(slab_requests,
952 			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
953 #else
954 	rq = pool_get(&slab_requests,
955 	    (gfp & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK);
956 	if (rq)
957 		__i915_request_ctor(rq);
958 #endif
959 	if (unlikely(!rq)) {
960 		rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp);
961 		if (!rq) {
962 			ret = -ENOMEM;
963 			goto err_unreserve;
964 		}
965 	}
966 
967 	/*
968 	 * Hold a reference to the intel_context over life of an i915_request.
969 	 * Without this an i915_request can exist after the context has been
970 	 * destroyed (e.g. request retired, context closed, but user space holds
971 	 * a reference to the request from an out fence). In the case of GuC
972 	 * submission + virtual engine, the engine that the request references
973 	 * is also destroyed, which can trigger a bad pointer deref in fence ops
974 	 * (e.g. i915_fence_get_driver_name). We could likely change these
975 	 * functions to avoid touching the engine but let's just be safe and
976 	 * hold the intel_context reference. In execlist mode the request always
977 	 * eventually points to a physical engine so this isn't an issue.
978 	 */
979 	rq->context = intel_context_get(ce);
980 	rq->engine = ce->engine;
981 	rq->ring = ce->ring;
982 	rq->execution_mask = ce->engine->mask;
983 
984 	ret = intel_timeline_get_seqno(tl, rq, &seqno);
985 	if (ret)
986 		goto err_free;
987 
988 	dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
989 		       tl->fence_context, seqno);
990 
991 	RCU_INIT_POINTER(rq->timeline, tl);
992 	rq->hwsp_seqno = tl->hwsp_seqno;
993 	GEM_BUG_ON(__i915_request_is_complete(rq));
994 
995 	rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
996 
997 	rq->guc_prio = GUC_PRIO_INIT;
998 
999 	/* We bump the ref for the fence chain */
1000 	i915_sw_fence_reinit(&i915_request_get(rq)->submit);
1001 	i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);
1002 
1003 	i915_sched_node_reinit(&rq->sched);
1004 
1005 	/* No zalloc, everything must be cleared after use */
1006 	rq->batch = NULL;
1007 	__rq_init_watchdog(rq);
1008 	GEM_BUG_ON(rq->capture_list);
1009 	GEM_BUG_ON(!llist_empty(&rq->execute_cb));
1010 
1011 	/*
1012 	 * Reserve space in the ring buffer for all the commands required to
1013 	 * eventually emit this request. This is to guarantee that the
1014 	 * i915_request_add() call can't fail. Note that the reserve may need
1015 	 * to be redone if the request is not actually submitted straight
1016 	 * away, e.g. because a GPU scheduler has deferred it.
1017 	 *
1018 	 * Note that due to how we add reserved_space to intel_ring_begin()
1019 	 * we need to double our request to ensure that if we need to wrap
1020 	 * around inside i915_request_add() there is sufficient space at
1021 	 * the beginning of the ring as well.
1022 	 */
1023 	rq->reserved_space =
1024 		2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);
1025 
1026 	/*
1027 	 * Record the position of the start of the request so that
1028 	 * should we detect the updated seqno part-way through the
1029 	 * GPU processing the request, we never over-estimate the
1030 	 * position of the head.
1031 	 */
1032 	rq->head = rq->ring->emit;
1033 
1034 	ret = rq->engine->request_alloc(rq);
1035 	if (ret)
1036 		goto err_unwind;
1037 
1038 	rq->infix = rq->ring->emit; /* end of header; start of user payload */
1039 
1040 	intel_context_mark_active(ce);
1041 	list_add_tail_rcu(&rq->link, &tl->requests);
1042 
1043 	return rq;
1044 
1045 err_unwind:
1046 	ce->ring->emit = rq->head;
1047 
1048 	/* Make sure we didn't add ourselves to external state before freeing */
1049 	GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
1050 	GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
1051 
1052 err_free:
1053 	intel_context_put(ce);
1054 #ifdef __linux__
1055 	kmem_cache_free(slab_requests, rq);
1056 #else
1057 	pool_put(&slab_requests, rq);
1058 #endif
1059 err_unreserve:
1060 	intel_context_unpin(ce);
1061 	return ERR_PTR(ret);
1062 }
1063 
1064 struct i915_request *
1065 i915_request_create(struct intel_context *ce)
1066 {
1067 	struct i915_request *rq;
1068 	struct intel_timeline *tl;
1069 
1070 	tl = intel_context_timeline_lock(ce);
1071 	if (IS_ERR(tl))
1072 		return ERR_CAST(tl);
1073 
1074 	/* Move our oldest request to the slab-cache (if not in use!) */
1075 	rq = list_first_entry(&tl->requests, typeof(*rq), link);
1076 	if (!list_is_last(&rq->link, &tl->requests))
1077 		i915_request_retire(rq);
1078 
1079 	intel_context_enter(ce);
1080 	rq = __i915_request_create(ce, GFP_KERNEL);
1081 	intel_context_exit(ce); /* active reference transferred to request */
1082 	if (IS_ERR(rq))
1083 		goto err_unlock;
1084 
1085 	/* Check that we do not interrupt ourselves with a new request */
1086 	rq->cookie = lockdep_pin_lock(&tl->mutex);
1087 
1088 	return rq;
1089 
1090 err_unlock:
1091 	intel_context_timeline_unlock(tl);
1092 	return rq;
1093 }
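
/*
 * Example use of the request API (an illustrative sketch; error
 * handling is abbreviated and "ce" is assumed to be an intel_context
 * pinned and owned by the caller):
 *
 *	struct i915_request *rq;
 *	u32 *cs;
 *
 *	rq = i915_request_create(ce);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	cs = intel_ring_begin(rq, 2);
 *	if (IS_ERR(cs)) {
 *		i915_request_add(rq);	(always add: it cannot fail)
 *		return PTR_ERR(cs);
 *	}
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	intel_ring_advance(rq, cs);
 *
 *	i915_request_add(rq);	(commits and queues; drops tl->mutex)
 */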
1094 
1095 static int
1096 i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
1097 {
1098 	struct dma_fence *fence;
1099 	int err;
1100 
1101 	if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline))
1102 		return 0;
1103 
1104 	if (i915_request_started(signal))
1105 		return 0;
1106 
1107 	/*
1108 	 * The caller holds a reference on @signal, but we do not serialise
1109 	 * against it being retired and removed from the lists.
1110 	 *
1111 	 * We do not hold a reference to the request before @signal, and
1112 	 * so must be very careful to ensure that it is not _recycled_ as
1113 	 * we follow the link backwards.
1114 	 */
1115 	fence = NULL;
1116 	rcu_read_lock();
1117 	do {
1118 		struct list_head *pos = READ_ONCE(signal->link.prev);
1119 		struct i915_request *prev;
1120 
1121 		/* Confirm signal has not been retired, the link is valid */
1122 		if (unlikely(__i915_request_has_started(signal)))
1123 			break;
1124 
1125 		/* Is signal the earliest request on its timeline? */
1126 		if (pos == &rcu_dereference(signal->timeline)->requests)
1127 			break;
1128 
1129 		/*
1130 		 * Peek at the request before us in the timeline. That
1131 		 * request will only be valid before it is retired, so
1132 		 * after acquiring a reference to it, confirm that it is
1133 		 * still part of the signaler's timeline.
1134 		 */
1135 		prev = list_entry(pos, typeof(*prev), link);
1136 		if (!i915_request_get_rcu(prev))
1137 			break;
1138 
1139 		/* After the strong barrier, confirm prev is still attached */
1140 		if (unlikely(READ_ONCE(prev->link.next) != &signal->link)) {
1141 			i915_request_put(prev);
1142 			break;
1143 		}
1144 
1145 		fence = &prev->fence;
1146 	} while (0);
1147 	rcu_read_unlock();
1148 	if (!fence)
1149 		return 0;
1150 
1151 	err = 0;
1152 	if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
1153 		err = i915_sw_fence_await_dma_fence(&rq->submit,
1154 						    fence, 0,
1155 						    I915_FENCE_GFP);
1156 	dma_fence_put(fence);
1157 
1158 	return err;
1159 }
1160 
1161 static intel_engine_mask_t
1162 already_busywaiting(struct i915_request *rq)
1163 {
1164 	/*
1165 	 * Polling a semaphore causes bus traffic, delaying other users of
1166 	 * both the GPU and CPU. We want to limit the impact on others,
1167 	 * while taking advantage of early submission to reduce GPU
1168 	 * latency. Therefore we restrict ourselves to not using more
1169 	 * than one semaphore from each source, and not using a semaphore
1170 	 * if we have detected the engine is saturated (i.e. would not be
1171 	 * submitted early and cause bus traffic reading an already passed
1172 	 * semaphore).
1173 	 *
1174 	 * See the are-we-too-late? check in __i915_request_submit().
1175 	 */
1176 	return rq->sched.semaphores | READ_ONCE(rq->engine->saturated);
1177 }
1178 
1179 static int
1180 __emit_semaphore_wait(struct i915_request *to,
1181 		      struct i915_request *from,
1182 		      u32 seqno)
1183 {
1184 	const int has_token = GRAPHICS_VER(to->engine->i915) >= 12;
1185 	u32 hwsp_offset;
1186 	int len, err;
1187 	u32 *cs;
1188 
1189 	GEM_BUG_ON(GRAPHICS_VER(to->engine->i915) < 8);
1190 	GEM_BUG_ON(i915_request_has_initial_breadcrumb(to));
1191 
1192 	/* We need to pin the signaler's HWSP until we are finished reading. */
1193 	err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
1194 	if (err)
1195 		return err;
1196 
1197 	len = 4;
1198 	if (has_token)
1199 		len += 2;
1200 
1201 	cs = intel_ring_begin(to, len);
1202 	if (IS_ERR(cs))
1203 		return PTR_ERR(cs);
1204 
1205 	/*
1206 	 * Using greater-than-or-equal here means we have to worry
1207 	 * about seqno wraparound. To side step that issue, we swap
1208 	 * the timeline HWSP upon wrapping, so that everyone listening
1209 	 * for the old (pre-wrap) values does not see much smaller
1210 	 * (post-wrap) values than they were expecting (and so wait
1211 	 * forever).
1212 	 */
1213 	*cs++ = (MI_SEMAPHORE_WAIT |
1214 		 MI_SEMAPHORE_GLOBAL_GTT |
1215 		 MI_SEMAPHORE_POLL |
1216 		 MI_SEMAPHORE_SAD_GTE_SDD) +
1217 		has_token;
1218 	*cs++ = seqno;
1219 	*cs++ = hwsp_offset;
1220 	*cs++ = 0;
1221 	if (has_token) {
1222 		*cs++ = 0;
1223 		*cs++ = MI_NOOP;
1224 	}
1225 
1226 	intel_ring_advance(to, cs);
1227 	return 0;
1228 }
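
/*
 * For reference, the stream emitted above decodes as (a sketch; the
 * exact encoding depends on GRAPHICS_VER):
 *
 *	dw0: MI_SEMAPHORE_WAIT | GLOBAL_GTT | POLL | SAD_GTE_SDD
 *	     (length grows by one dword when a token is present, ver >= 12)
 *	dw1: semaphore data, i.e. the seqno to compare against
 *	dw2: low 32 bits of the signaler's HWSP address in the GGTT
 *	dw3: high address bits (zero; the HWSP is assumed to sit below 4G)
 *	dw4/dw5: token slot and MI_NOOP padding (ver >= 12 only)
 */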
1229 
1230 static int
1231 emit_semaphore_wait(struct i915_request *to,
1232 		    struct i915_request *from,
1233 		    gfp_t gfp)
1234 {
1235 	const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask;
1236 	struct i915_sw_fence *wait = &to->submit;
1237 
1238 	if (!intel_context_use_semaphores(to->context))
1239 		goto await_fence;
1240 
1241 	if (i915_request_has_initial_breadcrumb(to))
1242 		goto await_fence;
1243 
1244 	/*
1245 	 * If this or its dependents are waiting on an external fence
1246 	 * that may fail catastrophically, then we want to avoid using
1247 	 * semaphores as they bypass the fence signaling metadata, and we
1248 	 * lose the fence->error propagation.
1249 	 */
1250 	if (from->sched.flags & I915_SCHED_HAS_EXTERNAL_CHAIN)
1251 		goto await_fence;
1252 
1253 	/* Just emit the first semaphore we see as request space is limited. */
1254 	if (already_busywaiting(to) & mask)
1255 		goto await_fence;
1256 
1257 	if (i915_request_await_start(to, from) < 0)
1258 		goto await_fence;
1259 
1260 	/* Only submit our spinner after the signaler is running! */
1261 	if (__await_execution(to, from, gfp))
1262 		goto await_fence;
1263 
1264 	if (__emit_semaphore_wait(to, from, from->fence.seqno))
1265 		goto await_fence;
1266 
1267 	to->sched.semaphores |= mask;
1268 	wait = &to->semaphore;
1269 
1270 await_fence:
1271 	return i915_sw_fence_await_dma_fence(wait,
1272 					     &from->fence, 0,
1273 					     I915_FENCE_GFP);
1274 }
1275 
1276 static bool intel_timeline_sync_has_start(struct intel_timeline *tl,
1277 					  struct dma_fence *fence)
1278 {
1279 	return __intel_timeline_sync_is_later(tl,
1280 					      fence->context,
1281 					      fence->seqno - 1);
1282 }
1283 
1284 static int intel_timeline_sync_set_start(struct intel_timeline *tl,
1285 					 const struct dma_fence *fence)
1286 {
1287 	return __intel_timeline_sync_set(tl, fence->context, fence->seqno - 1);
1288 }
1289 
1290 static int
1291 __i915_request_await_execution(struct i915_request *to,
1292 			       struct i915_request *from)
1293 {
1294 	int err;
1295 
1296 	GEM_BUG_ON(intel_context_is_barrier(from->context));
1297 
1298 	/* Submit both requests at the same time */
1299 	err = __await_execution(to, from, I915_FENCE_GFP);
1300 	if (err)
1301 		return err;
1302 
1303 	/* Squash repeated dependencies to the same timelines */
1304 	if (intel_timeline_sync_has_start(i915_request_timeline(to),
1305 					  &from->fence))
1306 		return 0;
1307 
1308 	/*
1309 	 * Wait until the start of this request.
1310 	 *
1311 	 * The execution cb fires when we submit the request to HW. But in
1312 	 * many cases this may be long before the request itself is ready to
1313 	 * run (consider that we submit 2 requests for the same context, where
1314 	 * the request of interest is behind an indefinite spinner). So we hook
1315 	 * up to both to reduce our queues and keep the execution lag minimised
1316 	 * in the worst case, though we hope that the await_start is elided.
1317 	 */
1318 	err = i915_request_await_start(to, from);
1319 	if (err < 0)
1320 		return err;
1321 
1322 	/*
1323 	 * Ensure both start together [after all semaphores in signal]
1324 	 *
1325 	 * Now that we are queued to the HW at roughly the same time (thanks
1326 	 * to the execute cb) and are ready to run at roughly the same time
1327 	 * (thanks to the await start), our signaler may still be indefinitely
1328 	 * delayed by waiting on a semaphore from a remote engine. If our
1329 	 * signaler depends on a semaphore, so indirectly do we, and we do not
1330 	 * want to start our payload until our signaler also starts theirs.
1331 	 * So we wait.
1332 	 *
1333 	 * However, there is also a second condition for which we need to wait
1334 	 * for the precise start of the signaler. Consider that the signaler
1335 	 * was submitted in a chain of requests following another context
1336 	 * (with just an ordinary intra-engine fence dependency between the
1337 	 * two). In this case the signaler is queued to HW, but not for
1338 	 * immediate execution, and so we must wait until it reaches the
1339 	 * active slot.
1340 	 */
1341 	if (intel_engine_has_semaphores(to->engine) &&
1342 	    !i915_request_has_initial_breadcrumb(to)) {
1343 		err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
1344 		if (err < 0)
1345 			return err;
1346 	}
1347 
1348 	/* Couple the dependency tree for PI on this exposed to->fence */
1349 	if (to->engine->sched_engine->schedule) {
1350 		err = i915_sched_node_add_dependency(&to->sched,
1351 						     &from->sched,
1352 						     I915_DEPENDENCY_WEAK);
1353 		if (err < 0)
1354 			return err;
1355 	}
1356 
1357 	return intel_timeline_sync_set_start(i915_request_timeline(to),
1358 					     &from->fence);
1359 }
1360 
1361 static void mark_external(struct i915_request *rq)
1362 {
1363 	/*
1364 	 * The downside of using semaphores is that we lose metadata passing
1365 	 * along the signaling chain. This is particularly nasty when we
1366 	 * need to pass along a fatal error such as EFAULT or EDEADLK. For
1367 	 * fatal errors we want to scrub the request before it is executed,
1368 	 * which means that we cannot preload the request onto HW and have
1369 	 * it wait upon a semaphore.
1370 	 */
1371 	rq->sched.flags |= I915_SCHED_HAS_EXTERNAL_CHAIN;
1372 }
1373 
1374 static int
1375 __i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
1376 {
1377 	mark_external(rq);
1378 	return i915_sw_fence_await_dma_fence(&rq->submit, fence,
1379 					     i915_fence_context_timeout(rq->engine->i915,
1380 									fence->context),
1381 					     I915_FENCE_GFP);
1382 }
1383 
1384 static int
1385 i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
1386 {
1387 	struct dma_fence *iter;
1388 	int err = 0;
1389 
1390 	if (!to_dma_fence_chain(fence))
1391 		return __i915_request_await_external(rq, fence);
1392 
1393 	dma_fence_chain_for_each(iter, fence) {
1394 		struct dma_fence_chain *chain = to_dma_fence_chain(iter);
1395 
1396 		if (!dma_fence_is_i915(chain->fence)) {
1397 			err = __i915_request_await_external(rq, iter);
1398 			break;
1399 		}
1400 
1401 		err = i915_request_await_dma_fence(rq, chain->fence);
1402 		if (err < 0)
1403 			break;
1404 	}
1405 
1406 	dma_fence_put(iter);
1407 	return err;
1408 }
1409 
1410 int
1411 i915_request_await_execution(struct i915_request *rq,
1412 			     struct dma_fence *fence)
1413 {
1414 	struct dma_fence **child = &fence;
1415 	unsigned int nchild = 1;
1416 	int ret;
1417 
1418 	if (dma_fence_is_array(fence)) {
1419 		struct dma_fence_array *array = to_dma_fence_array(fence);
1420 
1421 		/* XXX Error for signal-on-any fence arrays */
1422 
1423 		child = array->fences;
1424 		nchild = array->num_fences;
1425 		GEM_BUG_ON(!nchild);
1426 	}
1427 
1428 	do {
1429 		fence = *child++;
1430 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1431 			continue;
1432 
1433 		if (fence->context == rq->fence.context)
1434 			continue;
1435 
1436 		/*
1437 		 * We don't squash repeated fence dependencies here as we
1438 		 * want to run our callback in all cases.
1439 		 */
1440 
1441 		if (dma_fence_is_i915(fence))
1442 			ret = __i915_request_await_execution(rq,
1443 							     to_request(fence));
1444 		else
1445 			ret = i915_request_await_external(rq, fence);
1446 		if (ret < 0)
1447 			return ret;
1448 	} while (--nchild);
1449 
1450 	return 0;
1451 }
1452 
1453 static int
1454 await_request_submit(struct i915_request *to, struct i915_request *from)
1455 {
1456 	/*
1457 	 * If we are waiting on a virtual engine, then it may be
1458 	 * constrained to execute on a single engine *prior* to submission.
1459 	 * When it is submitted, it will be first submitted to the virtual
1460 	 * engine and then passed to the physical engine. We cannot allow
1461 	 * the waiter to be submitted immediately to the physical engine
1462 	 * as it may then bypass the virtual request.
1463 	 */
1464 	if (to->engine == READ_ONCE(from->engine))
1465 		return i915_sw_fence_await_sw_fence_gfp(&to->submit,
1466 							&from->submit,
1467 							I915_FENCE_GFP);
1468 	else
1469 		return __i915_request_await_execution(to, from);
1470 }
1471 
1472 static int
1473 i915_request_await_request(struct i915_request *to, struct i915_request *from)
1474 {
1475 	int ret;
1476 
1477 	GEM_BUG_ON(to == from);
1478 	GEM_BUG_ON(to->timeline == from->timeline);
1479 
1480 	if (i915_request_completed(from)) {
1481 		i915_sw_fence_set_error_once(&to->submit, from->fence.error);
1482 		return 0;
1483 	}
1484 
1485 	if (to->engine->sched_engine->schedule) {
1486 		ret = i915_sched_node_add_dependency(&to->sched,
1487 						     &from->sched,
1488 						     I915_DEPENDENCY_EXTERNAL);
1489 		if (ret < 0)
1490 			return ret;
1491 	}
1492 
1493 	if (!intel_engine_uses_guc(to->engine) &&
1494 	    is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
1495 		ret = await_request_submit(to, from);
1496 	else
1497 		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
1498 	if (ret < 0)
1499 		return ret;
1500 
1501 	return 0;
1502 }
1503 
1504 int
1505 i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
1506 {
1507 	struct dma_fence **child = &fence;
1508 	unsigned int nchild = 1;
1509 	int ret;
1510 
1511 	/*
1512 	 * Note that if the fence-array was created in signal-on-any mode,
1513 	 * we should *not* decompose it into its individual fences. However,
1514 	 * we don't currently store which mode the fence-array is operating
1515 	 * in. Fortunately, the only user of signal-on-any is private to
1516 	 * amdgpu and we should not see any incoming fence-array from
1517 	 * sync-file being in signal-on-any mode.
1518 	 */
1519 	if (dma_fence_is_array(fence)) {
1520 		struct dma_fence_array *array = to_dma_fence_array(fence);
1521 
1522 		child = array->fences;
1523 		nchild = array->num_fences;
1524 		GEM_BUG_ON(!nchild);
1525 	}
1526 
1527 	do {
1528 		fence = *child++;
1529 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1530 			continue;
1531 
1532 		/*
1533 		 * Requests on the same timeline are explicitly ordered, along
1534 		 * with their dependencies, by i915_request_add() which ensures
1535 		 * that requests are submitted in-order through each ring.
1536 		 */
1537 		if (fence->context == rq->fence.context)
1538 			continue;
1539 
1540 		/* Squash repeated waits to the same timelines */
1541 		if (fence->context &&
1542 		    intel_timeline_sync_is_later(i915_request_timeline(rq),
1543 						 fence))
1544 			continue;
1545 
1546 		if (dma_fence_is_i915(fence))
1547 			ret = i915_request_await_request(rq, to_request(fence));
1548 		else
1549 			ret = i915_request_await_external(rq, fence);
1550 		if (ret < 0)
1551 			return ret;
1552 
1553 		/* Record the latest fence used against each timeline */
1554 		if (fence->context)
1555 			intel_timeline_sync_set(i915_request_timeline(rq),
1556 						fence);
1557 	} while (--nchild);
1558 
1559 	return 0;
1560 }
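
/*
 * Example (illustrative sketch): making a request wait on an in-fence
 * from userspace, e.g. one extracted from a sync_file fd. The fence may
 * be a plain fence, a fence array or a fence chain; the helper above
 * decomposes it as required.
 *
 *	struct dma_fence *in = sync_file_get_fence(in_fence_fd);
 *
 *	if (!in)
 *		return -EINVAL;
 *	err = i915_request_await_dma_fence(rq, in);
 *	dma_fence_put(in);
 *	if (err < 0)
 *		return err;
 */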
1561 
1562 /**
1563  * i915_request_await_object - set this request to (async) wait upon a bo
1564  * @to: request we are wishing to use
1565  * @obj: object which may be in use on another ring.
1566  * @write: whether the wait is on behalf of a writer
1567  *
1568  * This code is meant to abstract object synchronization with the GPU.
1569  * Conceptually we serialise writes between engines inside the GPU.
1570  * We only allow one engine to write into a buffer at any time, but
1571  * multiple readers. To ensure each has a coherent view of memory, we must:
1572  *
1573  * - If there is an outstanding write request to the object, the new
1574  *   request must wait for it to complete (either CPU or in hw, requests
1575  *   on the same ring will be naturally ordered).
1576  *
1577  * - If we are a write request (pending_write_domain is set), the new
1578  *   request must wait for outstanding read requests to complete.
1579  *
1580  * Returns 0 if successful, else propagates up the lower layer error.
1581  */
1582 int
1583 i915_request_await_object(struct i915_request *to,
1584 			  struct drm_i915_gem_object *obj,
1585 			  bool write)
1586 {
1587 	struct dma_fence *excl;
1588 	int ret = 0;
1589 
1590 	if (write) {
1591 		struct dma_fence **shared;
1592 		unsigned int count, i;
1593 
1594 		ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
1595 					  &shared);
1596 		if (ret)
1597 			return ret;
1598 
1599 		for (i = 0; i < count; i++) {
1600 			ret = i915_request_await_dma_fence(to, shared[i]);
1601 			if (ret)
1602 				break;
1603 
1604 			dma_fence_put(shared[i]);
1605 		}
1606 
1607 		for (; i < count; i++)
1608 			dma_fence_put(shared[i]);
1609 		kfree(shared);
1610 	} else {
1611 		excl = dma_resv_get_excl_unlocked(obj->base.resv);
1612 	}
1613 
1614 	if (excl) {
1615 		if (ret == 0)
1616 			ret = i915_request_await_dma_fence(to, excl);
1617 
1618 		dma_fence_put(excl);
1619 	}
1620 
1621 	return ret;
1622 }
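
/*
 * Example (illustrative sketch): serialising a request against prior
 * users of a buffer before writing to it, as on the execbuffer path.
 * Object locking and tracking are assumed to be handled by the caller
 * as usual.
 *
 *	err = i915_request_await_object(rq, vma->obj, true);
 *	if (err == 0)
 *		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 *	if (err)
 *		return err;
 */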
1623 
1624 static struct i915_request *
1625 __i915_request_add_to_timeline(struct i915_request *rq)
1626 {
1627 	struct intel_timeline *timeline = i915_request_timeline(rq);
1628 	struct i915_request *prev;
1629 
1630 	/*
1631 	 * Dependency tracking and request ordering along the timeline
1632 	 * is special cased so that we can eliminate redundant ordering
1633 	 * operations while building the request (we know that the timeline
1634 	 * itself is ordered, and here we guarantee it).
1635 	 *
1636 	 * As we know we will need to emit tracking along the timeline,
1637 	 * we embed the hooks into our request struct -- at the cost of
1638 	 * having to have specialised no-allocation interfaces (which will
1639 	 * be beneficial elsewhere).
1640 	 *
1641 	 * A second benefit to open-coding i915_request_await_request is
1642 	 * that we can apply a slight variant of the rules specialised
1643 	 * for timelines that jump between engines (such as virtual engines).
1644 	 * If we consider the case of virtual engine, we must emit a dma-fence
1645 	 * to prevent scheduling of the second request until the first is
1646 	 * complete (to maximise our greedy late load balancing) and this
1647 	 * precludes optimising to use semaphores serialisation of a single
1648 	 * timeline across engines.
1649 	 */
1650 	prev = to_request(__i915_active_fence_set(&timeline->last_request,
1651 						  &rq->fence));
1652 	if (prev && !__i915_request_is_complete(prev)) {
1653 		bool uses_guc = intel_engine_uses_guc(rq->engine);
1654 
1655 		/*
1656 		 * The requests are supposed to be kept in order. However,
1657 		 * we need to be wary in case the timeline->last_request
1658 		 * is used as a barrier for external modification to this
1659 		 * context.
1660 		 */
1661 		GEM_BUG_ON(prev->context == rq->context &&
1662 			   i915_seqno_passed(prev->fence.seqno,
1663 					     rq->fence.seqno));
1664 
1665 		if ((!uses_guc &&
1666 		     is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask)) ||
1667 		    (uses_guc && prev->context == rq->context))
1668 			i915_sw_fence_await_sw_fence(&rq->submit,
1669 						     &prev->submit,
1670 						     &rq->submitq);
1671 		else
1672 			__i915_sw_fence_await_dma_fence(&rq->submit,
1673 							&prev->fence,
1674 							&rq->dmaq);
1675 		if (rq->engine->sched_engine->schedule)
1676 			__i915_sched_node_add_dependency(&rq->sched,
1677 							 &prev->sched,
1678 							 &rq->dep,
1679 							 0);
1680 	}
1681 
1682 	/*
1683 	 * Make sure that no request gazumped us - if it was allocated after
1684 	 * our i915_request_alloc() and called __i915_request_add() before
1685 	 * us, the timeline will hold its seqno which is later than ours.
1686 	 */
1687 	GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
1688 
1689 	return prev;
1690 }
1691 
1692 /*
1693  * NB: This function is not allowed to fail. Doing so would mean that the
1694  * request is not being tracked for completion but the work itself is
1695  * going to happen on the hardware. This would be a Bad Thing(tm).
1696  */
1697 struct i915_request *__i915_request_commit(struct i915_request *rq)
1698 {
1699 	struct intel_engine_cs *engine = rq->engine;
1700 	struct intel_ring *ring = rq->ring;
1701 	u32 *cs;
1702 
1703 	RQ_TRACE(rq, "\n");
1704 
1705 	/*
1706 	 * To ensure that this call will not fail, space for its emissions
1707 	 * should already have been reserved in the ring buffer. Let the ring
1708 	 * know that it is time to use that space up.
1709 	 */
1710 	GEM_BUG_ON(rq->reserved_space > ring->space);
1711 	rq->reserved_space = 0;
1712 	rq->emitted_jiffies = jiffies;
1713 
1714 	/*
1715 	 * Record the position of the start of the breadcrumb so that
1716 	 * should we detect the updated seqno part-way through the
1717 	 * GPU processing the request, we never over-estimate the
1718 	 * position of the ring's HEAD.
1719 	 */
1720 	cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw);
1721 	GEM_BUG_ON(IS_ERR(cs));
1722 	rq->postfix = intel_ring_offset(rq, cs);
1723 
1724 	return __i915_request_add_to_timeline(rq);
1725 }
1726 
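/*
 * N.B. the _bh suffix: the caller is expected to hold off soft interrupts
 * across this call (as __i915_request_queue() below does with
 * local_bh_disable()/local_bh_enable()) so that the submission tasklets
 * kicked by committing these fences run promptly once bottom halves are
 * re-enabled.
 */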
1727 void __i915_request_queue_bh(struct i915_request *rq)
1728 {
1729 	i915_sw_fence_commit(&rq->semaphore);
1730 	i915_sw_fence_commit(&rq->submit);
1731 }
1732 
1733 void __i915_request_queue(struct i915_request *rq,
1734 			  const struct i915_sched_attr *attr)
1735 {
1736 	/*
1737 	 * Let the backend know a new request has arrived that may need
1738 	 * to adjust the existing execution schedule due to a high priority
1739 	 * request - i.e. we may want to preempt the current request in order
1740 	 * to run a high priority dependency chain *before* we can execute this
1741 	 * request.
1742 	 *
1743 	 * This is called before the request is ready to run so that we can
1744 	 * decide whether to preempt the entire chain so that it is ready to
1745 	 * run at the earliest possible convenience.
1746 	 */
1747 	if (attr && rq->engine->sched_engine->schedule)
1748 		rq->engine->sched_engine->schedule(rq, attr);
1749 
1750 	local_bh_disable();
1751 	__i915_request_queue_bh(rq);
1752 	local_bh_enable(); /* kick tasklets */
1753 }
1754 
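/*
 * A minimal usage sketch for i915_request_add(), assuming a pinned
 * struct intel_context *ce; the command emission in the middle is
 * elided and illustrative only:
 *
 *	struct i915_request *rq;
 *
 *	rq = i915_request_create(ce);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	... emit commands into the ring via intel_ring_begin() ...
 *
 *	i915_request_add(rq);
 *
 * i915_request_create() returns with the timeline mutex held;
 * i915_request_add() commits the request and drops it.
 */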
1755 void i915_request_add(struct i915_request *rq)
1756 {
1757 	struct intel_timeline * const tl = i915_request_timeline(rq);
1758 	struct i915_sched_attr attr = {};
1759 	struct i915_gem_context *ctx;
1760 
1761 	lockdep_assert_held(&tl->mutex);
1762 	lockdep_unpin_lock(&tl->mutex, rq->cookie);
1763 
1764 	trace_i915_request_add(rq);
1765 	__i915_request_commit(rq);
1766 
1767 	/* XXX placeholder for selftests */
1768 	rcu_read_lock();
1769 	ctx = rcu_dereference(rq->context->gem_context);
1770 	if (ctx)
1771 		attr = ctx->sched;
1772 	rcu_read_unlock();
1773 
1774 	__i915_request_queue(rq, &attr);
1775 
1776 	mutex_unlock(&tl->mutex);
1777 }
1778 
1779 static unsigned long local_clock_ns(unsigned int *cpu)
1780 {
1781 	unsigned long t;
1782 
1783 	/*
1784 	 * Cheaply read an approximate nanosecond timestamp from local_clock().
1785 	 * The result and subsequent calculations are defined in the same
1786 	 * approximate nanosecond units. The principal source of timing
1787 	 * error here is from truncating the u64 clock to an unsigned long.
1788 	 *
1789 	 * Note that local_clock() is only defined wrt the current CPU;
1790 	 * the comparisons are no longer valid if we switch CPUs. Instead of
1791 	 * blocking preemption for the entire busywait, we can detect the CPU
1792 	 * switch and use that as indicator of system load and a reason to
1793 	 * stop busywaiting, see busywait_stop().
1794 	 */
1795 	*cpu = get_cpu();
1796 	t = local_clock();
1797 	put_cpu();
1798 
1799 	return t;
1800 }
1801 
1802 static bool busywait_stop(unsigned long timeout, unsigned int cpu)
1803 {
1804 	unsigned int this_cpu;
1805 
1806 	if (time_after(local_clock_ns(&this_cpu), timeout))
1807 		return true;
1808 
1809 	return this_cpu != cpu;
1810 }
1811 
1812 static bool __i915_spin_request(struct i915_request * const rq, int state)
1813 {
1814 	unsigned long timeout_ns;
1815 	unsigned int cpu;
1816 
1817 	/*
1818 	 * Only wait for the request if we know it is likely to complete.
1819 	 *
1820 	 * We don't track the timestamps around requests, nor the average
1821 	 * request length, so we do not have a good indicator that this
1822 	 * request will complete within the timeout. What we do know is the
1823 	 * order in which requests are executed by the context and so we can
1824 	 * tell if the request has been started. If the request is not even
1825 	 * running yet, it is a fair assumption that it will not complete
1826 	 * within our relatively short timeout.
1827 	 */
1828 	if (!i915_request_is_running(rq))
1829 		return false;
1830 
1831 	/*
1832 	 * When waiting for high frequency requests, e.g. during synchronous
1833 	 * rendering split between the CPU and GPU, the finite amount of time
1834 	 * required to set up the irq and wait upon it limits the response
1835 	 * rate. By busywaiting on the request completion for a short while we
1836 	 * can service the high frequency waits as quickly as possible. For a
1837 	 * slow request, however, we want to go to sleep as soon as possible.
1838 	 * The tradeoff between waiting and sleeping is roughly the time it
1839 	 * takes to sleep on a request, on the order of a microsecond.
1840 	 */
1841 
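	/*
	 * The busywait budget comes from the per-engine
	 * props.max_busywait_duration_ns; on Linux builds this is exposed
	 * as a tunable through the engine sysfs properties.
	 */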
1842 	timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns);
1843 	timeout_ns += local_clock_ns(&cpu);
1844 	do {
1845 		if (dma_fence_is_signaled(&rq->fence))
1846 			return true;
1847 
1848 		if (signal_pending_state(state, current))
1849 			break;
1850 
1851 		if (busywait_stop(timeout_ns, cpu))
1852 			break;
1853 
1854 		cpu_relax();
1855 	} while (!drm_need_resched());
1856 
1857 	return false;
1858 }
1859 
1860 struct request_wait {
1861 	struct dma_fence_cb cb;
1862 #ifdef __linux__
1863 	struct task_struct *tsk;
1864 #else
1865 	struct proc *tsk;
1866 #endif
1867 };
1868 
1869 static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
1870 {
1871 	struct request_wait *wait = container_of(cb, typeof(*wait), cb);
1872 
1873 	wake_up_process(fetch_and_zero(&wait->tsk));
1874 }
1875 
1876 /**
1877  * i915_request_wait - wait until execution of request has finished
1878  * @rq: the request to wait upon
1879  * @flags: how to wait
1880  * @timeout: how long to wait in jiffies
1881  *
1882  * i915_request_wait() waits for the request to be completed, for a
1883  * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
1884  * unbounded wait).
1885  *
1886  * Returns the remaining time (in jiffies) if the request completed, which
1887  * may be zero, or -ETIME if the request is unfinished after the timeout
1888  * expires. May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a
1889  * signal is pending before the request completes.
1890  */
1891 long i915_request_wait(struct i915_request *rq,
1892 		       unsigned int flags,
1893 		       long timeout)
1894 {
1895 	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
1896 		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
1897 	struct request_wait wait;
1898 
1899 	might_sleep();
1900 	GEM_BUG_ON(timeout < 0);
1901 
1902 	if (dma_fence_is_signaled(&rq->fence))
1903 		return timeout;
1904 
1905 	if (!timeout)
1906 		return -ETIME;
1907 
1908 	trace_i915_request_wait_begin(rq, flags);
1909 
1910 	/*
1911 	 * We must never wait on the GPU while holding a lock as we
1912 	 * may need to perform a GPU reset. So while we don't need to
1913 	 * serialise wait/reset with an explicit lock, we do want
1914 	 * lockdep to detect potential dependency cycles.
1915 	 */
1916 	mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_);
1917 
1918 	/*
1919 	 * Optimistic spin before touching IRQs.
1920 	 *
1921 	 * We may use a rather large value here to offset the penalty of
1922 	 * switching away from the active task. Frequently, the client will
1923 	 * wait upon an old swapbuffer to throttle itself to remain within a
1924 	 * frame of the gpu. If the client is running in lockstep with the gpu,
1925 	 * then it should not be waiting long at all, and a sleep now will incur
1926 	 * extra scheduler latency in producing the next frame. To try to
1927 	 * avoid adding the cost of enabling/disabling the interrupt to the
1928 	 * short wait, we first spin to see if the request would have completed
1929 	 * in the time taken to set up the interrupt.
1930 	 *
1931 	 * We need up to 5us to enable the irq, and up to 20us to hide the
1932 	 * scheduler latency of a context switch, ignoring the secondary
1933 	 * impacts from a context switch such as cache eviction.
1934 	 *
1935 	 * The scheme used for low-latency IO is called "hybrid interrupt
1936 	 * polling". The suggestion there is to sleep until just before you
1937 	 * expect to be woken by the device interrupt and then poll for its
1938 	 * completion. That requires having a good predictor for the request
1939 	 * duration, which we currently lack.
1940 	 */
1941 	if (IS_ACTIVE(CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT) &&
1942 	    __i915_spin_request(rq, state))
1943 		goto out;
1944 
1945 	/*
1946 	 * This client is about to stall waiting for the GPU. In many cases
1947 	 * this is undesirable and limits the throughput of the system, as
1948 	 * many clients cannot continue processing user input/output whilst
1949 	 * blocked. RPS autotuning may take tens of milliseconds to respond
1950 	 * to the GPU load and thus incurs additional latency for the client.
1951 	 * We can circumvent that by promoting the GPU frequency to maximum
1952 	 * before we sleep. This makes the GPU throttle up much more quickly
1953 	 * (good for benchmarks and user experience, e.g. window animations),
1954 	 * but at a cost of spending more power processing the workload
1955 	 * (bad for battery).
1956 	 */
1957 	if (flags & I915_WAIT_PRIORITY && !i915_request_started(rq))
1958 		intel_rps_boost(rq);
1959 
1960 #ifdef __linux__
1961 	wait.tsk = current;
1962 #else
1963 	wait.tsk = curproc;
1964 #endif
1965 	if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
1966 		goto out;
1967 
1968 	/*
1969 	 * Flush the submission tasklet, but only if it may help this request.
1970 	 *
1971 	 * We sometimes experience some latency between the HW interrupts and
1972 	 * tasklet execution (mostly due to ksoftirqd latency, but it can also
1973 	 * be due to lazy CS events), so let's run the tasklet manually if there
1974 	 * is a chance it may submit this request. If the request is not ready
1975 	 * to run, as it is waiting for other fences to be signaled, flushing
1976 	 * the tasklet is busy work without any advantage for this client.
1977 	 *
1978 	 * If the HW is being lazy, this is the last chance before we go to
1979 	 * sleep to catch any pending events. We will check periodically in
1980 	 * the heartbeat to flush the submission tasklets as a last resort
1981 	 * for unhappy HW.
1982 	 */
1983 	if (i915_request_is_ready(rq))
1984 		__intel_engine_flush_submission(rq->engine, false);
1985 
1986 	for (;;) {
1987 		set_current_state(state);
1988 
1989 		if (dma_fence_is_signaled(&rq->fence))
1990 			break;
1991 
1992 		if (signal_pending_state(state, current)) {
1993 			timeout = -ERESTARTSYS;
1994 			break;
1995 		}
1996 
1997 		if (!timeout) {
1998 			timeout = -ETIME;
1999 			break;
2000 		}
2001 
2002 		timeout = io_schedule_timeout(timeout);
2003 	}
2004 	__set_current_state(TASK_RUNNING);
2005 
2006 	if (READ_ONCE(wait.tsk))
2007 		dma_fence_remove_callback(&rq->fence, &wait.cb);
2008 	GEM_BUG_ON(!list_empty(&wait.cb.node));
2009 
2010 out:
2011 	mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_);
2012 	trace_i915_request_wait_end(rq);
2013 	return timeout;
2014 }
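
/*
 * A minimal usage sketch for i915_request_wait() (hypothetical values:
 * an interruptible wait capped at 100ms, propagating the error):
 *
 *	long ret;
 *
 *	ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *				msecs_to_jiffies(100));
 *	if (ret < 0)
 *		return ret;	(-ETIME, or -EINTR if interrupted)
 *
 * A non-negative return is the time remaining, in jiffies.
 */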
2015 
2016 static int print_sched_attr(const struct i915_sched_attr *attr,
2017 			    char *buf, int x, int len)
2018 {
2019 	if (attr->priority == I915_PRIORITY_INVALID)
2020 		return x;
2021 
2022 	x += snprintf(buf + x, len - x,
2023 		      " prio=%d", attr->priority);
2024 
2025 	return x;
2026 }
2027 
2028 static char queue_status(const struct i915_request *rq)
2029 {
2030 	if (i915_request_is_active(rq))
2031 		return 'E';
2032 
2033 	if (i915_request_is_ready(rq))
2034 		return intel_engine_is_virtual(rq->engine) ? 'V' : 'R';
2035 
2036 	return 'U';
2037 }
2038 
2039 static const char *run_status(const struct i915_request *rq)
2040 {
2041 	if (__i915_request_is_complete(rq))
2042 		return "!";
2043 
2044 	if (__i915_request_has_started(rq))
2045 		return "*";
2046 
2047 	if (!i915_sw_fence_signaled(&rq->semaphore))
2048 		return "&";
2049 
2050 	return "";
2051 }
2052 
2053 static const char *fence_status(const struct i915_request *rq)
2054 {
2055 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
2056 		return "+";
2057 
2058 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
2059 		return "-";
2060 
2061 	return "";
2062 }
2063 
2064 void i915_request_show(struct drm_printer *m,
2065 		       const struct i915_request *rq,
2066 		       const char *prefix,
2067 		       int indent)
2068 {
2069 	const char *name = rq->fence.ops->get_timeline_name((struct dma_fence *)&rq->fence);
2070 	char buf[80] = "";
2071 	int x = 0;
2072 
2073 	/*
2074 	 * The prefix is used to show the queue status, for which we use
2075 	 * the following flags:
2076 	 *
2077 	 *  U [Unready]
2078 	 *    - initial status upon being submitted by the user
2079 	 *
2080 	 *    - the request is not ready for execution as it is waiting
2081 	 *      for external fences
2082 	 *
2083 	 *  R [Ready]
2084 	 *    - all fences the request was waiting on have been signaled,
2085 	 *      and the request is now ready for execution and will be
2086 	 *      in a backend queue
2087 	 *
2088 	 *    - a ready request may still need to wait on semaphores
2089 	 *      [internal fences]
2090 	 *
2091 	 *  V [Ready/virtual]
2092 	 *    - same as ready, but queued over multiple backends
2093 	 *
2094 	 *  E [Executing]
2095 	 *    - the request has been transferred from the backend queue and
2096 	 *      submitted for execution on HW
2097 	 *
2098 	 *    - a completed request may still be regarded as executing; its
2099 	 *      status may not be updated until it is retired and removed
2100 	 *      from the lists
2101 	 */
2102 
2103 	x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
2104 
2105 	drm_printf(m, "%s%.*s%c %llx:%lld%s%s %s @ %dms: %s\n",
2106 		   prefix, indent, "                ",
2107 		   queue_status(rq),
2108 		   rq->fence.context, rq->fence.seqno,
2109 		   run_status(rq),
2110 		   fence_status(rq),
2111 		   buf,
2112 		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
2113 		   name);
2114 }
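
/*
 * For illustration, a hypothetical request might print as:
 *
 *	E 1a2b:55* prio=2 @ 12ms: myapp[1234]
 *
 * i.e. executing, fence context 0x1a2b seqno 55, started (*) but not yet
 * signalled, priority 2, emitted 12ms ago on timeline "myapp[1234]".
 */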
2115 
2116 static bool engine_match_ring(struct intel_engine_cs *engine, struct i915_request *rq)
2117 {
2118 	u32 ring = ENGINE_READ(engine, RING_START);
2119 
2120 	return ring == i915_ggtt_offset(rq->ring->vma);
2121 }
2122 
2123 static bool match_ring(struct i915_request *rq)
2124 {
2125 	struct intel_engine_cs *engine;
2126 	bool found;
2127 	int i;
2128 
2129 	if (!intel_engine_is_virtual(rq->engine))
2130 		return engine_match_ring(rq->engine, rq);
2131 
2132 	found = false;
2133 	i = 0;
2134 	while ((engine = intel_engine_get_sibling(rq->engine, i++))) {
2135 		found = engine_match_ring(engine, rq);
2136 		if (found)
2137 			break;
2138 	}
2139 
2140 	return found;
2141 }
2142 
2143 enum i915_request_state i915_test_request_state(struct i915_request *rq)
2144 {
2145 	if (i915_request_completed(rq))
2146 		return I915_REQUEST_COMPLETE;
2147 
2148 	if (!i915_request_started(rq))
2149 		return I915_REQUEST_PENDING;
2150 
2151 	if (match_ring(rq))
2152 		return I915_REQUEST_ACTIVE;
2153 
2154 	return I915_REQUEST_QUEUED;
2155 }
2156 
2157 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2158 #include "selftests/mock_request.c"
2159 #include "selftests/i915_request.c"
2160 #endif
2161 
2162 void i915_request_module_exit(void)
2163 {
2164 #ifdef __linux__
2165 	kmem_cache_destroy(slab_execute_cbs);
2166 	kmem_cache_destroy(slab_requests);
2167 #else
2168 	pool_destroy(&slab_execute_cbs);
2169 	pool_destroy(&slab_requests);
2170 #endif
2171 }
2172 
2173 int __init i915_request_module_init(void)
2174 {
2175 #ifdef __linux__
2176 	slab_requests =
2177 		kmem_cache_create("i915_request",
2178 				  sizeof(struct i915_request),
2179 				  __alignof__(struct i915_request),
2180 				  SLAB_HWCACHE_ALIGN |
2181 				  SLAB_RECLAIM_ACCOUNT |
2182 				  SLAB_TYPESAFE_BY_RCU,
2183 				  __i915_request_ctor);
2184 	if (!slab_requests)
2185 		return -ENOMEM;
2186 
2187 	slab_execute_cbs = KMEM_CACHE(execute_cb,
2188 				      SLAB_HWCACHE_ALIGN |
2189 				      SLAB_RECLAIM_ACCOUNT |
2190 				      SLAB_TYPESAFE_BY_RCU);
2191 	if (!slab_execute_cbs)
2192 		goto err_requests;
2193 #else
2194 	pool_init(&slab_requests, sizeof(struct i915_request),
2195 	    CACHELINESIZE, IPL_TTY, 0, "i915_request", NULL);
2196 	pool_init(&slab_execute_cbs, sizeof(struct execute_cb),
2197 	    CACHELINESIZE, IPL_TTY, 0, "i915_exec", NULL);
2198 #endif
2199 
2200 	return 0;
2201 
2202 #ifdef __linux__
2203 err_requests:
2204 	kmem_cache_destroy(slab_requests);
2205 	return -ENOMEM;
2206 #endif
2207 }
2208