xref: /openbsd-src/sys/dev/pci/drm/i915/i915_request.c (revision 7350f337b9e3eb4461d99580e625c7ef148d107c)
1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/prefetch.h>
26 #include <linux/dma-fence-array.h>
27 #include <linux/sched.h>
28 #include <linux/sched/clock.h>
29 #include <linux/sched/signal.h>
30 
31 #include "i915_drv.h"
32 
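/*
 * dma_fence_ops callbacks: every i915_request embeds a dma_fence, so other
 * drivers and sync_file users can treat it as a generic fence. The table
 * tying these callbacks together is i915_fence_ops below.
 */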
33 static const char *i915_fence_get_driver_name(struct dma_fence *fence)
34 {
35 	return "i915";
36 }
37 
38 static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
39 {
40 	/*
41 	 * The timeline struct (as part of the ppgtt underneath a context)
42 	 * may be freed when the request is no longer in use by the GPU.
43 	 * We could extend the life of a context to beyond that of all
44 	 * fences, possibly keeping the hw resource around indefinitely,
45 	 * or we just give them a false name. Since
46 	 * dma_fence_ops.get_timeline_name is a debug feature, the occasional
47 	 * lie seems justifiable.
48 	 */
49 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
50 		return "signaled";
51 
52 	return to_request(fence)->timeline->name;
53 }
54 
55 static bool i915_fence_signaled(struct dma_fence *fence)
56 {
57 	return i915_request_completed(to_request(fence));
58 }
59 
60 static bool i915_fence_enable_signaling(struct dma_fence *fence)
61 {
62 	return intel_engine_enable_signaling(to_request(fence), true);
63 }
64 
65 static signed long i915_fence_wait(struct dma_fence *fence,
66 				   bool interruptible,
67 				   signed long timeout)
68 {
69 	return i915_request_wait(to_request(fence), interruptible, timeout);
70 }
71 
72 static void i915_fence_release(struct dma_fence *fence)
73 {
74 	struct i915_request *rq = to_request(fence);
75 
76 	/*
77 	 * The request is put onto an RCU freelist (i.e. the address
78 	 * may be reused immediately), so mark the fences as being freed now.
79 	 * Otherwise the debugobjects for the fences are only marked as
80 	 * freed when the slab cache itself is freed, and so we would get
81 	 * caught trying to reuse dead objects.
82 	 */
83 	i915_sw_fence_fini(&rq->submit);
84 
85 #ifdef __linux__
86 	kmem_cache_free(rq->i915->requests, rq);
87 #else
88 	pool_put(&rq->i915->requests, rq);
89 #endif
90 }
91 
92 const struct dma_fence_ops i915_fence_ops = {
93 	.get_driver_name = i915_fence_get_driver_name,
94 	.get_timeline_name = i915_fence_get_timeline_name,
95 	.enable_signaling = i915_fence_enable_signaling,
96 	.signaled = i915_fence_signaled,
97 	.wait = i915_fence_wait,
98 	.release = i915_fence_release,
99 };
100 
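/*
 * Unlink the request from its client's (drm file's) list of outstanding
 * requests. file_priv is rechecked under mm.lock as another path may have
 * cleared it concurrently.
 */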
101 static inline void
102 i915_request_remove_from_client(struct i915_request *request)
103 {
104 	struct drm_i915_file_private *file_priv;
105 
106 	file_priv = request->file_priv;
107 	if (!file_priv)
108 		return;
109 
110 	spin_lock(&file_priv->mm.lock);
111 	if (request->file_priv) {
112 		list_del(&request->client_link);
113 		request->file_priv = NULL;
114 	}
115 	spin_unlock(&file_priv->mm.lock);
116 }
117 
118 static struct i915_dependency *
119 i915_dependency_alloc(struct drm_i915_private *i915)
120 {
121 #ifdef __linux__
122 	return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
123 #else
124 	return pool_get(&i915->dependencies, PR_WAITOK);
125 #endif
126 }
127 
128 static void
129 i915_dependency_free(struct drm_i915_private *i915,
130 		     struct i915_dependency *dep)
131 {
132 #ifdef __linux__
133 	kmem_cache_free(i915->dependencies, dep);
134 #else
135 	pool_put(&i915->dependencies, dep);
136 #endif
137 }
138 
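/*
 * Record that @node must wait for @signal before it can execute: link the
 * dependency into both nodes' lists so the scheduler can walk the dependency
 * graph in either direction.
 */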
139 static void
140 __i915_sched_node_add_dependency(struct i915_sched_node *node,
141 				 struct i915_sched_node *signal,
142 				 struct i915_dependency *dep,
143 				 unsigned long flags)
144 {
145 	INIT_LIST_HEAD(&dep->dfs_link);
146 	list_add(&dep->wait_link, &signal->waiters_list);
147 	list_add(&dep->signal_link, &node->signalers_list);
148 	dep->signaler = signal;
149 	dep->flags = flags;
150 }
151 
152 static int
153 i915_sched_node_add_dependency(struct drm_i915_private *i915,
154 			       struct i915_sched_node *node,
155 			       struct i915_sched_node *signal)
156 {
157 	struct i915_dependency *dep;
158 
159 	dep = i915_dependency_alloc(i915);
160 	if (!dep)
161 		return -ENOMEM;
162 
163 	__i915_sched_node_add_dependency(node, signal, dep,
164 					 I915_DEPENDENCY_ALLOC);
165 	return 0;
166 }
167 
168 static void
169 i915_sched_node_fini(struct drm_i915_private *i915,
170 		     struct i915_sched_node *node)
171 {
172 	struct i915_dependency *dep, *tmp;
173 
174 	GEM_BUG_ON(!list_empty(&node->link));
175 
176 	/*
177 	 * Everyone we depended upon (the fences we wait to be signaled)
178 	 * should retire before us and remove themselves from our list.
179 	 * However, retirement is run independently on each timeline and
180 	 * so we may be called out-of-order.
181 	 */
182 	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
183 		GEM_BUG_ON(!i915_sched_node_signaled(dep->signaler));
184 		GEM_BUG_ON(!list_empty(&dep->dfs_link));
185 
186 		list_del(&dep->wait_link);
187 		if (dep->flags & I915_DEPENDENCY_ALLOC)
188 			i915_dependency_free(i915, dep);
189 	}
190 
191 	/* Remove ourselves from everyone who depends upon us */
192 	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
193 		GEM_BUG_ON(dep->signaler != node);
194 		GEM_BUG_ON(!list_empty(&dep->dfs_link));
195 
196 		list_del(&dep->signal_link);
197 		if (dep->flags & I915_DEPENDENCY_ALLOC)
198 			i915_dependency_free(i915, dep);
199 	}
200 }
201 
202 static void
203 i915_sched_node_init(struct i915_sched_node *node)
204 {
205 	INIT_LIST_HEAD(&node->signalers_list);
206 	INIT_LIST_HEAD(&node->waiters_list);
207 	INIT_LIST_HEAD(&node->link);
208 	node->attr.priority = I915_PRIORITY_INVALID;
209 }
210 
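/*
 * Quiesce the GPU and restart every engine's timeline (and the cached
 * inter-engine sync points) from @seqno. Called before the 32-bit global
 * seqno wraps, and from i915_gem_set_global_seqno() below.
 */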
211 static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
212 {
213 	struct intel_engine_cs *engine;
214 	struct i915_timeline *timeline;
215 	enum intel_engine_id id;
216 	int ret;
217 
218 	/* Carefully retire all requests without writing to the rings */
219 	ret = i915_gem_wait_for_idle(i915,
220 				     I915_WAIT_INTERRUPTIBLE |
221 				     I915_WAIT_LOCKED,
222 				     MAX_SCHEDULE_TIMEOUT);
223 	if (ret)
224 		return ret;
225 
226 	GEM_BUG_ON(i915->gt.active_requests);
227 
228 	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
229 	for_each_engine(engine, i915, id) {
230 		GEM_TRACE("%s seqno %d (current %d) -> %d\n",
231 			  engine->name,
232 			  engine->timeline.seqno,
233 			  intel_engine_get_seqno(engine),
234 			  seqno);
235 
236 		if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
237 			/* Flush any waiters before we reuse the seqno */
238 			intel_engine_disarm_breadcrumbs(engine);
239 			intel_engine_init_hangcheck(engine);
240 			GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals));
241 		}
242 
243 		/* Check we are idle before we fiddle with hw state! */
244 		GEM_BUG_ON(!intel_engine_is_idle(engine));
245 		GEM_BUG_ON(i915_gem_active_isset(&engine->timeline.last_request));
246 
247 		/* Finally reset hw state */
248 		intel_engine_init_global_seqno(engine, seqno);
249 		engine->timeline.seqno = seqno;
250 	}
251 
252 	list_for_each_entry(timeline, &i915->gt.timelines, link)
253 		memset(timeline->global_sync, 0, sizeof(timeline->global_sync));
254 
255 	i915->gt.request_serial = seqno;
256 
257 	return 0;
258 }
259 
260 int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
261 {
262 	struct drm_i915_private *i915 = to_i915(dev);
263 
264 	lockdep_assert_held(&i915->drm.struct_mutex);
265 
266 	if (seqno == 0)
267 		return -EINVAL;
268 
269 	/* The HWS page seqno needs to be one less than what we will next inject into the ring */
270 	return reset_all_global_seqno(i915, seqno - 1);
271 }
272 
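/*
 * Account for a new request: advance the GT-wide request serial (handling
 * wraparound by resetting all seqno) and unpark the GT when going from idle
 * to busy. unreserve_gt() undoes this once the request is retired.
 */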
273 static int reserve_gt(struct drm_i915_private *i915)
274 {
275 	int ret;
276 
277 	/*
278 	 * Reservation is fine until we may need to wrap around
279 	 *
280 	 * By incrementing the serial for every request, we know that no
281 	 * individual engine may exceed that serial (as each is reset to 0
282 	 * on any wrap). This protects even the most pessimistic of migrations
283 	 * of every request from all engines onto just one.
284 	 */
285 	while (unlikely(++i915->gt.request_serial == 0)) {
286 		ret = reset_all_global_seqno(i915, 0);
287 		if (ret) {
288 			i915->gt.request_serial--;
289 			return ret;
290 		}
291 	}
292 
293 	if (!i915->gt.active_requests++)
294 		i915_gem_unpark(i915);
295 
296 	return 0;
297 }
298 
299 static void unreserve_gt(struct drm_i915_private *i915)
300 {
301 	GEM_BUG_ON(!i915->gt.active_requests);
302 	if (!--i915->gt.active_requests)
303 		i915_gem_park(i915);
304 }
305 
306 void i915_gem_retire_noop(struct i915_gem_active *active,
307 			  struct i915_request *request)
308 {
309 	/* Space left intentionally blank */
310 }
311 
312 static void advance_ring(struct i915_request *request)
313 {
314 	struct intel_ring *ring = request->ring;
315 	unsigned int tail;
316 
317 	/*
318 	 * We know the GPU must have read the request to have
319 	 * sent us the seqno + interrupt, so use the position
320 	 * of the tail of the request to update the last known position
321 	 * of the GPU head.
322 	 *
323 	 * Note this requires that we are always called in request
324 	 * completion order.
325 	 */
326 	GEM_BUG_ON(!list_is_first(&request->ring_link, &ring->request_list));
327 	if (list_is_last(&request->ring_link, &ring->request_list)) {
328 		/*
329 		 * We may race here with execlists resubmitting this request
330 		 * as we retire it. The resubmission will move the ring->tail
331 		 * forwards (to request->wa_tail). We either read the
332 		 * current value that was written to hw, or the value that
333 		 * is just about to be. Either works, if we miss the last two
334 		 * noops - they are safe to be replayed on a reset.
335 		 */
336 		GEM_TRACE("marking %s as inactive\n", ring->timeline->name);
337 		tail = READ_ONCE(request->tail);
338 		list_del(&ring->active_link);
339 	} else {
340 		tail = request->postfix;
341 	}
342 	list_del_init(&request->ring_link);
343 
344 	ring->head = tail;
345 }
346 
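/* Free the list of buffers recorded for error capture with this request. */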
347 static void free_capture_list(struct i915_request *request)
348 {
349 	struct i915_capture_list *capture;
350 
351 	capture = request->capture_list;
352 	while (capture) {
353 		struct i915_capture_list *next = capture->next;
354 
355 		kfree(capture);
356 		capture = next;
357 	}
358 }
359 
360 static void __retire_engine_request(struct intel_engine_cs *engine,
361 				    struct i915_request *rq)
362 {
363 	GEM_TRACE("%s(%s) fence %llx:%d, global=%d, current %d\n",
364 		  __func__, engine->name,
365 		  rq->fence.context, rq->fence.seqno,
366 		  rq->global_seqno,
367 		  intel_engine_get_seqno(engine));
368 
369 	GEM_BUG_ON(!i915_request_completed(rq));
370 
371 	local_irq_disable();
372 
373 	spin_lock(&engine->timeline.lock);
374 	GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests));
375 	list_del_init(&rq->link);
376 	spin_unlock(&engine->timeline.lock);
377 
378 	spin_lock(&rq->lock);
379 	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
380 		dma_fence_signal_locked(&rq->fence);
381 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
382 		intel_engine_cancel_signaling(rq);
383 	if (rq->waitboost) {
384 		GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
385 		atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
386 	}
387 	spin_unlock(&rq->lock);
388 
389 	local_irq_enable();
390 
391 	/*
392 	 * The backing object for the context is done after switching to the
393 	 * *next* context. Therefore we cannot retire the previous context until
394 	 * the next context has already started running. However, since we
395 	 * cannot take the required locks at i915_request_submit() we
396 	 * defer the unpinning of the active context to now, retirement of
397 	 * the subsequent request.
398 	 */
399 	if (engine->last_retired_context)
400 		intel_context_unpin(engine->last_retired_context);
401 	engine->last_retired_context = rq->hw_context;
402 }
403 
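/*
 * Retire, in submission order, every request on @engine's timeline up to and
 * including @rq.
 */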
404 static void __retire_engine_upto(struct intel_engine_cs *engine,
405 				 struct i915_request *rq)
406 {
407 	struct i915_request *tmp;
408 
409 	if (list_empty(&rq->link))
410 		return;
411 
412 	do {
413 		tmp = list_first_entry(&engine->timeline.requests,
414 				       typeof(*tmp), link);
415 
416 		GEM_BUG_ON(tmp->engine != engine);
417 		__retire_engine_request(engine, tmp);
418 	} while (tmp != rq);
419 }
420 
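/*
 * Retire a single completed request: advance the ring, run the active-list
 * retire callbacks, drop the context pin and release our reference. Requests
 * must be retired in order along each ring.
 */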
421 static void i915_request_retire(struct i915_request *request)
422 {
423 	struct i915_gem_active *active, *next;
424 
425 	GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
426 		  request->engine->name,
427 		  request->fence.context, request->fence.seqno,
428 		  request->global_seqno,
429 		  intel_engine_get_seqno(request->engine));
430 
431 	lockdep_assert_held(&request->i915->drm.struct_mutex);
432 	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
433 	GEM_BUG_ON(!i915_request_completed(request));
434 
435 	trace_i915_request_retire(request);
436 
437 	advance_ring(request);
438 	free_capture_list(request);
439 
440 	/*
441 	 * Walk through the active list, calling retire on each. This allows
442 	 * objects to track their GPU activity and mark themselves as idle
443 	 * when their *last* active request is completed (updating state
444 	 * tracking lists for eviction, active references for GEM, etc).
445 	 *
446 	 * As the ->retire() may free the node, we decouple it first and
447 	 * pass along the auxiliary information (to avoid dereferencing
448 	 * the node after the callback).
449 	 */
450 	list_for_each_entry_safe(active, next, &request->active_list, link) {
451 		/*
452 		 * In microbenchmarks, or when focusing on time spent inside the
453 		 * kernel, we may spend an inordinate amount of time simply handling
454 		 * the retirement of requests and processing their callbacks.
455 		 * This loop in particular is hot due to the cache misses incurred
456 		 * when jumping around the list of i915_gem_active.
457 		 * So we try to keep this loop as streamlined as possible and
458 		 * also prefetch the next i915_gem_active to try and hide
459 		 * the likely cache miss.
460 		 */
461 		prefetchw(next);
462 
463 		INIT_LIST_HEAD(&active->link);
464 		RCU_INIT_POINTER(active->request, NULL);
465 
466 		active->retire(active, request);
467 	}
468 
469 	i915_request_remove_from_client(request);
470 
471 	/* Retirement decays the ban score as it is a sign of ctx progress */
472 	atomic_dec_if_positive(&request->gem_context->ban_score);
473 	intel_context_unpin(request->hw_context);
474 
475 	__retire_engine_upto(request->engine, request);
476 
477 	unreserve_gt(request->i915);
478 
479 	i915_sched_node_fini(request->i915, &request->sched);
480 	i915_request_put(request);
481 }
482 
483 void i915_request_retire_upto(struct i915_request *rq)
484 {
485 	struct intel_ring *ring = rq->ring;
486 	struct i915_request *tmp;
487 
488 	GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
489 		  rq->engine->name,
490 		  rq->fence.context, rq->fence.seqno,
491 		  rq->global_seqno,
492 		  intel_engine_get_seqno(rq->engine));
493 
494 	lockdep_assert_held(&rq->i915->drm.struct_mutex);
495 	GEM_BUG_ON(!i915_request_completed(rq));
496 
497 	if (list_empty(&rq->ring_link))
498 		return;
499 
500 	do {
501 		tmp = list_first_entry(&ring->request_list,
502 				       typeof(*tmp), ring_link);
503 
504 		i915_request_retire(tmp);
505 	} while (tmp != rq);
506 }
507 
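/*
 * Claim the next seqno on @tl; serialisation is provided by the caller
 * (engine->timeline.lock for the engine timeline, struct_mutex for the
 * per-context timeline).
 */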
508 static u32 timeline_get_seqno(struct i915_timeline *tl)
509 {
510 	return ++tl->seqno;
511 }
512 
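/*
 * Shuffle the request between its per-context timeline and the engine
 * timeline (submit moves it onto the engine, unsubmit moves it back). The
 * engine timeline lock is already held; the request's own timeline lock is
 * taken here.
 */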
513 static void move_to_timeline(struct i915_request *request,
514 			     struct i915_timeline *timeline)
515 {
516 	GEM_BUG_ON(request->timeline == &request->engine->timeline);
517 	lockdep_assert_held(&request->engine->timeline.lock);
518 
519 	spin_lock(&request->timeline->lock);
520 	list_move_tail(&request->link, &timeline->requests);
521 	spin_unlock(&request->timeline->lock);
522 }
523 
524 void __i915_request_submit(struct i915_request *request)
525 {
526 	struct intel_engine_cs *engine = request->engine;
527 	u32 seqno;
528 
529 	GEM_TRACE("%s fence %llx:%d -> global=%d, current %d\n",
530 		  engine->name,
531 		  request->fence.context, request->fence.seqno,
532 		  engine->timeline.seqno + 1,
533 		  intel_engine_get_seqno(engine));
534 
535 	GEM_BUG_ON(!irqs_disabled());
536 	lockdep_assert_held(&engine->timeline.lock);
537 
538 	GEM_BUG_ON(request->global_seqno);
539 
540 	seqno = timeline_get_seqno(&engine->timeline);
541 	GEM_BUG_ON(!seqno);
542 	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));
543 
544 	/* We may be recursing from the signal callback of another i915 fence */
545 	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
546 	request->global_seqno = seqno;
547 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
548 		intel_engine_enable_signaling(request, false);
549 	spin_unlock(&request->lock);
550 
551 	engine->emit_breadcrumb(request,
552 				request->ring->vaddr + request->postfix);
553 
554 	/* Transfer from per-context onto the global per-engine timeline */
555 	move_to_timeline(request, &engine->timeline);
556 
557 	trace_i915_request_execute(request);
558 
559 	wake_up_all(&request->execute);
560 }
561 
562 void i915_request_submit(struct i915_request *request)
563 {
564 	struct intel_engine_cs *engine = request->engine;
565 	unsigned long flags;
566 
567 	/* Will be called from irq-context when using foreign fences. */
568 	spin_lock_irqsave(&engine->timeline.lock, flags);
569 
570 	__i915_request_submit(request);
571 
572 	spin_unlock_irqrestore(&engine->timeline.lock, flags);
573 }
574 
575 void __i915_request_unsubmit(struct i915_request *request)
576 {
577 	struct intel_engine_cs *engine = request->engine;
578 
579 	GEM_TRACE("%s fence %llx:%d <- global=%d, current %d\n",
580 		  engine->name,
581 		  request->fence.context, request->fence.seqno,
582 		  request->global_seqno,
583 		  intel_engine_get_seqno(engine));
584 
585 	GEM_BUG_ON(!irqs_disabled());
586 	lockdep_assert_held(&engine->timeline.lock);
587 
588 	/*
589 	 * Only unwind in reverse order, required so that the per-context list
590 	 * is kept in seqno/ring order.
591 	 */
592 	GEM_BUG_ON(!request->global_seqno);
593 	GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
594 	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine),
595 				     request->global_seqno));
596 	engine->timeline.seqno--;
597 
598 	/* We may be recursing from the signal callback of another i915 fence */
599 	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
600 	request->global_seqno = 0;
601 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
602 		intel_engine_cancel_signaling(request);
603 	spin_unlock(&request->lock);
604 
605 	/* Transfer back from the global per-engine timeline to per-context */
606 	move_to_timeline(request, request->timeline);
607 
608 	/*
609 	 * We don't need to wake_up any waiters on request->execute, they
610 	 * will get woken by any other event or us re-adding this request
611 	 * to the engine timeline (__i915_request_submit()). The waiters
612 	 * should be quite adept at finding that the request now has a new
613 	 * global_seqno compared to the one they went to sleep on.
614 	 */
615 }
616 
617 void i915_request_unsubmit(struct i915_request *request)
618 {
619 	struct intel_engine_cs *engine = request->engine;
620 	unsigned long flags;
621 
622 	/* Will be called from irq-context when using foreign fences. */
623 	spin_lock_irqsave(&engine->timeline.lock, flags);
624 
625 	__i915_request_unsubmit(request);
626 
627 	spin_unlock_irqrestore(&engine->timeline.lock, flags);
628 }
629 
630 static int __i915_sw_fence_call
631 submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
632 {
633 	struct i915_request *request =
634 		container_of(fence, typeof(*request), submit);
635 
636 	switch (state) {
637 	case FENCE_COMPLETE:
638 		trace_i915_request_submit(request);
639 		/*
640 		 * We need to serialize use of the submit_request() callback
641 		 * with its hotplugging performed during an emergency
642 		 * i915_gem_set_wedged().  We use the RCU mechanism to mark the
643 		 * critical section in order to force i915_gem_set_wedged() to
644 		 * wait until the submit_request() is completed before
645 		 * proceeding.
646 		 */
647 		rcu_read_lock();
648 		request->engine->submit_request(request);
649 		rcu_read_unlock();
650 		break;
651 
652 	case FENCE_FREE:
653 		i915_request_put(request);
654 		break;
655 	}
656 
657 	return NOTIFY_DONE;
658 }
659 
660 /**
661  * i915_request_alloc - allocate a request structure
662  *
663  * @engine: engine that we wish to issue the request on.
664  * @ctx: context that the request will be associated with.
665  *
666  * Returns a pointer to the allocated request if successful,
667  * or an error code if not.
668  */
669 struct i915_request *
670 i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
671 {
672 	struct drm_i915_private *i915 = engine->i915;
673 	struct i915_request *rq;
674 	struct intel_context *ce;
675 	int ret;
676 
677 	lockdep_assert_held(&i915->drm.struct_mutex);
678 
679 	/*
680 	 * Preempt contexts are reserved for exclusive use to inject a
681 	 * preemption context switch. They are never to be used for any trivial
682 	 * request!
683 	 */
684 	GEM_BUG_ON(ctx == i915->preempt_context);
685 
686 	/*
687 	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
688 	 * EIO if the GPU is already wedged.
689 	 */
690 	if (i915_terminally_wedged(&i915->gpu_error))
691 		return ERR_PTR(-EIO);
692 
693 	/*
694 	 * Pinning the contexts may generate requests in order to acquire
695 	 * GGTT space, so do this first before we reserve a seqno for
696 	 * ourselves.
697 	 */
698 	ce = intel_context_pin(ctx, engine);
699 	if (IS_ERR(ce))
700 		return ERR_CAST(ce);
701 
702 	ret = reserve_gt(i915);
703 	if (ret)
704 		goto err_unpin;
705 
706 	ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST);
707 	if (ret)
708 		goto err_unreserve;
709 
710 	/* Move our oldest request to the slab-cache (if not in use!) */
711 	rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
712 	if (!list_is_last(&rq->ring_link, &ce->ring->request_list) &&
713 	    i915_request_completed(rq))
714 		i915_request_retire(rq);
715 
716 	/*
717 	 * Beware: Dragons be flying overhead.
718 	 *
719 	 * We use RCU to look up requests in flight. The lookups may
720 	 * race with the request being allocated from the slab freelist.
721 	 * That is, the request we are writing to here may be in the process
722 	 * of being read by __i915_gem_active_get_rcu(). As such,
723 	 * we have to be very careful when overwriting the contents. During
724 	 * the RCU lookup, we chase the request->engine pointer,
725 	 * read the request->global_seqno and increment the reference count.
726 	 *
727 	 * The reference count is incremented atomically. If it is zero,
728 	 * the lookup knows the request is unallocated and complete. Otherwise,
729 	 * it is either still in use, or has been reallocated and reset
730 	 * with dma_fence_init(). This increment is safe for release as we
731 	 * check that the request we have a reference to matches the active
732 	 * request.
733 	 *
734 	 * Before we increment the refcount, we chase the request->engine
735 	 * pointer. We must not call kmem_cache_zalloc() or else we set
736 	 * that pointer to NULL and cause a crash during the lookup. If
737 	 * we see the request is completed (based on the value of the
738 	 * old engine and seqno), the lookup is complete and reports NULL.
739 	 * If we decide the request is not completed (new engine or seqno),
740 	 * then we grab a reference and double check that it is still the
741 	 * active request - if it is not, we restart the lookup.
742 	 *
743 	 * Do not use kmem_cache_zalloc() here!
744 	 */
745 #ifdef __linux__
746 	rq = kmem_cache_alloc(i915->requests,
747 			      GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
748 #else
749 	rq = pool_get(&i915->requests, PR_WAITOK);
750 #endif
751 	if (unlikely(!rq)) {
752 		/* Ratelimit ourselves to prevent oom from malicious clients */
753 		ret = i915_gem_wait_for_idle(i915,
754 					     I915_WAIT_LOCKED |
755 					     I915_WAIT_INTERRUPTIBLE,
756 					     MAX_SCHEDULE_TIMEOUT);
757 		if (ret)
758 			goto err_unreserve;
759 
760 		/*
761 		 * We've forced the client to stall and catch up with whatever
762 		 * backlog there might have been. As we are assuming that we
763 		 * caused the mempressure, now is an opportune time to
764 		 * recover as much memory from the request pool as is possible.
765 		 * Having already penalized the client to stall, we spend
766 		 * a little extra time to re-optimise page allocation.
767 		 */
768 #ifdef notyet
769 		kmem_cache_shrink(i915->requests);
770 #endif
771 		rcu_barrier(); /* Recover the TYPESAFE_BY_RCU pages */
772 
773 #ifdef __linux__
774 		rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
775 #else
776 		rq = pool_get(&i915->requests, PR_WAITOK);
777 #endif
778 		if (!rq) {
779 			ret = -ENOMEM;
780 			goto err_unreserve;
781 		}
782 	}
783 
784 	INIT_LIST_HEAD(&rq->active_list);
785 	rq->i915 = i915;
786 	rq->engine = engine;
787 	rq->gem_context = ctx;
788 	rq->hw_context = ce;
789 	rq->ring = ce->ring;
790 	rq->timeline = ce->ring->timeline;
791 	GEM_BUG_ON(rq->timeline == &engine->timeline);
792 
793 	mtx_init(&rq->lock, IPL_TTY);
794 	dma_fence_init(&rq->fence,
795 		       &i915_fence_ops,
796 		       &rq->lock,
797 		       rq->timeline->fence_context,
798 		       timeline_get_seqno(rq->timeline));
799 
800 	/* We bump the ref for the fence chain */
801 	i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
802 	init_waitqueue_head(&rq->execute);
803 
804 	i915_sched_node_init(&rq->sched);
805 
806 	/* No zalloc, must clear what we need by hand */
807 	rq->global_seqno = 0;
808 	rq->signaling.wait.seqno = 0;
809 	rq->file_priv = NULL;
810 	rq->batch = NULL;
811 	rq->capture_list = NULL;
812 	rq->waitboost = false;
813 
814 	/*
815 	 * Reserve space in the ring buffer for all the commands required to
816 	 * eventually emit this request. This is to guarantee that the
817 	 * i915_request_add() call can't fail. Note that the reserve may need
818 	 * to be redone if the request is not actually submitted straight
819 	 * away, e.g. because a GPU scheduler has deferred it.
820 	 */
821 	rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
822 	GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz);
823 
824 	/*
825 	 * Record the position of the start of the request so that
826 	 * should we detect the updated seqno part-way through the
827 	 * GPU processing the request, we never over-estimate the
828 	 * position of the head.
829 	 */
830 	rq->head = rq->ring->emit;
831 
832 	/* Unconditionally invalidate GPU caches and TLBs. */
833 	ret = engine->emit_flush(rq, EMIT_INVALIDATE);
834 	if (ret)
835 		goto err_unwind;
836 
837 	ret = engine->request_alloc(rq);
838 	if (ret)
839 		goto err_unwind;
840 
841 	/* Keep a second pin for the dual retirement along engine and ring */
842 	__intel_context_pin(ce);
843 
844 	rq->infix = rq->ring->emit; /* end of header; start of user payload */
845 
846 	/* Check that we didn't interrupt ourselves with a new request */
847 	GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
848 	return rq;
849 
850 err_unwind:
851 	ce->ring->emit = rq->head;
852 
853 	/* Make sure we didn't add ourselves to external state before freeing */
854 	GEM_BUG_ON(!list_empty(&rq->active_list));
855 	GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
856 	GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
857 
858 #ifdef __linux__
859 	kmem_cache_free(i915->requests, rq);
860 #else
861 	pool_put(&i915->requests, rq);
862 #endif
863 err_unreserve:
864 	unreserve_gt(i915);
865 err_unpin:
866 	intel_context_unpin(ce);
867 	return ERR_PTR(ret);
868 }
869 
870 static int
871 i915_request_await_request(struct i915_request *to, struct i915_request *from)
872 {
873 	int ret;
874 
875 	GEM_BUG_ON(to == from);
876 	GEM_BUG_ON(to->timeline == from->timeline);
877 
878 	if (i915_request_completed(from))
879 		return 0;
880 
881 	if (to->engine->schedule) {
882 		ret = i915_sched_node_add_dependency(to->i915,
883 						     &to->sched,
884 						     &from->sched);
885 		if (ret < 0)
886 			return ret;
887 	}
888 
889 	if (to->engine == from->engine) {
890 		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
891 						       &from->submit,
892 						       I915_FENCE_GFP);
893 		return ret < 0 ? ret : 0;
894 	}
895 
896 	if (to->engine->semaphore.sync_to) {
897 		u32 seqno;
898 
899 		GEM_BUG_ON(!from->engine->semaphore.signal);
900 
901 		seqno = i915_request_global_seqno(from);
902 		if (!seqno)
903 			goto await_dma_fence;
904 
905 		if (seqno <= to->timeline->global_sync[from->engine->id])
906 			return 0;
907 
908 		trace_i915_gem_ring_sync_to(to, from);
909 		ret = to->engine->semaphore.sync_to(to, from);
910 		if (ret)
911 			return ret;
912 
913 		to->timeline->global_sync[from->engine->id] = seqno;
914 		return 0;
915 	}
916 
917 await_dma_fence:
918 	ret = i915_sw_fence_await_dma_fence(&to->submit,
919 					    &from->fence, 0,
920 					    I915_FENCE_GFP);
921 	return ret < 0 ? ret : 0;
922 }
923 
924 int
925 i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
926 {
927 	struct dma_fence **child = &fence;
928 	unsigned int nchild = 1;
929 	int ret;
930 
931 	/*
932 	 * Note that if the fence-array was created in signal-on-any mode,
933 	 * we should *not* decompose it into its individual fences. However,
934 	 * we don't currently store which mode the fence-array is operating
935 	 * in. Fortunately, the only user of signal-on-any is private to
936 	 * amdgpu and we should not see any incoming fence-array from
937 	 * sync-file being in signal-on-any mode.
938 	 */
939 	if (dma_fence_is_array(fence)) {
940 		struct dma_fence_array *array = to_dma_fence_array(fence);
941 
942 		child = array->fences;
943 		nchild = array->num_fences;
944 		GEM_BUG_ON(!nchild);
945 	}
946 
947 	do {
948 		fence = *child++;
949 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
950 			continue;
951 
952 		/*
953 		 * Requests on the same timeline are explicitly ordered, along
954 		 * with their dependencies, by i915_request_add() which ensures
955 		 * that requests are submitted in-order through each ring.
956 		 */
957 		if (fence->context == rq->fence.context)
958 			continue;
959 
960 		/* Squash repeated waits to the same timelines */
961 		if (fence->context != rq->i915->mm.unordered_timeline &&
962 		    i915_timeline_sync_is_later(rq->timeline, fence))
963 			continue;
964 
965 		if (dma_fence_is_i915(fence))
966 			ret = i915_request_await_request(rq, to_request(fence));
967 		else
968 			ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
969 							    I915_FENCE_TIMEOUT,
970 							    I915_FENCE_GFP);
971 		if (ret < 0)
972 			return ret;
973 
974 		/* Record the latest fence used against each timeline */
975 		if (fence->context != rq->i915->mm.unordered_timeline)
976 			i915_timeline_sync_set(rq->timeline, fence);
977 	} while (--nchild);
978 
979 	return 0;
980 }
981 
982 /**
983  * i915_request_await_object - set this request to (async) wait upon a bo
984  * @to: request we are wishing to use
985  * @obj: object which may be in use on another ring.
986  * @write: whether the wait is on behalf of a writer
987  *
988  * This code is meant to abstract object synchronization with the GPU.
989  * Conceptually we serialise writes between engines inside the GPU.
990  * We only allow one engine to write into a buffer at any time, but
991  * multiple readers. To ensure each has a coherent view of memory, we must:
992  *
993  * - If there is an outstanding write request to the object, the new
994  *   request must wait for it to complete (either CPU or in hw, requests
995  *   on the same ring will be naturally ordered).
996  *
997  * - If we are a write request (pending_write_domain is set), the new
998  *   request must wait for outstanding read requests to complete.
999  *
1000  * Returns 0 if successful, else propagates up the lower layer error.
1001  */
1002 int
1003 i915_request_await_object(struct i915_request *to,
1004 			  struct drm_i915_gem_object *obj,
1005 			  bool write)
1006 {
1007 	struct dma_fence *excl;
1008 	int ret = 0;
1009 
1010 	if (write) {
1011 		struct dma_fence **shared;
1012 		unsigned int count, i;
1013 
1014 		ret = reservation_object_get_fences_rcu(obj->resv,
1015 							&excl, &count, &shared);
1016 		if (ret)
1017 			return ret;
1018 
1019 		for (i = 0; i < count; i++) {
1020 			ret = i915_request_await_dma_fence(to, shared[i]);
1021 			if (ret)
1022 				break;
1023 
1024 			dma_fence_put(shared[i]);
1025 		}
1026 
1027 		for (; i < count; i++)
1028 			dma_fence_put(shared[i]);
1029 		kfree(shared);
1030 	} else {
1031 		excl = reservation_object_get_excl_rcu(obj->resv);
1032 	}
1033 
1034 	if (excl) {
1035 		if (ret == 0)
1036 			ret = i915_request_await_dma_fence(to, excl);
1037 
1038 		dma_fence_put(excl);
1039 	}
1040 
1041 	return ret;
1042 }
1043 
1044 void i915_request_skip(struct i915_request *rq, int error)
1045 {
1046 	void *vaddr = rq->ring->vaddr;
1047 	u32 head;
1048 
1049 	GEM_BUG_ON(!IS_ERR_VALUE((long)error));
1050 	dma_fence_set_error(&rq->fence, error);
1051 
1052 	/*
1053 	 * As this request likely depends on state from the lost
1054 	 * context, clear out all the user operations leaving the
1055 	 * breadcrumb at the end (so we get the fence notifications).
1056 	 */
1057 	head = rq->infix;
1058 	if (rq->postfix < head) {
1059 		memset(vaddr + head, 0, rq->ring->size - head);
1060 		head = 0;
1061 	}
1062 	memset(vaddr + head, 0, rq->postfix - head);
1063 }
1064 
1065 /*
1066  * NB: This function is not allowed to fail. Doing so would mean the
1067  * request is not being tracked for completion but the work itself is
1068  * going to happen on the hardware. This would be a Bad Thing(tm).
1069  */
1070 void i915_request_add(struct i915_request *request)
1071 {
1072 	struct intel_engine_cs *engine = request->engine;
1073 	struct i915_timeline *timeline = request->timeline;
1074 	struct intel_ring *ring = request->ring;
1075 	struct i915_request *prev;
1076 	u32 *cs;
1077 
1078 	GEM_TRACE("%s fence %llx:%d\n",
1079 		  engine->name, request->fence.context, request->fence.seqno);
1080 
1081 	lockdep_assert_held(&request->i915->drm.struct_mutex);
1082 	trace_i915_request_add(request);
1083 
1084 	/*
1085 	 * Make sure that no request gazumped us - if it was allocated after
1086 	 * our i915_request_alloc() and called i915_request_add() before
1087 	 * us, the timeline will hold its seqno which is later than ours.
1088 	 */
1089 	GEM_BUG_ON(timeline->seqno != request->fence.seqno);
1090 
1091 	/*
1092 	 * To ensure that this call will not fail, space for its emissions
1093 	 * should already have been reserved in the ring buffer. Let the ring
1094 	 * know that it is time to use that space up.
1095 	 */
1096 	request->reserved_space = 0;
1097 	engine->emit_flush(request, EMIT_FLUSH);
1098 
1099 	/*
1100 	 * Record the position of the start of the breadcrumb so that
1101 	 * should we detect the updated seqno part-way through the
1102 	 * GPU processing the request, we never over-estimate the
1103 	 * position of the ring's HEAD.
1104 	 */
1105 	cs = intel_ring_begin(request, engine->emit_breadcrumb_sz);
1106 	GEM_BUG_ON(IS_ERR(cs));
1107 	request->postfix = intel_ring_offset(request, cs);
1108 
1109 	/*
1110 	 * Seal the request and mark it as pending execution. Note that
1111 	 * we may inspect this state, without holding any locks, during
1112 	 * hangcheck. Hence we apply the barrier to ensure that we do not
1113 	 * see a more recent value in the hws than we are tracking.
1114 	 */
1115 
1116 	prev = i915_gem_active_raw(&timeline->last_request,
1117 				   &request->i915->drm.struct_mutex);
1118 	if (prev && !i915_request_completed(prev)) {
1119 		i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
1120 					     &request->submitq);
1121 		if (engine->schedule)
1122 			__i915_sched_node_add_dependency(&request->sched,
1123 							 &prev->sched,
1124 							 &request->dep,
1125 							 0);
1126 	}
1127 
1128 	spin_lock_irq(&timeline->lock);
1129 	list_add_tail(&request->link, &timeline->requests);
1130 	spin_unlock_irq(&timeline->lock);
1131 
1132 	GEM_BUG_ON(timeline->seqno != request->fence.seqno);
1133 	i915_gem_active_set(&timeline->last_request, request);
1134 
1135 	list_add_tail(&request->ring_link, &ring->request_list);
1136 	if (list_is_first(&request->ring_link, &ring->request_list)) {
1137 		GEM_TRACE("marking %s as active\n", ring->timeline->name);
1138 		list_add(&ring->active_link, &request->i915->gt.active_rings);
1139 	}
1140 	request->emitted_jiffies = jiffies;
1141 
1142 	/*
1143 	 * Let the backend know a new request has arrived that may need
1144 	 * to adjust the existing execution schedule due to a high priority
1145 	 * request - i.e. we may want to preempt the current request in order
1146 	 * to run a high priority dependency chain *before* we can execute this
1147 	 * request.
1148 	 *
1149 	 * This is called before the request is ready to run so that we can
1150 	 * decide whether to preempt the entire chain so that it is ready to
1151 	 * run at the earliest possible convenience.
1152 	 */
1153 	local_bh_disable();
1154 	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
1155 	if (engine->schedule)
1156 		engine->schedule(request, &request->gem_context->sched);
1157 	rcu_read_unlock();
1158 	i915_sw_fence_commit(&request->submit);
1159 	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
1160 
1161 	/*
1162 	 * In typical scenarios, we do not expect the previous request on
1163 	 * the timeline to be still tracked by timeline->last_request if it
1164 	 * has been completed. If the completed request is still here, that
1165 	 * implies that request retirement is a long way behind submission,
1166 	 * suggesting that we haven't been retiring frequently enough from
1167 	 * the combination of retire-before-alloc, waiters and the background
1168 	 * retirement worker. So if the last request on this timeline was
1169 	 * already completed, do a catch up pass, flushing the retirement queue
1170 	 * up to this client. Since we have now moved the heaviest operations
1171 	 * during retirement onto secondary workers, such as freeing objects
1172 	 * or contexts, retiring a bunch of requests is mostly list management
1173 	 * (and cache misses), and so we should not be overly penalizing this
1174 	 * client by performing excess work, though we may still be performing
1175 	 * work on behalf of others -- but instead we should benefit from
1176 	 * improved resource management. (Well, that's the theory at least.)
1177 	 */
1178 	if (prev && i915_request_completed(prev))
1179 		i915_request_retire_upto(prev);
1180 }
1181 
1182 #ifdef __linux__
1183 static unsigned long local_clock_us(unsigned int *cpu)
1184 {
1185 	unsigned long t;
1186 
1187 	/*
1188 	 * Cheaply and approximately convert from nanoseconds to microseconds.
1189 	 * The result and subsequent calculations are also defined in the same
1190 	 * approximate microseconds units. The principal source of timing
1191 	 * error here is from the simple truncation.
1192 	 *
1193 	 * Note that local_clock() is only defined wrt to the current CPU;
1194 	 * the comparisons are no longer valid if we switch CPUs. Instead of
1195 	 * blocking preemption for the entire busywait, we can detect the CPU
1196 	 * switch and use that as indicator of system load and a reason to
1197 	 * stop busywaiting, see busywait_stop().
1198 	 */
1199 	*cpu = get_cpu();
1200 	t = local_clock() >> 10;
1201 	put_cpu();
1202 
1203 	return t;
1204 }
1205 #else
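/*
 * OpenBSD approximation: ticks counts hardclock interrupts and tick is the
 * interrupt period in microseconds, so ticks * tick gives elapsed time in
 * microseconds at tick granularity.
 */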
1206 static unsigned long local_clock_us(unsigned *cpu)
1207 {
1208 	*cpu = cpu_number();
1209 	return ticks * tick;
1210 }
1211 #endif
1212 
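/*
 * Give up busywaiting once the timeout expires or we migrate to a different
 * CPU (local_clock() values are only comparable on the same CPU).
 */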
1213 static bool busywait_stop(unsigned long timeout, unsigned int cpu)
1214 {
1215 	unsigned int this_cpu;
1216 
1217 	if (time_after(local_clock_us(&this_cpu), timeout))
1218 		return true;
1219 
1220 	return this_cpu != cpu;
1221 }
1222 
1223 static bool __i915_spin_request(const struct i915_request *rq,
1224 				u32 seqno, int state, unsigned long timeout_us)
1225 {
1226 	struct intel_engine_cs *engine = rq->engine;
1227 	unsigned int irq, cpu;
1228 
1229 	GEM_BUG_ON(!seqno);
1230 
1231 	/*
1232 	 * Only wait for the request if we know it is likely to complete.
1233 	 *
1234 	 * We don't track the timestamps around requests, nor the average
1235 	 * request length, so we do not have a good indicator that this
1236 	 * request will complete within the timeout. What we do know is the
1237 	 * order in which requests are executed by the engine and so we can
1238 	 * tell if the request has started. If the request hasn't started yet,
1239 	 * it is a fair assumption that it will not complete within our
1240 	 * relatively short timeout.
1241 	 */
1242 	if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1))
1243 		return false;
1244 
1245 	/*
1246 	 * When waiting for high frequency requests, e.g. during synchronous
1247 	 * rendering split between the CPU and GPU, the finite amount of time
1248 	 * required to set up the irq and wait upon it limits the response
1249 	 * rate. By busywaiting on the request completion for a short while we
1250 	 * can service the high frequency waits as quick as possible. However,
1251 	 * if it is a slow request, we want to sleep as quickly as possible.
1252 	 * The tradeoff between waiting and sleeping is roughly the time it
1253 	 * takes to sleep on a request, on the order of a microsecond.
1254 	 */
1255 
1256 	irq = READ_ONCE(engine->breadcrumbs.irq_count);
1257 	timeout_us += local_clock_us(&cpu);
1258 	do {
1259 		if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
1260 			return seqno == i915_request_global_seqno(rq);
1261 
1262 		/*
1263 		 * Seqnos are meant to be ordered *before* the interrupt. If
1264 		 * we see an interrupt without a corresponding seqno advance,
1265 		 * assume we won't see one in the near future but require
1266 		 * the engine->seqno_barrier() to fixup coherency.
1267 		 */
1268 		if (READ_ONCE(engine->breadcrumbs.irq_count) != irq)
1269 			break;
1270 
1271 		if (signal_pending_state(state, current))
1272 			break;
1273 
1274 		if (busywait_stop(timeout_us, cpu))
1275 			break;
1276 
1277 		cpu_relax();
1278 	} while (!drm_need_resched());
1279 
1280 	return false;
1281 }
1282 
1283 static bool __i915_wait_request_check_and_reset(struct i915_request *request)
1284 {
1285 	struct i915_gpu_error *error = &request->i915->gpu_error;
1286 
1287 	if (likely(!i915_reset_handoff(error)))
1288 		return false;
1289 
1290 	__set_current_state(TASK_RUNNING);
1291 	i915_reset(request->i915, error->stalled_mask, error->reason);
1292 	return true;
1293 }
1294 
1295 /**
1296  * i915_request_wait - wait until execution of request has finished
1297  * @rq: the request to wait upon
1298  * @flags: how to wait
1299  * @timeout: how long to wait in jiffies
1300  *
1301  * i915_request_wait() waits for the request to be completed, for a
1302  * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
1303  * unbounded wait).
1304  *
1305  * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
1306  * in via the flags; conversely, if the struct_mutex is not held, the caller
1307  * must not specify that the wait is locked.
1308  *
1309  * Returns the remaining time (in jiffies) if the request completed, which may
1310  * be zero or -ETIME if the request is unfinished after the timeout expires.
1311  * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
1312  * pending before the request completes.
1313  */
1314 long i915_request_wait(struct i915_request *rq,
1315 		       unsigned int flags,
1316 		       long timeout)
1317 {
1318 	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
1319 		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
1320 	wait_queue_head_t *errq = &rq->i915->gpu_error.wait_queue;
1321 	DEFINE_WAIT_FUNC(reset, default_wake_function);
1322 	DEFINE_WAIT_FUNC(exec, default_wake_function);
1323 	struct intel_wait wait;
1324 
1325 	might_sleep();
1326 #if IS_ENABLED(CONFIG_LOCKDEP)
1327 	GEM_BUG_ON(debug_locks &&
1328 		   !!lockdep_is_held(&rq->i915->drm.struct_mutex) !=
1329 		   !!(flags & I915_WAIT_LOCKED));
1330 #endif
1331 	GEM_BUG_ON(timeout < 0);
1332 
1333 	if (i915_request_completed(rq))
1334 		return timeout;
1335 
1336 	if (!timeout)
1337 		return -ETIME;
1338 
1339 	trace_i915_request_wait_begin(rq, flags);
1340 
1341 	add_wait_queue(&rq->execute, &exec);
1342 	if (flags & I915_WAIT_LOCKED)
1343 		add_wait_queue(errq, &reset);
1344 
1345 	intel_wait_init(&wait);
1346 
1347 restart:
1348 	do {
1349 		set_current_state(state);
1350 		if (intel_wait_update_request(&wait, rq))
1351 			break;
1352 
1353 		if (flags & I915_WAIT_LOCKED &&
1354 		    __i915_wait_request_check_and_reset(rq))
1355 			continue;
1356 
1357 		if (signal_pending_state(state, current)) {
1358 			timeout = -ERESTARTSYS;
1359 			goto complete;
1360 		}
1361 
1362 		if (!timeout) {
1363 			timeout = -ETIME;
1364 			goto complete;
1365 		}
1366 
1367 		timeout = io_schedule_timeout(timeout);
1368 	} while (1);
1369 
1370 	GEM_BUG_ON(!intel_wait_has_seqno(&wait));
1371 	GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
1372 
1373 	/* Optimistic short spin before touching IRQs */
1374 	if (__i915_spin_request(rq, wait.seqno, state, 5))
1375 		goto complete;
1376 
1377 	set_current_state(state);
1378 	if (intel_engine_add_wait(rq->engine, &wait))
1379 		/*
1380 		 * In order to check that we haven't missed the interrupt
1381 		 * as we enabled it, we need to kick ourselves to do a
1382 		 * coherent check on the seqno before we sleep.
1383 		 */
1384 		goto wakeup;
1385 
1386 	if (flags & I915_WAIT_LOCKED)
1387 		__i915_wait_request_check_and_reset(rq);
1388 
1389 	for (;;) {
1390 		if (signal_pending_state(state, current)) {
1391 			timeout = -ERESTARTSYS;
1392 			break;
1393 		}
1394 
1395 		if (!timeout) {
1396 			timeout = -ETIME;
1397 			break;
1398 		}
1399 
1400 		timeout = io_schedule_timeout(timeout);
1401 
1402 		if (intel_wait_complete(&wait) &&
1403 		    intel_wait_check_request(&wait, rq))
1404 			break;
1405 
1406 		set_current_state(state);
1407 
1408 wakeup:
1409 		/*
1410 		 * Carefully check if the request is complete, giving time
1411 		 * for the seqno to be visible following the interrupt.
1412 		 * We also have to check in case we are kicked by the GPU
1413 		 * reset in order to drop the struct_mutex.
1414 		 */
1415 		if (__i915_request_irq_complete(rq))
1416 			break;
1417 
1418 		/*
1419 		 * If the GPU is hung, and we hold the lock, reset the GPU
1420 		 * and then check for completion. On a full reset, the engine's
1421 		 * HW seqno will be advanced past us and we are complete.
1422 		 * If we do a partial reset, we have to wait for the GPU to
1423 		 * resume and update the breadcrumb.
1424 		 *
1425 		 * If we don't hold the mutex, we can just wait for the worker
1426 		 * to come along and update the breadcrumb (either directly
1427 		 * itself, or indirectly by recovering the GPU).
1428 		 */
1429 		if (flags & I915_WAIT_LOCKED &&
1430 		    __i915_wait_request_check_and_reset(rq))
1431 			continue;
1432 
1433 		/* Only spin if we know the GPU is processing this request */
1434 		if (__i915_spin_request(rq, wait.seqno, state, 2))
1435 			break;
1436 
1437 		if (!intel_wait_check_request(&wait, rq)) {
1438 			intel_engine_remove_wait(rq->engine, &wait);
1439 			goto restart;
1440 		}
1441 	}
1442 
1443 	intel_engine_remove_wait(rq->engine, &wait);
1444 complete:
1445 	__set_current_state(TASK_RUNNING);
1446 	if (flags & I915_WAIT_LOCKED)
1447 		remove_wait_queue(errq, &reset);
1448 	remove_wait_queue(&rq->execute, &exec);
1449 	trace_i915_request_wait_end(rq);
1450 
1451 	return timeout;
1452 }
1453 
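/*
 * Retire completed requests from this ring, oldest first, stopping at the
 * first request still busy on the GPU.
 */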
1454 static void ring_retire_requests(struct intel_ring *ring)
1455 {
1456 	struct i915_request *request, *next;
1457 
1458 	list_for_each_entry_safe(request, next,
1459 				 &ring->request_list, ring_link) {
1460 		if (!i915_request_completed(request))
1461 			break;
1462 
1463 		i915_request_retire(request);
1464 	}
1465 }
1466 
1467 void i915_retire_requests(struct drm_i915_private *i915)
1468 {
1469 	struct intel_ring *ring, *tmp;
1470 
1471 	lockdep_assert_held(&i915->drm.struct_mutex);
1472 
1473 	if (!i915->gt.active_requests)
1474 		return;
1475 
1476 	list_for_each_entry_safe(ring, tmp, &i915->gt.active_rings, active_link)
1477 		ring_retire_requests(ring);
1478 }
1479 
1480 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1481 #include "selftests/mock_request.c"
1482 #include "selftests/i915_request.c"
1483 #endif
1484