// SPDX-License-Identifier: MIT
/*
 * Copyright © 2008-2021 Intel Corporation
 */

#include "gen2_engine_cs.h"
#include "gen6_engine_cs.h"
#include "gen6_ppgtt.h"
#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "i915_mitigations.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "shmem_utils.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200

static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
{
	/*
	 * Keep the render interrupt unmasked as this papers over
	 * lost interrupts following a reset.
	 */
	if (engine->class == RENDER_CLASS) {
		if (GRAPHICS_VER(engine->i915) >= 6)
			mask &= ~BIT(0);
		else
			mask &= ~I915_USER_INTERRUPT;
	}

	intel_engine_set_hwsp_writemask(engine, mask);
}

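/*
 * Point the hardware status page at a physical address. On gen4+ the
 * HWS_PGA register also carries bits 35:32 of the physical address in
 * its bits 7:4, which is what the shift-and-mask below packs in.
 */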
static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
{
	u32 addr;

	addr = lower_32_bits(phys);
	if (GRAPHICS_VER(engine->i915) >= 4)
		addr |= (phys >> 28) & 0xf0;

	intel_uncore_write(engine->uncore, HWS_PGA, addr);
}

static struct vm_page *status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj = engine->status_page.vma->obj;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	return sg_page(obj->mm.pages->sgl);
}

static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine))));
	set_hwstam(engine, ~0u);
}

static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
{
	i915_reg_t hwsp;

	/*
	 * The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (GRAPHICS_VER(engine->i915) == 7) {
		switch (engine->id) {
		/*
		 * No more rings exist on Gen7. Default case is only to shut up
		 * gcc switch check warning.
		 */
		default:
			GEM_BUG_ON(engine->id);
			fallthrough;
		case RCS0:
			hwsp = RENDER_HWS_PGA_GEN7;
			break;
		case BCS0:
			hwsp = BLT_HWS_PGA_GEN7;
			break;
		case VCS0:
			hwsp = BSD_HWS_PGA_GEN7;
			break;
		case VECS0:
			hwsp = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (GRAPHICS_VER(engine->i915) == 6) {
		hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		hwsp = RING_HWS_PGA(engine->mmio_base);
	}

	intel_uncore_write_fw(engine->uncore, hwsp, offset);
	intel_uncore_posting_read_fw(engine->uncore, hwsp);
}

static void flush_cs_tlb(struct intel_engine_cs *engine)
{
	if (!IS_GRAPHICS_VER(engine->i915, 6, 7))
		return;

	/* ring should be idle before issuing a sync flush */
	GEM_DEBUG_WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);

	ENGINE_WRITE_FW(engine, RING_INSTPM,
			_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					   INSTPM_SYNC_FLUSH));
	if (__intel_wait_for_register_fw(engine->uncore,
					 RING_INSTPM(engine->mmio_base),
					 INSTPM_SYNC_FLUSH, 0,
					 2000, 0, NULL))
		ENGINE_TRACE(engine,
			     "wait for SyncFlush to complete for TLB invalidation timed out\n");
}

static void ring_setup_status_page(struct intel_engine_cs *engine)
{
	set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));
	set_hwstam(engine, ~0u);

	flush_cs_tlb(engine);
}

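/*
 * For a GGTT address space, substitute the aliasing ppgtt that backs it
 * (NULL if the platform has none); full ppgtt address spaces are
 * returned unchanged.
 */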
static struct i915_address_space *vm_alias(struct i915_address_space *vm)
{
	if (i915_is_ggtt(vm))
		vm = &i915_vm_to_ggtt(vm)->alias->vm;

	return vm;
}

static u32 pp_dir(struct i915_address_space *vm)
{
	return to_gen6_ppgtt(i915_vm_to_ppgtt(vm))->pp_dir;
}

static void set_pp_dir(struct intel_engine_cs *engine)
{
	struct i915_address_space *vm = vm_alias(engine->gt->vm);

	if (!vm)
		return;

	ENGINE_WRITE_FW(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
	ENGINE_WRITE_FW(engine, RING_PP_DIR_BASE, pp_dir(vm));

	if (GRAPHICS_VER(engine->i915) >= 7) {
		ENGINE_WRITE_FW(engine,
				RING_MODE_GEN7,
				_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	}
}

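/* Drain and disable the ring; returns true if HEAD could be reset to 0. */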
static bool stop_ring(struct intel_engine_cs *engine)
{
	/* Empty the ring by skipping to the end */
	ENGINE_WRITE_FW(engine, RING_HEAD, ENGINE_READ_FW(engine, RING_TAIL));
	ENGINE_POSTING_READ(engine, RING_HEAD);

	/* The ring must be empty before it is disabled */
	ENGINE_WRITE_FW(engine, RING_CTL, 0);
	ENGINE_POSTING_READ(engine, RING_CTL);

	/* Then reset the disabled ring */
	ENGINE_WRITE_FW(engine, RING_HEAD, 0);
	ENGINE_WRITE_FW(engine, RING_TAIL, 0);

	return (ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) == 0;
}

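/*
 * (Re)start the engine: reprogram the status page and ppgtt page
 * directory, then restore HEAD/TAIL/CTL from the software ring state
 * and wait for the ring to report itself as valid.
 */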
static int xcs_resume(struct intel_engine_cs *engine)
{
	struct intel_ring *ring = engine->legacy.ring;

	ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
		     ring->head, ring->tail);

	/*
	 * Double check the ring is empty & disabled before we resume. Called
	 * from atomic context during PCI probe, so _hardirq().
	 */
	intel_synchronize_hardirq(engine->i915);
	if (!stop_ring(engine))
		goto err;

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		ring_setup_phys_status_page(engine);
	else
		ring_setup_status_page(engine);

	intel_breadcrumbs_reset(engine->breadcrumbs);

	/* Enforce ordering by reading HEAD register back */
	ENGINE_POSTING_READ(engine, RING_HEAD);

	/*
	 * Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values.
	 */
	ENGINE_WRITE_FW(engine, RING_START, i915_ggtt_offset(ring->vma));

	/* Check that the ring offsets point within the ring! */
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
	intel_ring_update_space(ring);

	set_pp_dir(engine);

	/* First wake the ring up to an empty/idle ring */
	ENGINE_WRITE_FW(engine, RING_HEAD, ring->head);
	ENGINE_WRITE_FW(engine, RING_TAIL, ring->head);
	ENGINE_POSTING_READ(engine, RING_TAIL);

	ENGINE_WRITE_FW(engine, RING_CTL,
			RING_CTL_SIZE(ring->size) | RING_VALID);

	/* Wait for the ring to report itself as valid; if not, it is dead */
	if (__intel_wait_for_register_fw(engine->uncore,
					 RING_CTL(engine->mmio_base),
					 RING_VALID, RING_VALID,
					 5000, 0, NULL))
		goto err;

	if (GRAPHICS_VER(engine->i915) > 2)
		ENGINE_WRITE_FW(engine,
				RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));

	/* Now awake, let it get started */
	if (ring->tail != ring->head) {
		ENGINE_WRITE_FW(engine, RING_TAIL, ring->tail);
		ENGINE_POSTING_READ(engine, RING_TAIL);
	}

	/* Papering over lost _interrupts_ immediately following the restart */
	intel_engine_signal_breadcrumbs(engine);
	return 0;

err:
	drm_err(&engine->i915->drm,
		"%s initialization failed; "
		"ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
		engine->name,
		ENGINE_READ(engine, RING_CTL),
		ENGINE_READ(engine, RING_CTL) & RING_VALID,
		ENGINE_READ(engine, RING_HEAD), ring->head,
		ENGINE_READ(engine, RING_TAIL), ring->tail,
		ENGINE_READ(engine, RING_START),
		i915_ggtt_offset(ring->vma));
	return -EIO;
}

static void sanitize_hwsp(struct intel_engine_cs *engine)
{
	struct intel_timeline *tl;

	list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
		intel_timeline_reset_seqno(tl);
}

static void xcs_sanitize(struct intel_engine_cs *engine)
{
	/*
	 * Poison residual state on resume, in case the suspend didn't!
	 *
	 * We have to assume that across suspend/resume (or other loss
	 * of control) the contents of our pinned buffers have been
	 * lost, replaced by garbage. Since this doesn't always happen,
	 * let's poison such state so that we more quickly spot when
	 * we falsely assume it has been preserved.
	 */
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);

	/*
	 * The kernel_context HWSP is stored in the status_page. As above,
	 * that may be lost on resume/initialisation, and so we need to
	 * reset the value in the HWSP.
	 */
	sanitize_hwsp(engine);

	/* And scrub the dirty cachelines for the HWSP */
	drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);

	intel_engine_reset_pinned_contexts(engine);
}

static void reset_prepare(struct intel_engine_cs *engine)
{
	/*
	 * We stop the engines, otherwise we might get a failed reset and a
	 * dead gpu (on elk). Even a gpu as modern as kbl can suffer a
	 * system hang if a batchbuffer is still progressing when the reset
	 * is issued, regardless of the READY_TO_RESET ack. Thus we assume
	 * it is best to stop the engines on all gens where we have a gpu
	 * reset.
	 *
	 * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
	 *
	 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
	 * WaClearRingBufHeadRegAtInit:ctg,elk
	 *
	 * FIXME: Wa for more modern gens needs to be validated
	 */
	ENGINE_TRACE(engine, "\n");
	intel_engine_stop_cs(engine);

	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		ENGINE_TRACE(engine,
			     "HEAD not reset to zero, "
			     "{ CTL:%08x, HEAD:%08x, TAIL:%08x, START:%08x }\n",
			     ENGINE_READ_FW(engine, RING_CTL),
			     ENGINE_READ_FW(engine, RING_HEAD),
			     ENGINE_READ_FW(engine, RING_TAIL),
			     ENGINE_READ_FW(engine, RING_START));
		if (!stop_ring(engine)) {
			drm_err(&engine->i915->drm,
				"failed to set %s head to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				engine->name,
				ENGINE_READ_FW(engine, RING_CTL),
				ENGINE_READ_FW(engine, RING_HEAD),
				ENGINE_READ_FW(engine, RING_TAIL),
				ENGINE_READ_FW(engine, RING_START));
		}
	}
}

static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
	struct i915_request *pos, *rq;
	unsigned long flags;
	u32 head;

	rq = NULL;
	spin_lock_irqsave(&engine->sched_engine->lock, flags);
	rcu_read_lock();
	list_for_each_entry(pos, &engine->sched_engine->requests, sched.link) {
		if (!__i915_request_is_complete(pos)) {
			rq = pos;
			break;
		}
	}
	rcu_read_unlock();

	/*
	 * The guilty request will get skipped on a hung engine.
	 *
	 * Users of client default contexts do not rely on logical
	 * state preserved between batches so it is safe to execute
	 * queued requests following the hang. Non default contexts
	 * rely on preserved state, so skipping a batch loses the
	 * evolution of the state and it needs to be considered corrupted.
	 * Executing more queued batches on top of corrupted state is
	 * risky. But we take the risk by trying to advance through
	 * the queued requests in order to make the client behaviour
	 * more predictable around resets, by not throwing away a random
	 * number of batches it has prepared for execution. Sophisticated
	 * clients can use gem_reset_stats_ioctl and dma fence status
	 * (exported via sync_file info ioctl on explicit fences) to observe
	 * when they lose the context state and should rebuild accordingly.
	 *
	 * The context ban, and ultimately the client ban, mechanisms are
	 * safety valves if client submission ends up resulting in nothing
	 * more than subsequent hangs.
	 */

	if (rq) {
		/*
		 * Try to restore the logical GPU state to match the
		 * continuation of the request queue. If we skip the
		 * context/PD restore, then the next request may try to execute
		 * assuming that its context is valid and loaded on the GPU and
		 * so may try to access invalid memory, prompting repeated GPU
		 * hangs.
		 *
		 * If the request was guilty, we still restore the logical
		 * state in case the next request requires it (e.g. the
		 * aliasing ppgtt), but skip over the hung batch.
		 *
		 * If the request was innocent, we try to replay the request
		 * with the restored context.
		 */
		__i915_request_reset(rq, stalled);

		GEM_BUG_ON(rq->ring != engine->legacy.ring);
		head = rq->head;
	} else {
		head = engine->legacy.ring->tail;
	}
	engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);

	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}

static void reset_finish(struct intel_engine_cs *engine)
{
}

static void reset_cancel(struct intel_engine_cs *engine)
{
	struct i915_request *request;
	unsigned long flags;

	spin_lock_irqsave(&engine->sched_engine->lock, flags);

	/* Mark all submitted requests as skipped. */
	list_for_each_entry(request, &engine->sched_engine->requests, sched.link)
		i915_request_put(i915_request_mark_eio(request));
	intel_engine_signal_breadcrumbs(engine);

	/* Remaining _unready_ requests will be nop'ed when submitted */

	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}

static void i9xx_submit_request(struct i915_request *request)
{
	i915_request_submit(request);
	wmb(); /* paranoid flush writes out of the WCB before mmio */

	ENGINE_WRITE(request->engine, RING_TAIL,
		     intel_ring_set_tail(request->ring, request->tail));
}

static void __ring_context_fini(struct intel_context *ce)
{
	i915_vma_put(ce->state);
}

static void ring_context_destroy(struct kref *ref)
{
	struct intel_context *ce = container_of(ref, typeof(*ce), ref);

	GEM_BUG_ON(intel_context_is_pinned(ce));

	if (ce->state)
		__ring_context_fini(ce);

	intel_context_fini(ce);
	intel_context_free(ce);
}

static int ring_context_init_default_state(struct intel_context *ce,
					   struct i915_gem_ww_ctx *ww)
{
	struct drm_i915_gem_object *obj = ce->state->obj;
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

#ifdef __linux__
	shmem_read(ce->engine->default_state, 0,
		   vaddr, ce->engine->context_size);
#else
	uao_read(ce->engine->default_state, 0,
		 vaddr, ce->engine->context_size);
#endif

	i915_gem_object_flush_map(obj);
	__i915_gem_object_release_map(obj);

	__set_bit(CONTEXT_VALID_BIT, &ce->flags);
	return 0;
}

static int ring_context_pre_pin(struct intel_context *ce,
				struct i915_gem_ww_ctx *ww,
				void **unused)
{
	struct i915_address_space *vm;
	int err = 0;

	if (ce->engine->default_state &&
	    !test_bit(CONTEXT_VALID_BIT, &ce->flags)) {
		err = ring_context_init_default_state(ce, ww);
		if (err)
			return err;
	}

	vm = vm_alias(ce->vm);
	if (vm)
		err = gen6_ppgtt_pin(i915_vm_to_ppgtt(vm), ww);

	return err;
}

static void __context_unpin_ppgtt(struct intel_context *ce)
{
	struct i915_address_space *vm;

	vm = vm_alias(ce->vm);
	if (vm)
		gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
}

static void ring_context_unpin(struct intel_context *ce)
{
}

static void ring_context_post_unpin(struct intel_context *ce)
{
	__context_unpin_ppgtt(ce);
}

static struct i915_vma *
alloc_context_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_shmem(i915, engine->context_size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(i915))
		i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int ring_context_alloc(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;

	/* One ringbuffer to rule them all */
	GEM_BUG_ON(!engine->legacy.ring);
	ce->ring = engine->legacy.ring;
	ce->timeline = intel_timeline_get(engine->legacy.timeline);

	GEM_BUG_ON(ce->state);
	if (engine->context_size) {
		struct i915_vma *vma;

		vma = alloc_context_vma(engine);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ce->state = vma;
	}

	return 0;
}

static int ring_context_pin(struct intel_context *ce, void *unused)
{
	return 0;
}

static void ring_context_reset(struct intel_context *ce)
{
	intel_ring_reset(ce->ring, ce->ring->emit);
	clear_bit(CONTEXT_VALID_BIT, &ce->flags);
}

static void ring_context_ban(struct intel_context *ce,
			     struct i915_request *rq)
{
	struct intel_engine_cs *engine;

	if (!rq || !i915_request_is_active(rq))
		return;

	engine = rq->engine;
	lockdep_assert_held(&engine->sched_engine->lock);
	list_for_each_entry_continue(rq, &engine->sched_engine->requests,
				     sched.link)
		if (rq->context == ce) {
			i915_request_set_error_once(rq, -EIO);
			__i915_request_skip(rq);
		}
}

static void ring_context_cancel_request(struct intel_context *ce,
					struct i915_request *rq)
{
	struct intel_engine_cs *engine = NULL;

	i915_request_active_engine(rq, &engine);

	if (engine && intel_engine_pulse(engine))
		intel_gt_handle_error(engine->gt, engine->mask, 0,
				      "request cancellation by %s",
				      curproc->p_p->ps_comm);
}

static const struct intel_context_ops ring_context_ops = {
	.alloc = ring_context_alloc,

	.cancel_request = ring_context_cancel_request,

	.ban = ring_context_ban,

	.pre_pin = ring_context_pre_pin,
	.pin = ring_context_pin,
	.unpin = ring_context_unpin,
	.post_unpin = ring_context_post_unpin,

	.enter = intel_context_enter_engine,
	.exit = intel_context_exit_engine,

	.reset = ring_context_reset,
	.destroy = ring_context_destroy,
};

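/*
 * Emit commands that point the ring's page-directory registers at @vm:
 * load PP_DIR_DCLV/PP_DIR_BASE, read the base back into scratch as a
 * crude load barrier, then force a TLB invalidation via INSTPM.
 */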
static int load_pd_dir(struct i915_request *rq,
		       struct i915_address_space *vm,
		       u32 valid)
{
	const struct intel_engine_cs * const engine = rq->engine;
	u32 *cs;

	cs = intel_ring_begin(rq, 12);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
	*cs++ = valid;

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = pp_dir(vm);

	/* Stall until the page table load is complete? */
	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = intel_gt_scratch_offset(engine->gt,
					INTEL_GT_SCRATCH_FIELD_DEFAULT);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base));
	*cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE);

	intel_ring_advance(rq, cs);

	return rq->engine->emit_flush(rq, EMIT_FLUSH);
}

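/*
 * Emit an MI_SET_CONTEXT switch to @ce, bracketed by the per-gen
 * workarounds: arbitration off/on plus PSMI sleep-message fiddling on
 * the other engines for gen7/hsw, and MI_SUSPEND_FLUSH on ilk.
 */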
static int mi_set_context(struct i915_request *rq,
			  struct intel_context *ce,
			  u32 flags)
{
	struct intel_engine_cs *engine = rq->engine;
	struct drm_i915_private *i915 = engine->i915;
	enum intel_engine_id id;
	const int num_engines =
		IS_HASWELL(i915) ? engine->gt->info.num_engines - 1 : 0;
	bool force_restore = false;
	int len;
	u32 *cs;

	len = 4;
	if (GRAPHICS_VER(i915) == 7)
		len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
	else if (GRAPHICS_VER(i915) == 5)
		len += 2;
	if (flags & MI_FORCE_RESTORE) {
		GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
		flags &= ~MI_FORCE_RESTORE;
		force_restore = true;
		len += 2;
	}

	cs = intel_ring_begin(rq, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (GRAPHICS_VER(i915) == 7) {
		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
		if (num_engines) {
			struct intel_engine_cs *signaller;

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				*cs++ = i915_mmio_reg_offset(
					   RING_PSMI_CTL(signaller->mmio_base));
				*cs++ = _MASKED_BIT_ENABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}
		}
	} else if (GRAPHICS_VER(i915) == 5) {
		/*
		 * This w/a is only listed for pre-production ilk a/b steppings,
		 * but is also mentioned for programming the powerctx. To be
		 * safe, just apply the workaround; we do not use SyncFlush so
		 * this should never take effect and so be a no-op!
		 */
		*cs++ = MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN;
	}

	if (force_restore) {
		/*
		 * The HW doesn't handle being told to restore the current
		 * context very well. Quite often it likes to go off and
		 * sulk, especially when it is meant to be reloading PP_DIR.
		 * A very simple fix to force the reload is to simply switch
		 * away from the current context and back again.
		 *
		 * Note that the kernel_context will contain random state
		 * following the INHIBIT_RESTORE. We accept this since we
		 * never use the kernel_context state; it is merely a
		 * placeholder we use to flush other contexts.
		 */
		*cs++ = MI_SET_CONTEXT;
		*cs++ = i915_ggtt_offset(engine->kernel_context->state) |
			MI_MM_SPACE_GTT |
			MI_RESTORE_INHIBIT;
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_SET_CONTEXT;
	*cs++ = i915_ggtt_offset(ce->state) | flags;
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	*cs++ = MI_NOOP;

	if (GRAPHICS_VER(i915) == 7) {
		if (num_engines) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				*cs++ = i915_mmio_reg_offset(last_reg);
				*cs++ = _MASKED_BIT_DISABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}

			/* Insert a delay before the next switch! */
			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
			*cs++ = i915_mmio_reg_offset(last_reg);
			*cs++ = intel_gt_scratch_offset(engine->gt,
							INTEL_GT_SCRATCH_FIELD_DEFAULT);
			*cs++ = MI_NOOP;
		}
		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	} else if (GRAPHICS_VER(i915) == 5) {
		*cs++ = MI_SUSPEND_FLUSH;
	}

	intel_ring_advance(rq, cs);

	return 0;
}

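/*
 * Replay the saved L3 remapping registers for one slice as a single
 * long MI_LOAD_REGISTER_IMM.
 */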
static int remap_l3_slice(struct i915_request *rq, int slice)
{
#define L3LOG_DW (GEN7_L3LOG_SIZE / sizeof(u32))
	u32 *cs, *remap_info = rq->engine->i915->l3_parity.remap_info[slice];
	int i;

	if (!remap_info)
		return 0;

	cs = intel_ring_begin(rq, L3LOG_DW * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(L3LOG_DW);
	for (i = 0; i < L3LOG_DW; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
		*cs++ = remap_info[i];
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
#undef L3LOG_DW
}

static int remap_l3(struct i915_request *rq)
{
	struct i915_gem_context *ctx = i915_request_gem_context(rq);
	int i, err;

	if (!ctx || !ctx->remap_slice)
		return 0;

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(ctx->remap_slice & BIT(i)))
			continue;

		err = remap_l3_slice(rq, i);
		if (err)
			return err;
	}

	ctx->remap_slice = 0;
	return 0;
}

static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
{
	int ret;

	if (!vm)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_FLUSH);
	if (ret)
		return ret;

	/*
	 * Not only do we need a full barrier (post-sync write) after
	 * invalidating the TLBs, but we need to wait a little bit
	 * longer. Whether this is merely delaying us, or the
	 * subsequent flush is a key part of serialising with the
	 * post-sync op, this extra pass appears vital before a
	 * mm switch!
	 */
	ret = load_pd_dir(rq, vm, PP_DIR_DCLV_2G);
	if (ret)
		return ret;

	return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
}

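/*
 * Scrub residual state left by the previous context: switch to the
 * kernel context and run the engine's workaround batch (see
 * gen7_setup_clear_gpr_bb()) before the next context is restored.
 */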
static int clear_residuals(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	int ret;

	ret = switch_mm(rq, vm_alias(engine->kernel_context->vm));
	if (ret)
		return ret;

	if (engine->kernel_context->state) {
		ret = mi_set_context(rq,
				     engine->kernel_context,
				     MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT);
		if (ret)
			return ret;
	}

	ret = engine->emit_bb_start(rq,
				    engine->wa_ctx.vma->node.start, 0,
				    0);
	if (ret)
		return ret;

	ret = engine->emit_flush(rq, EMIT_FLUSH);
	if (ret)
		return ret;

	/* Always invalidate before the next switch_mm() */
	return engine->emit_flush(rq, EMIT_INVALIDATE);
}

static int switch_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct intel_context *ce = rq->context;
	void **residuals = NULL;
	int ret;

	GEM_BUG_ON(HAS_EXECLISTS(engine->i915));

	if (engine->wa_ctx.vma && ce != engine->kernel_context) {
		if (engine->wa_ctx.vma->private != ce &&
		    i915_mitigate_clear_residuals()) {
			ret = clear_residuals(rq);
			if (ret)
				return ret;

			residuals = &engine->wa_ctx.vma->private;
		}
	}

	ret = switch_mm(rq, vm_alias(ce->vm));
	if (ret)
		return ret;

	if (ce->state) {
		u32 flags;

		GEM_BUG_ON(engine->id != RCS0);

		/* For resource streamer on HSW+ and power context elsewhere */
		BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
		BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);

		flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
		if (test_bit(CONTEXT_VALID_BIT, &ce->flags))
			flags |= MI_RESTORE_EXT_STATE_EN;
		else
			flags |= MI_RESTORE_INHIBIT;

		ret = mi_set_context(rq, ce, flags);
		if (ret)
			return ret;
	}

	ret = remap_l3(rq);
	if (ret)
		return ret;

	/*
	 * Now past the point of no return, this request _will_ be emitted.
	 *
	 * Or at least this preamble will be emitted, the request may be
	 * interrupted prior to submitting the user payload. If so, we
	 * still submit the "empty" request in order to preserve global
	 * state tracking such as this, our tracking of the current
	 * dirty context.
	 */
	if (residuals) {
		intel_context_put(*residuals);
		*residuals = intel_context_get(ce);
	}

	return 0;
}

static int ring_request_alloc(struct i915_request *request)
{
	int ret;

	GEM_BUG_ON(!intel_context_is_pinned(request->context));
	GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);

	/*
	 * Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;

	/* Unconditionally invalidate GPU caches and TLBs. */
	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
	if (ret)
		return ret;

	ret = switch_context(request);
	if (ret)
		return ret;

	request->reserved_space -= LEGACY_REQUEST_SIZE;
	return 0;
}

static void gen6_bsd_submit_request(struct i915_request *request)
{
	struct intel_uncore *uncore = request->engine->uncore;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
			      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (__intel_wait_for_register_fw(uncore,
					 GEN6_BSD_SLEEP_PSMI_CONTROL,
					 GEN6_BSD_SLEEP_INDICATOR,
					 0,
					 1000, 0, NULL))
		drm_err(&uncore->i915->drm,
			"timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	i9xx_submit_request(request);

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
			      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}

static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = i9xx_submit_request;
}

static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = gen6_bsd_submit_request;
}

static void ring_release(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) > 2 &&
		    (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);

	intel_engine_cleanup_common(engine);

	if (engine->wa_ctx.vma) {
		intel_context_put(engine->wa_ctx.vma->private);
		i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
	}

	intel_ring_unpin(engine->legacy.ring);
	intel_ring_put(engine->legacy.ring);

	intel_timeline_unpin(engine->legacy.timeline);
	intel_timeline_put(engine->legacy.timeline);
}

static void irq_handler(struct intel_engine_cs *engine, u16 iir)
{
	intel_engine_signal_breadcrumbs(engine);
}

static void setup_irq(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	intel_engine_set_irq_handler(engine, irq_handler);

	if (GRAPHICS_VER(i915) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
	} else if (GRAPHICS_VER(i915) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
	} else if (GRAPHICS_VER(i915) >= 3) {
		engine->irq_enable = gen3_irq_enable;
		engine->irq_disable = gen3_irq_disable;
	} else {
		engine->irq_enable = gen2_irq_enable;
		engine->irq_disable = gen2_irq_disable;
	}
}

static void add_to_engine(struct i915_request *rq)
{
	lockdep_assert_held(&rq->engine->sched_engine->lock);
	list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
}

static void remove_from_engine(struct i915_request *rq)
{
	spin_lock_irq(&rq->engine->sched_engine->lock);
	list_del_init(&rq->sched.link);

	/* Prevent further __await_execution() registering a cb, then flush */
	set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);

	spin_unlock_irq(&rq->engine->sched_engine->lock);

	i915_request_notify_execute_cb_imm(rq);
}

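/*
 * Wire up the vfuncs common to all legacy ringbuffer engines; the
 * per-class setup_{rcs,vcs,bcs,vecs}() helpers below refine them.
 */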
static void setup_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	/* gen8+ are only supported with execlists */
	GEM_BUG_ON(GRAPHICS_VER(i915) >= 8);

	setup_irq(engine);

	engine->resume = xcs_resume;
	engine->sanitize = xcs_sanitize;

	engine->reset.prepare = reset_prepare;
	engine->reset.rewind = reset_rewind;
	engine->reset.cancel = reset_cancel;
	engine->reset.finish = reset_finish;

	engine->add_active_request = add_to_engine;
	engine->remove_active_request = remove_from_engine;

	engine->cops = &ring_context_ops;
	engine->request_alloc = ring_request_alloc;

	/*
	 * Using a global execution timeline; the previous final breadcrumb is
	 * equivalent to our next initial breadcrumb, so we can elide
	 * engine->emit_init_breadcrumb().
	 */
	engine->emit_fini_breadcrumb = gen3_emit_breadcrumb;
	if (GRAPHICS_VER(i915) == 5)
		engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;

	engine->set_default_submission = i9xx_set_default_submission;

	if (GRAPHICS_VER(i915) >= 6)
		engine->emit_bb_start = gen6_emit_bb_start;
	else if (GRAPHICS_VER(i915) >= 4)
		engine->emit_bb_start = gen4_emit_bb_start;
	else if (IS_I830(i915) || IS_I845G(i915))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = gen3_emit_bb_start;
}

static void setup_rcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (HAS_L3_DPF(i915))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;

	if (GRAPHICS_VER(i915) >= 7) {
		engine->emit_flush = gen7_emit_flush_rcs;
		engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_rcs;
	} else if (GRAPHICS_VER(i915) == 6) {
		engine->emit_flush = gen6_emit_flush_rcs;
		engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_rcs;
	} else if (GRAPHICS_VER(i915) == 5) {
		engine->emit_flush = gen4_emit_flush_rcs;
	} else {
		if (GRAPHICS_VER(i915) < 4)
			engine->emit_flush = gen2_emit_flush;
		else
			engine->emit_flush = gen4_emit_flush_rcs;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

	if (IS_HASWELL(i915))
		engine->emit_bb_start = hsw_emit_bb_start;
}

static void setup_vcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (GRAPHICS_VER(i915) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (GRAPHICS_VER(i915) == 6)
			engine->set_default_submission = gen6_bsd_set_default_submission;
		engine->emit_flush = gen6_emit_flush_vcs;
		engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;

		if (GRAPHICS_VER(i915) == 6)
			engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
		else
			engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
	} else {
		engine->emit_flush = gen4_emit_flush_vcs;
		if (GRAPHICS_VER(i915) == 5)
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}
}

static void setup_bcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	engine->emit_flush = gen6_emit_flush_xcs;
	engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	if (GRAPHICS_VER(i915) == 6)
		engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
	else
		engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}

static void setup_vecs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	GEM_BUG_ON(GRAPHICS_VER(i915) < 7);

	engine->emit_flush = gen6_emit_flush_xcs;
	engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
	engine->irq_enable = hsw_irq_enable_vecs;
	engine->irq_disable = hsw_irq_disable_vecs;

	engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}

static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,
				    struct i915_vma * const vma)
{
	return gen7_setup_clear_gpr_bb(engine, vma);
}

static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine,
				   struct i915_gem_ww_ctx *ww,
				   struct i915_vma *vma)
{
	int err;

	err = i915_vma_pin_ww(vma, ww, 0, 0, PIN_USER | PIN_HIGH);
	if (err)
		return err;

	err = i915_vma_sync(vma);
	if (err)
		goto err_unpin;

	err = gen7_ctx_switch_bb_setup(engine, vma);
	if (err)
		goto err_unpin;

	engine->wa_ctx.vma = vma;
	return 0;

err_unpin:
	i915_vma_unpin(vma);
	return err;
}

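/*
 * Probe the size of the gen7 context-switch workaround batch, then
 * allocate a vma to hold it. vma->private carries a dummy context used
 * to track which context the batch last scrubbed.
 */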
static struct i915_vma *gen7_ctx_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int size, err;

	if (GRAPHICS_VER(engine->i915) != 7 || engine->class != RENDER_CLASS)
		return NULL;

	err = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
	if (err < 0)
		return ERR_PTR(err);
	if (!err)
		return NULL;

	size = roundup2(err, PAGE_SIZE);

	obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, engine->gt->vm, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return ERR_CAST(vma);
	}

	vma->private = intel_context_create(engine); /* dummy residuals */
	if (IS_ERR(vma->private)) {
		err = PTR_ERR(vma->private);
		vma->private = NULL;
		i915_gem_object_put(obj);
		return ERR_PTR(err);
	}

	return vma;
}

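/*
 * intel_ring_submission_setup - complete construction of a legacy
 * ringbuffer engine: install the per-class vfuncs, create and pin the
 * global timeline and a 16k ring (plus the gen7 workaround batch, if
 * any) under a ww transaction, and finally take over cleanup via
 * engine->release.
 */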
int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
	struct i915_gem_ww_ctx ww;
	struct intel_timeline *timeline;
	struct intel_ring *ring;
	struct i915_vma *gen7_wa_vma;
	int err;

	setup_common(engine);

	switch (engine->class) {
	case RENDER_CLASS:
		setup_rcs(engine);
		break;
	case VIDEO_DECODE_CLASS:
		setup_vcs(engine);
		break;
	case COPY_ENGINE_CLASS:
		setup_bcs(engine);
		break;
	case VIDEO_ENHANCEMENT_CLASS:
		setup_vecs(engine);
		break;
	default:
		MISSING_CASE(engine->class);
		return -ENODEV;
	}

	timeline = intel_timeline_create_from_engine(engine,
						     I915_GEM_HWS_SEQNO_ADDR);
	if (IS_ERR(timeline)) {
		err = PTR_ERR(timeline);
		goto err;
	}
	GEM_BUG_ON(timeline->has_initial_breadcrumb);

	ring = intel_engine_create_ring(engine, SZ_16K);
	if (IS_ERR(ring)) {
		err = PTR_ERR(ring);
		goto err_timeline;
	}

	GEM_BUG_ON(engine->legacy.ring);
	engine->legacy.ring = ring;
	engine->legacy.timeline = timeline;

	gen7_wa_vma = gen7_ctx_vma(engine);
	if (IS_ERR(gen7_wa_vma)) {
		err = PTR_ERR(gen7_wa_vma);
		goto err_ring;
	}

	i915_gem_ww_ctx_init(&ww, false);

retry:
	err = i915_gem_object_lock(timeline->hwsp_ggtt->obj, &ww);
	if (!err && gen7_wa_vma)
		err = i915_gem_object_lock(gen7_wa_vma->obj, &ww);
	if (!err && engine->legacy.ring->vma->obj)
		err = i915_gem_object_lock(engine->legacy.ring->vma->obj, &ww);
	if (!err)
		err = intel_timeline_pin(timeline, &ww);
	if (!err) {
		err = intel_ring_pin(ring, &ww);
		if (err)
			intel_timeline_unpin(timeline);
	}
	if (err)
		goto out;

	GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);

	if (gen7_wa_vma) {
		err = gen7_ctx_switch_bb_init(engine, &ww, gen7_wa_vma);
		if (err) {
			intel_ring_unpin(ring);
			intel_timeline_unpin(timeline);
		}
	}

out:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	if (err)
		goto err_gen7_put;

	/* Finally, take ownership and responsibility for cleanup! */
	engine->release = ring_release;

	return 0;

err_gen7_put:
	if (gen7_wa_vma) {
		intel_context_put(gen7_wa_vma->private);
		i915_gem_object_put(gen7_wa_vma->obj);
	}
err_ring:
	intel_ring_put(ring);
err_timeline:
	intel_timeline_put(timeline);
err:
	intel_engine_cleanup_common(engine);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_ring_submission.c"
#endif