/*	$NetBSD: selftest_workarounds.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: selftest_workarounds.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $");

#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

static const struct wo_register {
	enum intel_platform platform;
	u32 reg;
} wo_registers[] = {
	{ INTEL_GEMINILAKE, 0x731c }
};

struct wa_lists {
	struct i915_wa_list gt_wa_list;
	struct {
		struct i915_wa_list wa_list;
		struct i915_wa_list ctx_wa_list;
	} engine[I915_NUM_ENGINES];
};

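/*
 * Submit @rq and synchronously wait (up to 200ms) for it to complete,
 * folding any timeout into the error code being propagated.
 */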
static int request_add_sync(struct i915_request *rq, int err)
{
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -EIO;
	i915_request_put(rq);

	return err;
}

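/*
 * Submit a spinner request and wait for the spinner to start executing
 * on the GPU, reporting -ETIMEDOUT if it never does.
 */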
static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
	int err = 0;

	i915_request_get(rq);
	i915_request_add(rq);
	if (spin && !igt_wait_for_spinner(spin, rq))
		err = -ETIMEDOUT;
	i915_request_put(rq);

	return err;
}

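/*
 * Capture reference copies of the GT, engine and context workaround
 * lists so that the tests below can verify the live state against them.
 */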
static void
reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	memset(lists, 0, sizeof(*lists));

	wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
	gt_init_workarounds(gt->i915, &lists->gt_wa_list);
	wa_init_finish(&lists->gt_wa_list);

	for_each_engine(engine, gt, id) {
		struct i915_wa_list *wal = &lists->engine[id].wa_list;

		wa_init_start(wal, "REF", engine->name);
		engine_init_workarounds(engine, wal);
		wa_init_finish(wal);

		__intel_engine_init_ctx_wa(engine,
					   &lists->engine[id].ctx_wa_list,
					   "CTX_REF");
	}
}

static void
reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_wa_list_free(&lists->engine[id].wa_list);

	intel_wa_list_free(&lists->gt_wa_list);
}

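/*
 * Build a request that uses SRM to copy every RING_FORCE_TO_NONPRIV slot
 * of @engine into a new internal object, which is returned to the caller
 * for inspection.
 */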
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	const u32 base = engine->mmio_base;
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 srm, *cs;
	int err;
	int i;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(result))
		return result;

	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

	cs = i915_gem_object_pin_map(result, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_obj;
	}
	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_flush_map(result);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_obj;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_pin;
	}

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	i915_request_add(rq);
	i915_vma_unpin(vma);

	return result;

err_req:
	i915_request_add(rq);
err_pin:
	i915_vma_unpin(vma);
err_obj:
	i915_gem_object_put(result);
	return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
	i915_reg_t reg = i < engine->whitelist.count ?
			 engine->whitelist.list[i].reg :
			 RING_NOPID(engine->mmio_base);

	return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
	unsigned int i;

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = results[i];

		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
			i, expected, actual);
	}
}

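/*
 * Compare the RING_FORCE_TO_NONPRIV slots read back from the hardware
 * against the whitelist we expect to be programmed; slots beyond the
 * whitelist count are expected to hold RING_NOPID.
 */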
static int check_whitelist(struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *results;
	struct intel_wedge_me wedge;
	u32 *vaddr;
	int err;
	int i;

	results = read_nonprivs(ctx, engine);
	if (IS_ERR(results))
		return PTR_ERR(results);

	err = 0;
	i915_gem_object_lock(results);
	intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);
	i915_gem_object_unlock(results);
	if (intel_gt_is_wedged(engine->gt))
		err = -EIO;
	if (err)
		goto out_put;

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(results);
out_put:
	i915_gem_object_put(results);
	return err;
}

static int do_device_reset(struct intel_engine_cs *engine)
{
	intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
	return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
	return intel_engine_reset(engine, "live_workarounds");
}

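/*
 * Park the engine on a spinning batch in a temporary context so that the
 * subsequent reset happens while a non-default context is active.
 */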
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin)
{
	struct intel_context *ce;
	struct i915_request *rq;
	int err = 0;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	rq = igt_spinner_create_request(spin, ce, MI_NOOP);
	intel_context_put(ce);

	if (IS_ERR(rq)) {
		spin = NULL;
		err = PTR_ERR(rq);
		goto err;
	}

	err = request_add_spin(rq, spin);
err:
	if (err && spin)
		igt_spinner_end(spin);

	return err;
}

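/*
 * Check the whitelist before a reset, again in the same context after
 * the reset, and finally in a freshly created context.
 */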
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
					const char *name)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_gem_context *ctx, *tmp;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err;

	pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
		engine->whitelist.count, engine->name, name);

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	err = igt_spinner_init(&spin, engine->gt);
	if (err)
		goto out_ctx;

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *before* %s reset!\n", name);
		goto out_spin;
	}

	err = switch_to_scratch_context(engine, &spin);
	if (err)
		goto out_spin;

	with_intel_runtime_pm(engine->uncore->rpm, wakeref)
		err = reset(engine);

	igt_spinner_end(&spin);

	if (err) {
		pr_err("%s reset failed\n", name);
		goto out_spin;
	}

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Whitelist not preserved in context across %s reset!\n",
		       name);
		goto out_spin;
	}

	tmp = kernel_context(i915);
	if (IS_ERR(tmp)) {
		err = PTR_ERR(tmp);
		goto out_spin;
	}
	kernel_context_close(ctx);
	ctx = tmp;

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
		       name);
		goto out_spin;
	}

out_spin:
	igt_spinner_fini(&spin);
out_ctx:
	kernel_context_close(ctx);
	return err;
}

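/* Allocate and pin a 16-page internal object to serve as a user batch. */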
static struct i915_vma *create_batch(struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

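/*
 * Model the effect of a register write. An rsvd value of 0x0000ffff
 * denotes a masked register, where the upper 16 bits of the write select
 * which of the lower 16 bits are updated; otherwise rsvd is the mask of
 * writable bits. For example, writing 0x00030001 to a masked register
 * touches only bits 0 and 1, setting bit 0 and clearing bit 1.
 */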
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
	if (rsvd == 0x0000ffff) {
		old &= ~(new >> 16);
		old |= new & (new >> 16);
	} else {
		old &= ~rsvd;
		old |= new & rsvd;
	}

	return old;
}

static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
	enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
	int i;

	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_WR)
		return true;

	for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
		if (wo_registers[i].platform == platform &&
		    wo_registers[i].reg == reg)
			return true;
	}

	return false;
}

static bool ro_register(u32 reg)
{
	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_RD)
		return true;

	return false;
}

static int whitelist_writable_count(struct intel_engine_cs *engine)
{
	int count = engine->whitelist.count;
	int i;

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			count--;
	}

	return count;
}

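/*
 * For each whitelisted register on @ce's engine, write a series of
 * garbage values from a user batch, read every result back with SRM and
 * compare against reg_write()'s model (or against the original value for
 * read-only registers).
 */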
static int check_dirty_whitelist(struct intel_context *ce)
{
	const u32 values[] = {
		0x00000000,
		0x01010101,
		0x10100101,
		0x03030303,
		0x30300303,
		0x05050505,
		0x50500505,
		0x0f0f0f0f,
		0xf00ff00f,
		0x10101010,
		0xf0f01010,
		0x30303030,
		0xa0a03030,
		0x50505050,
		0xc0c05050,
		0xf0f0f0f0,
		0x11111111,
		0x33333333,
		0x55555555,
		0x0000ffff,
		0x00ff00ff,
		0xff0000ff,
		0xffff00ff,
		0xffffffff,
	};
	struct intel_engine_cs *engine = ce->engine;
	struct i915_vma *scratch;
	struct i915_vma *batch;
	int err = 0, i, v;
	u32 *cs, *results;

	scratch = create_scratch(ce->vm, 2 * ARRAY_SIZE(values) + 1);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	batch = create_batch(ce->vm);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_scratch;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		u64 addr = scratch->node.start;
		struct i915_request *rq;
		u32 srm, lrm, rsvd;
		u32 expect;
		int idx;
		bool ro_reg;

		if (wo_register(engine, reg))
			continue;

		ro_reg = ro_register(reg);

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		srm = MI_STORE_REGISTER_MEM;
		lrm = MI_LOAD_REGISTER_MEM;
		if (INTEL_GEN(engine->i915) >= 8)
			lrm++, srm++;

		pr_debug("%s: Writing garbage to %x\n",
			 engine->name, reg);

		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			goto out_batch;
		}

		/* SRM original */
		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = ~values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

		/* LRM original -- don't leave garbage in the context! */
		*cs++ = lrm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		*cs++ = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(batch->obj);
		i915_gem_object_unpin_map(batch->obj);
		intel_gt_chipset_flush(engine->gt);

		rq = intel_context_create_request(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_batch;
		}

		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
			err = engine->emit_init_breadcrumb(rq);
			if (err)
				goto err_request;
		}

		i915_vma_lock(batch);
		err = i915_request_await_object(rq, batch->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(batch, rq, 0);
		i915_vma_unlock(batch);
		if (err)
			goto err_request;

		err = engine->emit_bb_start(rq,
					    batch->node.start, PAGE_SIZE,
					    0);
		if (err)
			goto err_request;

err_request:
		err = request_add_sync(rq, err);
		if (err) {
			pr_err("%s: Futzing %x timed out; cancelling test\n",
			       engine->name, reg);
			intel_gt_set_wedged(engine->gt);
			goto out_batch;
		}

		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
		if (IS_ERR(results)) {
			err = PTR_ERR(results);
			goto out_batch;
		}

		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
		if (!ro_reg) {
			/* detect write masking */
			rsvd = results[ARRAY_SIZE(values)];
			if (!rsvd) {
				pr_err("%s: Unable to write to whitelisted register %x\n",
				       engine->name, reg);
				err = -EINVAL;
				goto out_unpin;
			}
		}

		expect = results[0];
		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, ~values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		if (err) {
			pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
			       engine->name, err, reg);

			if (ro_reg)
				pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
					engine->name, reg, results[0]);
			else
				pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
					engine->name, reg, results[0], rsvd);

			expect = results[0];
			idx = 1;
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = ~values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}

			err = -EINVAL;
		}
out_unpin:
		i915_gem_object_unpin_map(scratch->obj);
		if (err)
			break;
	}

	if (igt_flush_test(engine->i915))
		err = -EIO;
out_batch:
	i915_vma_unpin_and_release(&batch, 0);
out_scratch:
	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

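/* Run check_dirty_whitelist() on every engine that has a whitelist. */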
static int live_dirty_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Can the user write to the whitelisted registers? */

	if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
		return 0;

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;
		int err;

		if (engine->whitelist.count == 0)
			continue;

		ce = intel_context_create(engine);
		if (IS_ERR(ce))
			return PTR_ERR(ce);

		err = check_dirty_whitelist(ce);
		intel_context_put(ce);
		if (err)
			return err;
	}

	return 0;
}

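/*
 * Exercise check_whitelist_across_reset() with per-engine and full
 * device resets, as supported by the platform.
 */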
static int live_reset_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/* If we reset the gpu, we should not lose the RING_NONPRIV */
	igt_global_reset_lock(gt);

	for_each_engine(engine, gt, id) {
		if (engine->whitelist.count == 0)
			continue;

		if (intel_has_reset_engine(gt)) {
			err = check_whitelist_across_reset(engine,
							   do_engine_reset,
							   "engine");
			if (err)
				goto out;
		}

		if (intel_has_gpu_reset(gt)) {
			err = check_whitelist_across_reset(engine,
							   do_device_reset,
							   "device");
			if (err)
				goto out;
		}
	}

out:
	igt_global_reset_unlock(gt);
	return err;
}

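/*
 * Use SRM from @ctx to store the current value of each whitelisted
 * register of @engine into @results, one u32 per whitelist slot.
 */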
static int read_whitelisted_registers(struct i915_gem_context *ctx,
				      struct intel_engine_cs *engine,
				      struct i915_vma *results)
{
	struct i915_request *rq;
	int i, err = 0;
	u32 srm, *cs;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_vma_lock(results);
	err = i915_request_await_object(rq, results->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(results);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u64 offset = results->node.start + sizeof(u32) * i;
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(offset);
		*cs++ = upper_32_bits(offset);
	}
	intel_ring_advance(rq, cs);

err_req:
	return request_add_sync(rq, err);
}

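/*
 * From an unprivileged user batch in @ctx, write 0xffffffff to every
 * writable whitelisted register of @engine.
 */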
static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
				       struct intel_engine_cs *engine)
{
	struct i915_address_space *vm;
	struct i915_request *rq;
	struct i915_vma *batch;
	int i, err = 0;
	u32 *cs;

	vm = i915_gem_context_get_vm_rcu(ctx);
	batch = create_batch(vm);
	i915_vm_put(vm);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}

	*cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			continue;

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		*cs++ = reg;
		*cs++ = 0xffffffff;
	}
	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch->obj);
	intel_gt_chipset_flush(engine->gt);

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto err_request;
	}

	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto err_request;

	/* Perform the writes from an unprivileged "user" batch */
	err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

err_request:
	err = request_add_sync(rq, err);

err_unpin:
	i915_gem_object_unpin_map(batch->obj);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}

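/*
 * A (register, generation mask) pair describing a known exception that
 * the comparisons below must tolerate.
 */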
struct regmask {
	i915_reg_t reg;
	unsigned long gen_mask;
};

static bool find_reg(struct drm_i915_private *i915,
		     i915_reg_t reg,
		     const struct regmask *tbl,
		     unsigned long count)
{
	u32 offset = i915_mmio_reg_offset(reg);

	while (count--) {
		if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
		    i915_mmio_reg_offset(tbl->reg) == offset)
			return true;
		tbl++;
	}

	return false;
}

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Alas, we must pardon some whitelists. Mistakes already made */
	static const struct regmask pardon[] = {
		{ GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
		{ GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
		      u32 a, u32 b, i915_reg_t reg)
{
	if (a != b && !pardon_reg(engine->i915, reg)) {
		pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
		       i915_mmio_reg_offset(reg), a, b);
		return false;
	}

	return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Some registers do not seem to behave and our writes are unreadable */
	static const struct regmask wo[] = {
		{ GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
		       u32 a, u32 b, i915_reg_t reg)
{
	if (a == b && !writeonly_reg(engine->i915, reg)) {
		pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
		       i915_mmio_reg_offset(reg), a);
		return false;
	}

	return true;
}

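/*
 * Compare the whitelisted register snapshots in @A and @B slot by slot,
 * skipping read-only entries, and fail if any pair is rejected by @fn.
 */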
static int
check_whitelisted_registers(struct intel_engine_cs *engine,
			    struct i915_vma *A,
			    struct i915_vma *B,
			    bool (*fn)(struct intel_engine_cs *engine,
				       u32 a, u32 b,
				       i915_reg_t reg))
{
	u32 *a, *b;
	int i, err;

	a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
	if (IS_ERR(a))
		return PTR_ERR(a);

	b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
	if (IS_ERR(b)) {
		err = PTR_ERR(b);
		goto err_a;
	}

	err = 0;
	for (i = 0; i < engine->whitelist.count; i++) {
		const struct i915_wa *wa = &engine->whitelist.list[i];

		if (i915_mmio_reg_offset(wa->reg) &
		    RING_FORCE_TO_NONPRIV_ACCESS_RD)
			continue;

		if (!fn(engine, a[i], b[i], wa->reg))
			err = -EINVAL;
	}

	i915_gem_object_unpin_map(B->obj);
err_a:
	i915_gem_object_unpin_map(A->obj);
	return err;
}

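/*
 * Scrub the whitelisted registers from one context and verify that a
 * second context still reads back the default values, while the first
 * context observes its own writes.
 */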
static int live_isolated_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct {
		struct i915_gem_context *ctx;
		struct i915_vma *scratch[2];
	} client[2] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, err = 0;

	/*
	 * Check that a write into a whitelist register works, but is
	 * invisible to a second context.
	 */

	if (!intel_engines_has_context_isolation(gt->i915))
		return 0;

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		struct i915_address_space *vm;
		struct i915_gem_context *c;

		c = kernel_context(gt->i915);
		if (IS_ERR(c)) {
			err = PTR_ERR(c);
			goto err;
		}

		vm = i915_gem_context_get_vm_rcu(c);

		client[i].scratch[0] = create_scratch(vm, 1024);
		if (IS_ERR(client[i].scratch[0])) {
			err = PTR_ERR(client[i].scratch[0]);
			i915_vm_put(vm);
			kernel_context_close(c);
			goto err;
		}

		client[i].scratch[1] = create_scratch(vm, 1024);
		if (IS_ERR(client[i].scratch[1])) {
			err = PTR_ERR(client[i].scratch[1]);
			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
			i915_vm_put(vm);
			kernel_context_close(c);
			goto err;
		}

		client[i].ctx = c;
		i915_vm_put(vm);
	}

	for_each_engine(engine, gt, id) {
		if (!engine->kernel_context->vm)
			continue;

		if (!whitelist_writable_count(engine))
			continue;

		/* Read default values */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[0]);
		if (err)
			goto err;

		/* Try to overwrite registers (should only affect ctx0) */
		err = scrub_whitelisted_registers(client[0].ctx, engine);
		if (err)
			goto err;

		/* Read values from ctx1, we expect these to be defaults */
		err = read_whitelisted_registers(client[1].ctx, engine,
						 client[1].scratch[0]);
		if (err)
			goto err;

		/* Verify that both reads return the same default values */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[1].scratch[0],
						  result_eq);
		if (err)
			goto err;

		/* Read back the updated values in ctx0 */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[1]);
		if (err)
			goto err;

		/* User should be granted privilege to overwrite regs */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[0].scratch[1],
						  result_neq);
		if (err)
			goto err;
	}

err:
	for (i = 0; i < ARRAY_SIZE(client); i++) {
		if (!client[i].ctx)
			break;

		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
		kernel_context_close(client[i].ctx);
	}

	if (igt_flush_test(gt->i915))
		err = -EIO;

	return err;
}

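/*
 * Verify the live GT, engine and context workaround lists against the
 * reference copies captured by reference_lists_init().
 */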
static bool
verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
		const char *str)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	bool ok = true;

	ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);

	for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) {
		enum intel_engine_id id = ce->engine->id;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].wa_list,
					    str) == 0;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].ctx_wa_list,
					    str) == 0;
	}

	return ok;
}

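/*
 * Check that the workaround lists are still applied after a full GPU
 * reset.
 */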
static int
live_gpu_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gem_context *ctx;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	bool ok;

	if (!intel_has_gpu_reset(gt))
		return 0;

	ctx = kernel_context(gt->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	i915_gem_context_lock_engines(ctx);

	pr_info("Verifying after GPU reset...\n");

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, &lists);

	ok = verify_wa_lists(ctx, &lists, "before reset");
	if (!ok)
		goto out;

	intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");

	ok = verify_wa_lists(ctx, &lists, "after reset");

out:
	i915_gem_context_unlock_engines(ctx);
	kernel_context_close(ctx);
	reference_lists_fini(gt, &lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	return ok ? 0 : -ESRCH;
}

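/*
 * Check that the workaround lists survive a per-engine reset, both while
 * the engine is idle and while it is busy running a spinner.
 */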
static int
live_engine_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	int ret = 0;

	if (!intel_has_reset_engine(gt))
		return 0;

	ctx = kernel_context(gt->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, &lists);

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		struct intel_engine_cs *engine = ce->engine;
		bool ok;

		pr_info("Verifying after %s reset...\n", engine->name);

		ok = verify_wa_lists(ctx, &lists, "before reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		intel_engine_reset(engine, "live_workarounds");

		ok = verify_wa_lists(ctx, &lists, "after idle reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		ret = igt_spinner_init(&spin, engine->gt);
		if (ret)
			goto err;

		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			igt_spinner_fini(&spin);
			goto err;
		}

		ret = request_add_spin(rq, &spin);
		if (ret) {
			pr_err("Spinner failed to start\n");
			igt_spinner_fini(&spin);
			goto err;
		}

		intel_engine_reset(engine, "live_workarounds");

		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);

		ok = verify_wa_lists(ctx, &lists, "after busy reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}
	}
err:
	i915_gem_context_unlock_engines(ctx);
	reference_lists_fini(gt, &lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);
	kernel_context_close(ctx);

	igt_flush_test(gt->i915);

	return ret;
}

int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_dirty_whitelist),
		SUBTEST(live_reset_whitelist),
		SUBTEST(live_isolated_whitelist),
		SUBTEST(live_gpu_reset_workarounds),
		SUBTEST(live_engine_reset_workarounds),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}
1277