// SPDX-License-Identifier: MIT
/*
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/intel_scheduler_helpers.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
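
/*
 * Registers that are known to read back differently from what was written
 * (effectively write-only) on the listed platform; wo_register() consults
 * this table so that check_dirty_whitelist() can skip them.
 */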
static const struct wo_register {
	enum intel_platform platform;
	u32 reg;
} wo_registers[] = {
	{ INTEL_GEMINILAKE, 0x731c }
};
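
/*
 * Reference snapshots of the expected workaround state: one global GT list
 * plus per-engine and per-context lists for every engine.
 */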
struct wa_lists {
	struct i915_wa_list gt_wa_list;
	struct {
		struct i915_wa_list wa_list;
		struct i915_wa_list ctx_wa_list;
	} engine[I915_NUM_ENGINES];
};
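
/*
 * Submit @rq and wait briefly for it to complete, returning -EIO if the
 * wait times out (otherwise @err is passed through unchanged).
 */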
static int request_add_sync(struct i915_request *rq, int err)
{
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -EIO;
	i915_request_put(rq);

	return err;
}
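
/*
 * Submit @rq and, if a spinner is supplied, wait for the spinner to start
 * executing on the GPU; -ETIMEDOUT if it never begins spinning.
 */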
static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
	int err = 0;

	i915_request_get(rq);
	i915_request_add(rq);
	if (spin && !igt_wait_for_spinner(spin, rq))
		err = -ETIMEDOUT;
	i915_request_put(rq);

	return err;
}
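
/*
 * Build reference copies of the global GT workaround list and of each
 * engine's own and context workaround lists, for later comparison against
 * the hardware state.
 */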
static void
reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	memset(lists, 0, sizeof(*lists));

	wa_init_start(&lists->gt_wa_list, gt, "GT_REF", "global");
	gt_init_workarounds(gt, &lists->gt_wa_list);
	wa_init_finish(&lists->gt_wa_list);

	for_each_engine(engine, gt, id) {
		struct i915_wa_list *wal = &lists->engine[id].wa_list;

		wa_init_start(wal, gt, "REF", engine->name);
		engine_init_workarounds(engine, wal);
		wa_init_finish(wal);

		__intel_engine_init_ctx_wa(engine,
					   &lists->engine[id].ctx_wa_list,
					   "CTX_REF");
	}
}

static void
reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_wa_list_free(&lists->engine[id].wa_list);

	intel_wa_list_free(&lists->gt_wa_list);
}
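
/*
 * Read back every RING_FORCE_TO_NONPRIV slot for @ce's engine: emit one
 * MI_STORE_REGISTER_MEM per slot into a scratch page and return that page
 * so the caller can compare the slots against the expected whitelist.
 */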
static struct drm_i915_gem_object *
read_nonprivs(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	const u32 base = engine->mmio_base;
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 srm, *cs;
	int err;
	int i;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(result))
		return result;

	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

	cs = i915_gem_object_pin_map_unlocked(result, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_obj;
	}
	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_flush_map(result);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_obj;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_pin;
	}

	err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (GRAPHICS_VER(engine->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	i915_request_add(rq);
	i915_vma_unpin(vma);

	return result;

err_req:
	i915_request_add(rq);
err_pin:
	i915_vma_unpin(vma);
err_obj:
	i915_gem_object_put(result);
	return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
	i915_reg_t reg = i < engine->whitelist.count ?
			 engine->whitelist.list[i].reg :
			 RING_NOPID(engine->mmio_base);

	return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
	unsigned int i;

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = results[i];

		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
			i, expected, actual);
	}
}
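
/*
 * Compare every RING_FORCE_TO_NONPRIV slot against the engine's whitelist
 * (unused slots are expected to point at RING_NOPID). A wedge timer guards
 * against the GPU hanging while we flush the results to the CPU domain.
 */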
static int check_whitelist(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	struct drm_i915_gem_object *results;
	struct intel_wedge_me wedge;
	u32 *vaddr;
	int err;
	int i;

	results = read_nonprivs(ce);
	if (IS_ERR(results))
		return PTR_ERR(results);

	err = 0;
	i915_gem_object_lock(results, NULL);
	intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);

	if (intel_gt_is_wedged(engine->gt))
		err = -EIO;
	if (err)
		goto out_put;

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(results);
out_put:
	i915_gem_object_unlock(results);
	i915_gem_object_put(results);
	return err;
}

static int do_device_reset(struct intel_engine_cs *engine)
{
	intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
	return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
	return intel_engine_reset(engine, "live_workarounds");
}

static int do_guc_reset(struct intel_engine_cs *engine)
{
	/* Currently a no-op as the reset is handled by GuC */
	return 0;
}
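
/*
 * Park a spinner in a fresh context so that the engine is busy with
 * non-default state when the reset is triggered.
 */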
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin,
			  struct i915_request **rq)
{
	struct intel_context *ce;
	int err = 0;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	*rq = igt_spinner_create_request(spin, ce, MI_NOOP);
	intel_context_put(ce);

	if (IS_ERR(*rq)) {
		spin = NULL;
		err = PTR_ERR(*rq);
		goto err;
	}

	err = request_add_spin(*rq, spin);
err:
	if (err && spin)
		igt_spinner_end(spin);

	return err;
}
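
/*
 * Verify the RING_NONPRIV whitelist before a reset, across the reset while
 * a scratch context is spinning, and finally in a freshly created context
 * afterwards.
 */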
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
					const char *name)
{
	struct intel_context *ce, *tmp;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	int err;

	pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
		engine->whitelist.count, engine->name, name);

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = igt_spinner_init(&spin, engine->gt);
	if (err)
		goto out_ctx;

	err = check_whitelist(ce);
	if (err) {
		pr_err("Invalid whitelist *before* %s reset!\n", name);
		goto out_spin;
	}

	err = switch_to_scratch_context(engine, &spin, &rq);
	if (err)
		goto out_spin;

	/* Ensure the spinner hasn't aborted */
	if (i915_request_completed(rq)) {
		pr_err("%s spinner failed to start\n", name);
		err = -ETIMEDOUT;
		goto out_spin;
	}

	with_intel_runtime_pm(engine->uncore->rpm, wakeref)
		err = reset(engine);

	/* Ensure the reset happens and kills the engine */
	if (err == 0)
		err = intel_selftest_wait_for_rq(rq);

	igt_spinner_end(&spin);

	if (err) {
		pr_err("%s reset failed\n", name);
		goto out_spin;
	}

	err = check_whitelist(ce);
	if (err) {
		pr_err("Whitelist not preserved in context across %s reset!\n",
		       name);
		goto out_spin;
	}

	tmp = intel_context_create(engine);
	if (IS_ERR(tmp)) {
		err = PTR_ERR(tmp);
		goto out_spin;
	}
	intel_context_put(ce);
	ce = tmp;

	err = check_whitelist(ce);
	if (err) {
		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
		       name);
		goto out_spin;
	}

out_spin:
	igt_spinner_fini(&spin);
out_ctx:
	intel_context_put(ce);
	return err;
}

static struct i915_vma *create_batch(struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
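
/*
 * Model what a write of @new should leave in a register whose writable bits
 * are given by @rsvd. The special value 0x0000ffff denotes a masked
 * register: the high 16 bits of the written value select which of the low
 * 16 bits are updated, e.g. writing 0x00010001 sets bit 0 and writing
 * 0x00010000 clears it.
 */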
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
	if (rsvd == 0x0000ffff) {
		old &= ~(new >> 16);
		old |= new & (new >> 16);
	} else {
		old &= ~rsvd;
		old |= new & rsvd;
	}

	return old;
}
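
/*
 * A register is treated as write-only if its whitelist entry carries the
 * WR access flag, or if it appears in the wo_registers table above for the
 * current platform.
 */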
static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
	enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
	int i;

	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_WR)
		return true;

	for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
		if (wo_registers[i].platform == platform &&
		    wo_registers[i].reg == reg)
			return true;
	}

	return false;
}

static bool timestamp(const struct intel_engine_cs *engine, u32 reg)
{
	reg = (reg - engine->mmio_base) & ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;
	switch (reg) {
	case 0x358:
	case 0x35c:
	case 0x3a8:
		return true;

	default:
		return false;
	}
}

static bool ro_register(u32 reg)
{
	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_RD)
		return true;

	return false;
}

static int whitelist_writable_count(struct intel_engine_cs *engine)
{
	int count = engine->whitelist.count;
	int i;

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			count--;
	}

	return count;
}
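
/*
 * For each whitelisted register, write a series of test patterns (and their
 * complements) from an unprivileged batch and read the register back after
 * each write. The batch is laid out as: SRM of the original value, then
 * LRI/SRM pairs for every pattern, then an LRM to restore the original. The
 * readback after the final 0xffffffff write gives the mask of writable bits
 * (with 0x0000ffff denoting a masked register), and every other readback
 * must match what reg_write() predicts.
 */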
static int check_dirty_whitelist(struct intel_context *ce)
{
	const u32 values[] = {
		0x00000000,
		0x01010101,
		0x10100101,
		0x03030303,
		0x30300303,
		0x05050505,
		0x50500505,
		0x0f0f0f0f,
		0xf00ff00f,
		0x10101010,
		0xf0f01010,
		0x30303030,
		0xa0a03030,
		0x50505050,
		0xc0c05050,
		0xf0f0f0f0,
		0x11111111,
		0x33333333,
		0x55555555,
		0x0000ffff,
		0x00ff00ff,
		0xff0000ff,
		0xffff00ff,
		0xffffffff,
	};
	struct intel_engine_cs *engine = ce->engine;
	struct i915_vma *scratch;
	struct i915_vma *batch;
	int err = 0, i, v, sz;
	u32 *cs, *results;

	sz = (2 * ARRAY_SIZE(values) + 1) * sizeof(u32);
	scratch = __vm_create_scratch_for_read_pinned(ce->vm, sz);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	batch = create_batch(ce->vm);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_scratch;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		struct i915_gem_ww_ctx ww;
		u64 addr = i915_vma_offset(scratch);
		struct i915_request *rq;
		u32 srm, lrm, rsvd;
		u32 expect;
		int idx;
		bool ro_reg;

		if (wo_register(engine, reg))
			continue;

		if (timestamp(engine, reg))
			continue; /* timestamps are expected to autoincrement */

		ro_reg = ro_register(reg);

		i915_gem_ww_ctx_init(&ww, false);
retry:
		cs = NULL;
		err = i915_gem_object_lock(scratch->obj, &ww);
		if (!err)
			err = i915_gem_object_lock(batch->obj, &ww);
		if (!err)
			err = intel_context_pin_ww(ce, &ww);
		if (err)
			goto out;

		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			goto out_ctx;
		}

		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
		if (IS_ERR(results)) {
			err = PTR_ERR(results);
			goto out_unmap_batch;
		}

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		srm = MI_STORE_REGISTER_MEM;
		lrm = MI_LOAD_REGISTER_MEM;
		if (GRAPHICS_VER(engine->i915) >= 8)
			lrm++, srm++;

		pr_debug("%s: Writing garbage to %x\n",
			 engine->name, reg);

		/* SRM original */
		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = ~values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

		/* LRM original -- don't leave garbage in the context! */
		*cs++ = lrm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		*cs++ = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(batch->obj);
		i915_gem_object_unpin_map(batch->obj);
		intel_gt_chipset_flush(engine->gt);
		cs = NULL;

		rq = i915_request_create(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_unmap_scratch;
		}

		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
			err = engine->emit_init_breadcrumb(rq);
			if (err)
				goto err_request;
		}

		err = i915_vma_move_to_active(batch, rq, 0);
		if (err)
			goto err_request;

		err = i915_vma_move_to_active(scratch, rq,
					      EXEC_OBJECT_WRITE);
		if (err)
			goto err_request;

		err = engine->emit_bb_start(rq,
					    i915_vma_offset(batch), PAGE_SIZE,
					    0);
		if (err)
			goto err_request;

err_request:
		err = request_add_sync(rq, err);
		if (err) {
			pr_err("%s: Futzing %x timed out; cancelling test\n",
			       engine->name, reg);
			intel_gt_set_wedged(engine->gt);
			goto out_unmap_scratch;
		}

		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
		if (!ro_reg) {
			/* detect write masking */
			rsvd = results[ARRAY_SIZE(values)];
			if (!rsvd) {
				pr_err("%s: Unable to write to whitelisted register %x\n",
				       engine->name, reg);
				err = -EINVAL;
				goto out_unmap_scratch;
			}
		} else {
			rsvd = 0;
		}

		expect = results[0];
		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, ~values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		if (err) {
			pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
			       engine->name, err, reg);

			if (ro_reg)
				pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
					engine->name, reg, results[0]);
			else
				pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
					engine->name, reg, results[0], rsvd);

			expect = results[0];
			idx = 1;
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = ~values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}

			err = -EINVAL;
		}
out_unmap_scratch:
		i915_gem_object_unpin_map(scratch->obj);
out_unmap_batch:
		if (cs)
			i915_gem_object_unpin_map(batch->obj);
out_ctx:
		intel_context_unpin(ce);
out:
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		if (err)
			break;
	}

	if (igt_flush_test(engine->i915))
		err = -EIO;

	i915_vma_unpin_and_release(&batch, 0);
out_scratch:
	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

static int live_dirty_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Can the user write to the whitelisted registers? */

	if (GRAPHICS_VER(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
		return 0;

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;
		int err;

		if (engine->whitelist.count == 0)
			continue;

		ce = intel_context_create(engine);
		if (IS_ERR(ce))
			return PTR_ERR(ce);

		err = check_dirty_whitelist(ce);
		intel_context_put(ce);
		if (err)
			return err;
	}

	return 0;
}

static int live_reset_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/* If we reset the gpu, we should not lose the RING_NONPRIV */
	igt_global_reset_lock(gt);

	for_each_engine(engine, gt, id) {
		if (engine->whitelist.count == 0)
			continue;

		if (intel_has_reset_engine(gt)) {
			if (intel_engine_uses_guc(engine)) {
				struct intel_selftest_saved_policy saved;
				int err2;

				err = intel_selftest_modify_policy(engine, &saved,
								   SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
				if (err)
					goto out;

				err = check_whitelist_across_reset(engine,
								   do_guc_reset,
								   "guc");

				err2 = intel_selftest_restore_policy(engine, &saved);
				if (err == 0)
					err = err2;
			} else {
				err = check_whitelist_across_reset(engine,
								   do_engine_reset,
								   "engine");
			}

			if (err)
				goto out;
		}

		if (intel_has_gpu_reset(gt)) {
			err = check_whitelist_across_reset(engine,
							   do_device_reset,
							   "device");
			if (err)
				goto out;
		}
	}

out:
	igt_global_reset_unlock(gt);
	return err;
}
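
/*
 * Emit an SRM for every whitelisted register into @results so the current
 * register values can be compared between contexts.
 */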
static int read_whitelisted_registers(struct intel_context *ce,
				      struct i915_vma *results)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq;
	int i, err = 0;
	u32 srm, *cs;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	err = igt_vma_move_to_active_unlocked(results, rq, EXEC_OBJECT_WRITE);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM;
	if (GRAPHICS_VER(engine->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u64 offset = i915_vma_offset(results) + sizeof(u32) * i;
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(offset);
		*cs++ = upper_32_bits(offset);
	}
	intel_ring_advance(rq, cs);

err_req:
	return request_add_sync(rq, err);
}
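
/*
 * From an unprivileged batch, write 0xffffffff into every writable
 * whitelisted register; the dirtied values should stay confined to @ce's
 * context.
 */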
static int scrub_whitelisted_registers(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq;
	struct i915_vma *batch;
	int i, err = 0;
	u32 *cs;

	batch = create_batch(ce->vm);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}

	*cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			continue;

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		*cs++ = reg;
		*cs++ = 0xffffffff;
	}
	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch->obj);
	intel_gt_chipset_flush(engine->gt);

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto err_request;
	}

	err = igt_vma_move_to_active_unlocked(batch, rq, 0);
	if (err)
		goto err_request;

	/* Perform the writes from an unprivileged "user" batch */
	err = engine->emit_bb_start(rq, i915_vma_offset(batch), 0, 0);

err_request:
	err = request_add_sync(rq, err);

err_unpin:
	i915_gem_object_unpin_map(batch->obj);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}

struct regmask {
	i915_reg_t reg;
	u8 graphics_ver;
};

static bool find_reg(struct drm_i915_private *i915,
		     i915_reg_t reg,
		     const struct regmask *tbl,
		     unsigned long count)
{
	u32 offset = i915_mmio_reg_offset(reg);

	while (count--) {
		if (GRAPHICS_VER(i915) == tbl->graphics_ver &&
		    i915_mmio_reg_offset(tbl->reg) == offset)
			return true;
		tbl++;
	}

	return false;
}

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Alas, we must pardon some whitelists. Mistakes already made */
	static const struct regmask pardon[] = {
		{ GEN9_CTX_PREEMPT_REG, 9 },
		{ _MMIO(0xb118), 9 }, /* GEN8_L3SQCREG4 */
	};

	return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
		      u32 a, u32 b, i915_reg_t reg)
{
	if (a != b && !pardon_reg(engine->i915, reg)) {
		pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
		       i915_mmio_reg_offset(reg), a, b);
		return false;
	}

	return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Some registers do not seem to behave, and our writes are unreadable */
	static const struct regmask wo[] = {
		{ GEN9_SLICE_COMMON_ECO_CHICKEN1, 9 },
	};

	return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
		       u32 a, u32 b, i915_reg_t reg)
{
	if (a == b && !writeonly_reg(engine->i915, reg)) {
		pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
		       i915_mmio_reg_offset(reg), a);
		return false;
	}

	return true;
}

static int
check_whitelisted_registers(struct intel_engine_cs *engine,
			    struct i915_vma *A,
			    struct i915_vma *B,
			    bool (*fn)(struct intel_engine_cs *engine,
				       u32 a, u32 b,
				       i915_reg_t reg))
{
	u32 *a, *b;
	int i, err;

	a = i915_gem_object_pin_map_unlocked(A->obj, I915_MAP_WB);
	if (IS_ERR(a))
		return PTR_ERR(a);

	b = i915_gem_object_pin_map_unlocked(B->obj, I915_MAP_WB);
	if (IS_ERR(b)) {
		err = PTR_ERR(b);
		goto err_a;
	}

	err = 0;
	for (i = 0; i < engine->whitelist.count; i++) {
		const struct i915_wa *wa = &engine->whitelist.list[i];

		if (i915_mmio_reg_offset(wa->reg) &
		    RING_FORCE_TO_NONPRIV_ACCESS_RD)
			continue;

		if (!fn(engine, a[i], b[i], wa->reg))
			err = -EINVAL;
	}

	i915_gem_object_unpin_map(B->obj);
err_a:
	i915_gem_object_unpin_map(A->obj);
	return err;
}

static int live_isolated_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct {
		struct i915_vma *scratch[2];
	} client[2] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, err = 0;

	/*
	 * Check that a write into a whitelist register works, but is
	 * invisible to a second context.
	 */

	if (!intel_engines_has_context_isolation(gt->i915))
		return 0;

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		client[i].scratch[0] =
			__vm_create_scratch_for_read_pinned(gt->vm, 4096);
		if (IS_ERR(client[i].scratch[0])) {
			err = PTR_ERR(client[i].scratch[0]);
			goto err;
		}

		client[i].scratch[1] =
			__vm_create_scratch_for_read_pinned(gt->vm, 4096);
		if (IS_ERR(client[i].scratch[1])) {
			err = PTR_ERR(client[i].scratch[1]);
			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
			goto err;
		}
	}

	for_each_engine(engine, gt, id) {
		struct intel_context *ce[2];

		if (!engine->kernel_context->vm)
			continue;

		if (!whitelist_writable_count(engine))
			continue;

		ce[0] = intel_context_create(engine);
		if (IS_ERR(ce[0])) {
			err = PTR_ERR(ce[0]);
			break;
		}
		ce[1] = intel_context_create(engine);
		if (IS_ERR(ce[1])) {
			err = PTR_ERR(ce[1]);
			intel_context_put(ce[0]);
			break;
		}

		/* Read default values */
		err = read_whitelisted_registers(ce[0], client[0].scratch[0]);
		if (err)
			goto err_ce;

		/* Try to overwrite registers (should only affect ctx0) */
		err = scrub_whitelisted_registers(ce[0]);
		if (err)
			goto err_ce;

		/* Read values from ctx1; we expect these to be defaults */
		err = read_whitelisted_registers(ce[1], client[1].scratch[0]);
		if (err)
			goto err_ce;

		/* Verify that both reads return the same default values */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[1].scratch[0],
						  result_eq);
		if (err)
			goto err_ce;

		/* Read back the updated values in ctx0 */
		err = read_whitelisted_registers(ce[0], client[0].scratch[1]);
		if (err)
			goto err_ce;

		/* User should be granted privilege to overwrite regs */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[0].scratch[1],
						  result_neq);
err_ce:
		intel_context_put(ce[1]);
		intel_context_put(ce[0]);
		if (err)
			break;
	}

err:
	for (i = 0; i < ARRAY_SIZE(client); i++) {
		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
	}

	if (igt_flush_test(gt->i915))
		err = -EIO;

	return err;
}
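
/*
 * Compare the reference workaround lists against what the hardware
 * currently reports, using a temporary context for the engine and context
 * lists.
 */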
static bool
verify_wa_lists(struct intel_gt *gt, struct wa_lists *lists,
		const char *str)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	bool ok = true;

	ok &= wa_list_verify(gt, &lists->gt_wa_list, str);

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = intel_context_create(engine);
		if (IS_ERR(ce))
			return false;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].wa_list,
					    str) == 0;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].ctx_wa_list,
					    str) == 0;

		intel_context_put(ce);
	}

	return ok;
}
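
/*
 * Verify that the workaround lists survive a full GPU reset: capture the
 * reference lists, check them, reset every engine, then check again.
 */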
static int
live_gpu_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	intel_wakeref_t wakeref;
	struct wa_lists *lists;
	bool ok;

	if (!intel_has_gpu_reset(gt))
		return 0;

	lists = kzalloc(sizeof(*lists), GFP_KERNEL);
	if (!lists)
		return -ENOMEM;

	pr_info("Verifying after GPU reset...\n");

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, lists);

	ok = verify_wa_lists(gt, lists, "before reset");
	if (!ok)
		goto out;

	intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");

	ok = verify_wa_lists(gt, lists, "after reset");

out:
	reference_lists_fini(gt, lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);
	kfree(lists);

	return ok ? 0 : -ESRCH;
}
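
/*
 * As above, but for per-engine resets: verify the lists while idle and then
 * again across a reset taken while a spinner keeps the engine busy. With
 * GuC submission the explicit idle reset is skipped, since the reset is
 * driven by GuC.
 */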
static int
live_engine_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct intel_context *ce;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	struct wa_lists *lists;
	int ret = 0;

	if (!intel_has_reset_engine(gt))
		return 0;

	lists = kzalloc(sizeof(*lists), GFP_KERNEL);
	if (!lists)
		return -ENOMEM;

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, lists);

	for_each_engine(engine, gt, id) {
		struct intel_selftest_saved_policy saved;
		bool using_guc = intel_engine_uses_guc(engine);
		bool ok;
		int ret2;

		pr_info("Verifying after %s reset...\n", engine->name);
		ret = intel_selftest_modify_policy(engine, &saved,
						   SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
		if (ret)
			break;

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			ret = PTR_ERR(ce);
			goto restore;
		}

		if (!using_guc) {
			ok = verify_wa_lists(gt, lists, "before reset");
			if (!ok) {
				ret = -ESRCH;
				goto err;
			}

			ret = intel_engine_reset(engine, "live_workarounds:idle");
			if (ret) {
				pr_err("%s: Reset failed while idle\n", engine->name);
				goto err;
			}

			ok = verify_wa_lists(gt, lists, "after idle reset");
			if (!ok) {
				ret = -ESRCH;
				goto err;
			}
		}

		ret = igt_spinner_init(&spin, engine->gt);
		if (ret)
			goto err;

		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			igt_spinner_fini(&spin);
			goto err;
		}

		ret = request_add_spin(rq, &spin);
		if (ret) {
			pr_err("%s: Spinner failed to start\n", engine->name);
			igt_spinner_fini(&spin);
			goto err;
		}

		/* Ensure the spinner hasn't aborted */
		if (i915_request_completed(rq)) {
			ret = -ETIMEDOUT;
			goto skip;
		}

		if (!using_guc) {
			ret = intel_engine_reset(engine, "live_workarounds:active");
			if (ret) {
				pr_err("%s: Reset failed on an active spinner\n",
				       engine->name);
				igt_spinner_fini(&spin);
				goto err;
			}
		}

		/* Ensure the reset happens and kills the engine */
		if (ret == 0)
			ret = intel_selftest_wait_for_rq(rq);

skip:
		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);

		ok = verify_wa_lists(gt, lists, "after busy reset");
		if (!ok)
			ret = -ESRCH;

err:
		intel_context_put(ce);

restore:
		ret2 = intel_selftest_restore_policy(engine, &saved);
		if (ret == 0)
			ret = ret2;
		if (ret)
			break;
	}

	reference_lists_fini(gt, lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);
	kfree(lists);

	igt_flush_test(gt->i915);

	return ret;
}

int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_dirty_whitelist),
		SUBTEST(live_reset_whitelist),
		SUBTEST(live_isolated_whitelist),
		SUBTEST(live_gpu_reset_workarounds),
		SUBTEST(live_engine_reset_workarounds),
	};

	if (intel_gt_is_wedged(to_gt(i915)))
		return 0;

	return intel_gt_live_subtests(tests, to_gt(i915));
}
1379c349dbc7Sjsg }
1380