/*	$NetBSD: selftest_rc6.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: selftest_rc6.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $");

#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gt_requests.h"
#include "intel_ring.h"
#include "selftest_rc6.h"

#include "selftests/i915_random.h"

int live_rc6_manual(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_rc6 *rc6 = &gt->rc6;
	intel_wakeref_t wakeref;
	u64 res[2];
	int err = 0;

	/*
	 * Our claim is that we can "encourage" the GPU to enter rc6 at will.
	 * Let's try it!
	 */

	if (!rc6->enabled)
		return 0;

	/* bsw/byt use a PCU and decouple RC6 from our manual control */
	if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915))
		return 0;

	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	/* Force RC6 off for starters */
	__intel_rc6_disable(rc6);
	msleep(1); /* wakeup is not immediate, takes about 100us on icl */

	res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
	msleep(250);
	res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
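	/*
	 * Residency is reported in ns; ">> 10" approximates a divide by
	 * 1000, so any growth of a microsecond or more during the 250ms
	 * of enforced wakefulness means RC6 was not really off.
	 */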
	if ((res[1] - res[0]) >> 10) {
		pr_err("RC6 residency increased by %lldus while disabled for 250ms!\n",
		       (res[1] - res[0]) >> 10);
		err = -EINVAL;
		goto out_unlock;
	}

	/* Manually enter RC6 */
	intel_rc6_park(rc6);

	res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
	msleep(100);
	res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);

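	/* Now that we are parked, the residency counter should be ticking. */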
	if (res[1] == res[0]) {
		pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x\n",
		       intel_uncore_read_fw(gt->uncore, GEN6_RC_STATE),
		       intel_uncore_read_fw(gt->uncore, GEN6_RC_CONTROL));
		err = -EINVAL;
	}

	/* Restore what should have been the original state! */
	intel_rc6_unpark(rc6);

out_unlock:
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	return err;
}

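/*
 * Submit a request that uses MI_STORE_REGISTER_MEM to copy
 * GEN8_RC6_CTX_INFO into the request's timeline hwsp, and return a CPU
 * pointer to where the value will land once the request has executed.
 */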
static const u32 *__live_rc6_ctx(struct intel_context *ce)
{
	struct i915_request *rq;
	const u32 *result;
	u32 cmd;
	u32 *cs;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return ERR_CAST(rq);

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs)) {
		i915_request_add(rq);
		return cs;
	}

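	/*
	 * SRM the register into the hwsp via its GGTT address. On gen8+
	 * the address is 64b and the command is one dword longer, which
	 * the "cmd++" below encodes in the command's length field.
	 */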
	cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
	if (INTEL_GEN(rq->i915) >= 8)
		cmd++;

	*cs++ = cmd;
	*cs++ = i915_mmio_reg_offset(GEN8_RC6_CTX_INFO);
	*cs++ = ce->timeline->hwsp_offset + 8;
	*cs++ = 0;
	intel_ring_advance(rq, cs);

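	/* The hwsp is CPU-visible; +2 dwords matches the +8 bytes above */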
	result = rq->hwsp_seqno + 2;
	i915_request_add(rq);

	return result;
}

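/* Return an array of all the gt's engines, shuffled into a random order. */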
static struct intel_engine_cs **
randomised_engines(struct intel_gt *gt,
		   struct rnd_state *prng,
		   unsigned int *count)
{
	struct intel_engine_cs *engine, **engines;
	enum intel_engine_id id;
	int n;

	n = 0;
	for_each_engine(engine, gt, id)
		n++;
	if (!n)
		return NULL;

	engines = kmalloc_array(n, sizeof(*engines), GFP_KERNEL);
	if (!engines)
		return NULL;

	n = 0;
	for_each_engine(engine, gt, id)
		engines[n++] = engine;

	i915_prandom_shuffle(engines, sizeof(*engines), n, prng);

	*count = n;
	return engines;
}

int live_rc6_ctx_wa(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs **engines;
	unsigned int n, count;
	I915_RND_STATE(prng);
	int err = 0;

	/* A read of CTX_INFO upsets rc6. Poke the bear! */
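	/*
	 * Without the rc6 context workaround in place, that read can
	 * corrupt the saved RC6 context and hang the GPU, which would
	 * show up below as an unexpected engine reset.
	 */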
	if (INTEL_GEN(gt->i915) < 8)
		return 0;

	engines = randomised_engines(gt, &prng, &count);
	if (!engines)
		return 0;

	for (n = 0; n < count; n++) {
		struct intel_engine_cs *engine = engines[n];
		int pass;

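		/*
		 * Read CTX_INFO twice per engine, parking the GT in
		 * between (via intel_gt_pm_wait_for_idle() below), so the
		 * read is exercised both on first wakeup and again after
		 * a park/unpark cycle.
		 */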
		for (pass = 0; pass < 2; pass++) {
			struct intel_context *ce;
			unsigned int resets =
				i915_reset_engine_count(&gt->i915->gpu_error,
							engine);
			const u32 *res;

			/* Use a sacrificial context */
			ce = intel_context_create(engine);
			if (IS_ERR(ce)) {
				err = PTR_ERR(ce);
				goto out;
			}

			intel_engine_pm_get(engine);
			res = __live_rc6_ctx(ce);
			intel_engine_pm_put(engine);
			intel_context_put(ce);
			if (IS_ERR(res)) {
				err = PTR_ERR(res);
				goto out;
			}

			if (intel_gt_wait_for_idle(gt, HZ / 5) == -ETIME) {
				intel_gt_set_wedged(gt);
				err = -ETIME;
				goto out;
			}

			intel_gt_pm_wait_for_idle(gt);
			pr_debug("%s: CTX_INFO=%0x\n",
				 engine->name, READ_ONCE(*res));

			if (resets !=
			    i915_reset_engine_count(&gt->i915->gpu_error,
						    engine)) {
				pr_err("%s: GPU reset required\n",
				       engine->name);
				add_taint_for_CI(TAINT_WARN);
				err = -EIO;
				goto out;
			}
		}
	}

out:
	kfree(engines);
	return err;
}
209