/*	$NetBSD: i915_active.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_active.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $");

#include <linux/kref.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "lib_sw_fence.h"

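/*
 * Test wrapper around an i915_active: the kref keeps this allocation
 * alive across the asynchronous retirement callback, and "retired"
 * records that __live_retire has run.
 */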
struct live_active {
	struct i915_active base;
	struct kref ref;
	bool retired;
};

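/*
 * Reference counting for the wrapper itself, independent of the
 * embedded i915_active's own activity count; the final put finalises
 * the i915_active and frees the allocation.
 */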
static void __live_get(struct live_active *active)
{
	kref_get(&active->ref);
}

static void __live_free(struct live_active *active)
{
	i915_active_fini(&active->base);
	kfree(active);
}

static void __live_release(struct kref *ref)
{
	struct live_active *active = container_of(ref, typeof(*active), ref);

	__live_free(active);
}

static void __live_put(struct live_active *active)
{
	kref_put(&active->ref, __live_release);
}

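/*
 * i915_active "active" hook: take a wrapper reference for the duration
 * of the activity so the structure survives until retirement.
 */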
static int __live_active(struct i915_active *base)
{
	struct live_active *active = container_of(base, typeof(*active), base);

	__live_get(active);
	return 0;
}

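/*
 * i915_active "retire" hook: record that retirement happened, then drop
 * the reference taken in __live_active.
 */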
static void __live_retire(struct i915_active *base)
{
	struct live_active *active = container_of(base, typeof(*active), base);

	active->retired = true;
	__live_put(active);
}

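/* Allocate a wrapper holding one reference and an initialised i915_active. */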
static struct live_active *__live_alloc(struct drm_i915_private *i915)
{
	struct live_active *active;

	active = kzalloc(sizeof(*active), GFP_KERNEL);
	if (!active)
		return NULL;

	kref_init(&active->ref);
	i915_active_init(&active->base, __live_active, __live_retire);

	return active;
}

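/*
 * Build an i915_active tracking one kernel request per uabi engine.
 * Submission is held back by a heap sw-fence so we can check that the
 * active neither retires early nor loses track of any request; the
 * fence is committed, releasing submission, before returning.
 */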
static struct live_active *
__live_active_setup(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	struct i915_sw_fence *submit;
	struct live_active *active;
	unsigned int count = 0;
	int err = 0;

	active = __live_alloc(i915);
	if (!active)
		return ERR_PTR(-ENOMEM);

	submit = heap_fence_create(GFP_KERNEL);
	if (!submit) {
		kfree(active);
		return ERR_PTR(-ENOMEM);
	}

	err = i915_active_acquire(&active->base);
	if (err)
		goto out;

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;

		rq = intel_engine_create_kernel_request(engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			break;
		}

		err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
						       submit,
						       GFP_KERNEL);
		if (err >= 0)
			err = i915_active_add_request(&active->base, rq);
		i915_request_add(rq);
		if (err) {
			pr_err("Failed to track active ref!\n");
			break;
		}

		count++;
	}

	i915_active_release(&active->base);
	if (READ_ONCE(active->retired) && count) {
		pr_err("i915_active retired before submission!\n");
		err = -EINVAL;
	}
	if (atomic_read(&active->base.count) != count) {
		pr_err("i915_active not tracking all requests, found %d, expected %d\n",
		       atomic_read(&active->base.count), count);
		err = -EINVAL;
	}

out:
	i915_sw_fence_commit(submit);
	heap_fence_put(submit);
	if (err) {
		__live_put(active);
		active = ERR_PTR(err);
	}

	return active;
}

static int live_active_wait(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct live_active *active;
	int err = 0;

	/* Check that we get a callback when requests retire upon waiting */

	active = __live_active_setup(i915);
	if (IS_ERR(active))
		return PTR_ERR(active);

	i915_active_wait(&active->base);
	if (!READ_ONCE(active->retired)) {
		struct drm_printer p = drm_err_printer(__func__);

		pr_err("i915_active not retired after waiting!\n");
		i915_active_print(&active->base, &p);

		err = -EINVAL;
	}

	__live_put(active);

	if (igt_flush_test(i915))
		err = -EIO;

	return err;
}

static int live_active_retire(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct live_active *active;
	int err = 0;

	/* Check that we get a callback when requests are indirectly retired */

	active = __live_active_setup(i915);
	if (IS_ERR(active))
		return PTR_ERR(active);

	/* waits for & retires all requests */
	if (igt_flush_test(i915))
		err = -EIO;

	if (!READ_ONCE(active->retired)) {
		struct drm_printer p = drm_err_printer(__func__);

		pr_err("i915_active not retired after flushing!\n");
		i915_active_print(&active->base, &p);

		err = -EINVAL;
	}

	__live_put(active);

	return err;
}

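/* Selftest entry point; skipped entirely if the GT is already wedged. */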
int i915_active_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_active_wait),
		SUBTEST(live_active_retire),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_subtests(tests, i915);
}

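/*
 * If the node is currently acting as a barrier, return the engine it is
 * attached to; the recheck after smp_rmb() guards against the node being
 * claimed by a concurrent add_active_barriers.
 */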
static struct intel_engine_cs *node_to_barrier(struct active_node *it)
{
	struct intel_engine_cs *engine;

	if (!is_barrier(&it->base))
		return NULL;

	engine = __barrier_to_engine(it);
	smp_rmb(); /* serialise with add_active_barriers */
	if (!is_barrier(&it->base))
		return NULL;

	return engine;
}

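/*
 * Debug dump of an i915_active: its callbacks, activity count, and any
 * busy nodes (barriers by engine name, everything else by timeline).
 */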
void i915_active_print(struct i915_active *ref, struct drm_printer *m)
{
	drm_printf(m, "active %pS:%pS\n", ref->active, ref->retire);
	drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count));
	drm_printf(m, "\tpreallocated barriers? %s\n",
		   yesno(!llist_empty(&ref->preallocated_barriers)));

	if (i915_active_acquire_if_busy(ref)) {
		struct active_node *it, *n;

		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
			struct intel_engine_cs *engine;

			engine = node_to_barrier(it);
			if (engine) {
				drm_printf(m, "\tbarrier: %s\n", engine->name);
				continue;
			}

			if (i915_active_fence_isset(&it->base)) {
				drm_printf(m,
					   "\ttimeline: %llx\n", it->timeline);
				continue;
			}
		}

		i915_active_release(ref);
	}
}

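/*
 * Wait for any current holder of @lock to drop it, by briefly taking
 * and releasing the lock ourselves.
 */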
static void spin_unlock_wait(spinlock_t *lock)
{
	spin_lock_irq(lock);
	spin_unlock_irq(lock);
}

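/*
 * Flush the callbacks of a (possibly busy) i915_active: wait on the
 * fence callback lock of every tracked node, then on the tree lock and
 * worker used by retirement.
 */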
void i915_active_unlock_wait(struct i915_active *ref)
{
	if (i915_active_acquire_if_busy(ref)) {
		struct active_node *it, *n;

		rcu_read_lock();
		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
			struct dma_fence *f;

			/* Wait for all active callbacks */
			f = rcu_dereference(it->base.fence);
			if (f)
				spin_unlock_wait(f->lock);
		}
		rcu_read_unlock();

		i915_active_release(ref);
	}

	/* And wait for the retire callback */
	spin_lock_irq(&ref->tree_lock);
	spin_unlock_irq(&ref->tree_lock);

	/* ... which may have been on a thread instead */
	flush_work(&ref->work);
}
299