xref: /openbsd-src/sys/dev/pci/drm/i915/gt/intel_engine_user.c (revision de8cc8edbc71bd3e3bc7fbffa27ba0e564c37d8b)
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2019 Intel Corporation
5  */
6 
7 #include <linux/list.h>
8 #include <linux/list_sort.h>
9 #include <linux/llist.h>
10 
11 #include "i915_drv.h"
12 #include "intel_engine.h"
13 #include "intel_engine_user.h"
14 #include "intel_gt.h"
15 
16 struct intel_engine_cs *
17 intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
18 {
19 	struct rb_node *p = i915->uabi_engines.rb_node;
20 
21 	while (p) {
22 		struct intel_engine_cs *it =
23 			rb_entry(p, typeof(*it), uabi_node);
24 
25 		if (class < it->uabi_class)
26 			p = p->rb_left;
27 		else if (class > it->uabi_class ||
28 			 instance > it->uabi_instance)
29 			p = p->rb_right;
30 		else if (instance < it->uabi_instance)
31 			p = p->rb_left;
32 		else
33 			return it;
34 	}
35 
36 	return NULL;
37 }
38 
39 void intel_engine_add_user(struct intel_engine_cs *engine)
40 {
41 	llist_add((struct llist_node *)&engine->uabi_node,
42 		  (struct llist_head *)&engine->i915->uabi_engines);
43 }
44 
/*
 * Map a hardware engine class (index) to the engine class id exposed
 * to userspace through the i915 uAPI.
 */
static const u8 uabi_classes[] = {
	[RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
	[COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
	[VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
	[VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
};
51 
52 static int engine_cmp(void *priv, struct list_head *A, struct list_head *B)
53 {
54 	const struct intel_engine_cs *a =
55 		container_of((struct rb_node *)A, typeof(*a), uabi_node);
56 	const struct intel_engine_cs *b =
57 		container_of((struct rb_node *)B, typeof(*b), uabi_node);
58 
59 	if (uabi_classes[a->class] < uabi_classes[b->class])
60 		return -1;
61 	if (uabi_classes[a->class] > uabi_classes[b->class])
62 		return 1;
63 
64 	if (a->instance < b->instance)
65 		return -1;
66 	if (a->instance > b->instance)
67 		return 1;
68 
69 	return 0;
70 }
71 
72 static struct llist_node *get_engines(struct drm_i915_private *i915)
73 {
74 	return llist_del_all((struct llist_head *)&i915->uabi_engines);
75 }
76 
77 static void sort_engines(struct drm_i915_private *i915,
78 			 struct list_head *engines)
79 {
80 	struct llist_node *pos, *next;
81 
82 	llist_for_each_safe(pos, next, get_engines(i915)) {
83 		struct intel_engine_cs *engine =
84 			container_of((struct rb_node *)pos, typeof(*engine),
85 				     uabi_node);
86 		list_add((struct list_head *)&engine->uabi_node, engines);
87 	}
88 	list_sort(NULL, engines, engine_cmp);
89 }
90 
91 #ifdef __linux__
92 static void set_scheduler_caps(struct drm_i915_private *i915)
93 {
94 	static const struct {
95 		u8 engine;
96 		u8 sched;
97 	} map[] = {
98 #define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) }
99 		MAP(HAS_PREEMPTION, PREEMPTION),
100 		MAP(HAS_SEMAPHORES, SEMAPHORES),
101 		MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
102 #undef MAP
103 	};
104 	struct intel_engine_cs *engine;
105 	u32 enabled, disabled;
106 
107 	enabled = 0;
108 	disabled = 0;
109 	for_each_uabi_engine(engine, i915) { /* all engines must agree! */
110 		int i;
111 
112 		if (engine->schedule)
113 			enabled |= (I915_SCHEDULER_CAP_ENABLED |
114 				    I915_SCHEDULER_CAP_PRIORITY);
115 		else
116 			disabled |= (I915_SCHEDULER_CAP_ENABLED |
117 				     I915_SCHEDULER_CAP_PRIORITY);
118 
119 		for (i = 0; i < ARRAY_SIZE(map); i++) {
120 			if (engine->flags & BIT(map[i].engine))
121 				enabled |= BIT(map[i].sched);
122 			else
123 				disabled |= BIT(map[i].sched);
124 		}
125 	}
126 
127 	i915->caps.scheduler = enabled & ~disabled;
128 	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
129 		i915->caps.scheduler = 0;
130 }
131 #else
132 /* without the pointless ilog2 -> BIT() */
133 static void set_scheduler_caps(struct drm_i915_private *i915)
134 {
135 	static const struct {
136 		u8 engine;
137 		u8 sched;
138 	} map[] = {
139 #define MAP(x, y) { I915_ENGINE_##x, I915_SCHEDULER_CAP_##y }
140 		MAP(HAS_PREEMPTION, PREEMPTION),
141 		MAP(HAS_SEMAPHORES, SEMAPHORES),
142 		MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
143 #undef MAP
144 	};
145 	struct intel_engine_cs *engine;
146 	u32 enabled, disabled;
147 
148 	enabled = 0;
149 	disabled = 0;
150 	for_each_uabi_engine(engine, i915) { /* all engines must agree! */
151 		int i;
152 
153 		if (engine->schedule)
154 			enabled |= (I915_SCHEDULER_CAP_ENABLED |
155 				    I915_SCHEDULER_CAP_PRIORITY);
156 		else
157 			disabled |= (I915_SCHEDULER_CAP_ENABLED |
158 				     I915_SCHEDULER_CAP_PRIORITY);
159 
160 		for (i = 0; i < ARRAY_SIZE(map); i++) {
161 			if (engine->flags & map[i].engine)
162 				enabled |= map[i].sched;
163 			else
164 				disabled |= map[i].sched;
165 		}
166 	}
167 
168 	i915->caps.scheduler = enabled & ~disabled;
169 	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
170 		i915->caps.scheduler = 0;
171 }
172 #endif
173 
174 const char *intel_engine_class_repr(u8 class)
175 {
176 	static const char * const uabi_names[] = {
177 		[RENDER_CLASS] = "rcs",
178 		[COPY_ENGINE_CLASS] = "bcs",
179 		[VIDEO_DECODE_CLASS] = "vcs",
180 		[VIDEO_ENHANCEMENT_CLASS] = "vecs",
181 	};
182 
183 	if (class >= ARRAY_SIZE(uabi_names) || !uabi_names[class])
184 		return "xxx";
185 
186 	return uabi_names[class];
187 }
188 
/*
 * Cursor used while assigning legacy execbuf ring ids: tracks the
 * current GT and engine class, and the next instance within that
 * class (see add_legacy_ring()).
 */
struct legacy_ring {
	struct intel_gt *gt;
	u8 class;
	u8 instance;
};
194 
195 static int legacy_ring_idx(const struct legacy_ring *ring)
196 {
197 	static const struct {
198 		u8 base, max;
199 	} map[] = {
200 		[RENDER_CLASS] = { RCS0, 1 },
201 		[COPY_ENGINE_CLASS] = { BCS0, 1 },
202 		[VIDEO_DECODE_CLASS] = { VCS0, I915_MAX_VCS },
203 		[VIDEO_ENHANCEMENT_CLASS] = { VECS0, I915_MAX_VECS },
204 	};
205 
206 	if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
207 		return INVALID_ENGINE;
208 
209 	if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
210 		return INVALID_ENGINE;
211 
212 	return map[ring->class].base + ring->instance;
213 }
214 
215 static void add_legacy_ring(struct legacy_ring *ring,
216 			    struct intel_engine_cs *engine)
217 {
218 	if (engine->gt != ring->gt || engine->class != ring->class) {
219 		ring->gt = engine->gt;
220 		ring->class = engine->class;
221 		ring->instance = 0;
222 	}
223 
224 	engine->legacy_idx = legacy_ring_idx(ring);
225 	if (engine->legacy_idx != INVALID_ENGINE)
226 		ring->instance++;
227 }
228 
/*
 * Publish all engines to userspace: assign each engine its uABI
 * class/instance pair and user-facing name, build the
 * i915->uabi_engines rbtree consumed by intel_engine_lookup_user(),
 * assign legacy execbuf ids, and finally derive the advertised
 * scheduler caps.
 */
void intel_engines_driver_register(struct drm_i915_private *i915)
{
	struct legacy_ring ring = {};
	u8 uabi_instances[4] = {};
	struct list_head *it, *next;
	struct rb_node **p, *prev;
	DRM_LIST_HEAD(engines);

	/* Drain the llist of added engines into a list sorted by class/instance. */
	sort_engines(i915, &engines);

	/*
	 * The list is already in rbtree order, so each node is linked as
	 * the right child of the previously inserted one — an in-order
	 * append that lets rb_insert_color() rebalance as it goes.
	 */
	prev = NULL;
	p = &i915->uabi_engines.rb_node;
	list_for_each_safe(it, next, &engines) {
		struct intel_engine_cs *engine =
			container_of((struct rb_node *)it, typeof(*engine),
				     uabi_node);
		char old[sizeof(engine->name)];

		if (intel_gt_has_init_error(engine->gt))
			continue; /* ignore incomplete engines */

		GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
		engine->uabi_class = uabi_classes[engine->class];

		/* Instances number upwards within each uABI class. */
		GEM_BUG_ON(engine->uabi_class >= ARRAY_SIZE(uabi_instances));
		engine->uabi_instance = uabi_instances[engine->uabi_class]++;

		/* Replace the internal name with the final user facing name */
		memcpy(old, engine->name, sizeof(engine->name));
		scnprintf(engine->name, sizeof(engine->name), "%s%u",
			  intel_engine_class_repr(engine->class),
			  engine->uabi_instance);
		DRM_DEBUG_DRIVER("renamed %s to %s\n", old, engine->name);

		rb_link_node(&engine->uabi_node, prev, p);
		rb_insert_color(&engine->uabi_node, &i915->uabi_engines);

		/* Sanity check: the tree must resolve what we just inserted. */
		GEM_BUG_ON(intel_engine_lookup_user(i915,
						    engine->uabi_class,
						    engine->uabi_instance) != engine);

		/* Fix up the mapping to match default execbuf::user_map[] */
		add_legacy_ring(&ring, engine);

		prev = &engine->uabi_node;
		p = &prev->rb_right;
	}

	/* Debug builds: cross-check the uABI mapping we just created. */
	if (IS_ENABLED(CONFIG_DRM_I915_SELFTESTS) &&
	    IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
		struct intel_engine_cs *engine;
		unsigned int isolation;
		int class, inst;
		int errors = 0;

		/* Every (class, instance) pair handed out must look up. */
		for (class = 0; class < ARRAY_SIZE(uabi_instances); class++) {
			for (inst = 0; inst < uabi_instances[class]; inst++) {
				engine = intel_engine_lookup_user(i915,
								  class, inst);
				if (!engine) {
					pr_err("UABI engine not found for { class:%d, instance:%d }\n",
					       class, inst);
					errors++;
					continue;
				}

				if (engine->uabi_class != class ||
				    engine->uabi_instance != inst) {
					pr_err("Wrong UABI engine:%s { class:%d, instance:%d } found for { class:%d, instance:%d }\n",
					       engine->name,
					       engine->uabi_class,
					       engine->uabi_instance,
					       class, inst);
					errors++;
					continue;
				}
			}
		}

		/*
		 * Make sure that classes with multiple engine instances all
		 * share the same basic configuration.
		 */
		isolation = intel_engines_has_context_isolation(i915);
		for_each_uabi_engine(engine, i915) {
			unsigned int bit = BIT(engine->uabi_class);
			unsigned int expected = engine->default_state ? bit : 0;

			if ((isolation & bit) != expected) {
				pr_err("mismatching default context state for class %d on engine %s\n",
				       engine->uabi_class, engine->name);
				errors++;
			}
		}

		/* On any inconsistency, withdraw every engine from userspace. */
		if (drm_WARN(&i915->drm, errors,
			     "Invalid UABI engine mapping found"))
			i915->uabi_engines = RB_ROOT;
	}

	set_scheduler_caps(i915);
}
331 
332 unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
333 {
334 	struct intel_engine_cs *engine;
335 	unsigned int which;
336 
337 	which = 0;
338 	for_each_uabi_engine(engine, i915)
339 		if (engine->default_state)
340 			which |= BIT(engine->uabi_class);
341 
342 	return which;
343 }
344