xref: /openbsd-src/sys/dev/pci/drm/i915/gt/sysfs_engines.c (revision fc405d53b73a2d73393cb97f684863d17b583e38)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/kobject.h>
#include <linux/sysfs.h>

#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "sysfs_engines.h"

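/*
 * Engine properties are exposed to userspace through per-engine kobjects
 * parented to the DRM card device.  For illustration only (the card index
 * and engine names vary from system to system), the resulting layout on a
 * typical Linux setup looks like:
 *
 *   /sys/class/drm/card0/engine/rcs0/name
 *   /sys/class/drm/card0/engine/rcs0/stop_timeout_ms
 *   /sys/class/drm/card0/engine/rcs0/.defaults/stop_timeout_ms
 */
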
#ifdef __linux__

struct kobj_engine {
	struct kobject base;
	struct intel_engine_cs *engine;
};

static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
{
	return container_of(kobj, struct kobj_engine, base)->engine;
}

static ssize_t
name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", kobj_to_engine(kobj)->name);
}

static struct kobj_attribute name_attr =
__ATTR(name, 0444, name_show, NULL);

static ssize_t
class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
}

static struct kobj_attribute class_attr =
__ATTR(class, 0444, class_show, NULL);

static ssize_t
inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
}

static struct kobj_attribute inst_attr =
__ATTR(instance, 0444, inst_show, NULL);

static ssize_t
mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
}

static struct kobj_attribute mmio_attr =
__ATTR(mmio_base, 0444, mmio_show, NULL);

static const char * const vcs_caps[] = {
	[ilog2(I915_VIDEO_CLASS_CAPABILITY_HEVC)] = "hevc",
	[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static const char * const vecs_caps[] = {
	[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};
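
/*
 * For example (illustrative): reading "capabilities" for a video decode
 * engine that advertises both HEVC and SFC support prints "hevc sfc",
 * while "known_capabilities" lists every name defined above for that
 * class, regardless of what the hardware actually supports.
 */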

static ssize_t repr_trim(char *buf, ssize_t len)
{
	/* Trim off the trailing space and replace with a newline */
	if (len > PAGE_SIZE)
		len = PAGE_SIZE;
	if (len > 0)
		buf[len - 1] = '\n';

	return len;
}

static ssize_t
__caps_show(struct intel_engine_cs *engine,
	    unsigned long caps, char *buf, bool show_unknown)
{
	const char * const *repr;
	int count, n;
	ssize_t len;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		repr = vcs_caps;
		count = ARRAY_SIZE(vcs_caps);
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		repr = vecs_caps;
		count = ARRAY_SIZE(vecs_caps);
		break;

	default:
		repr = NULL;
		count = 0;
		break;
	}
	GEM_BUG_ON(count > BITS_PER_LONG);

	len = 0;
	for_each_set_bit(n, &caps, show_unknown ? BITS_PER_LONG : count) {
		if (n >= count || !repr[n]) {
			if (GEM_WARN_ON(show_unknown))
				len += snprintf(buf + len, PAGE_SIZE - len,
						"[%x] ", n);
		} else {
			len += snprintf(buf + len, PAGE_SIZE - len,
					"%s ", repr[n]);
		}
		if (GEM_WARN_ON(len >= PAGE_SIZE))
			break;
	}
	return repr_trim(buf, len);
}

static ssize_t
caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return __caps_show(engine, engine->uabi_capabilities, buf, true);
}

static struct kobj_attribute caps_attr =
__ATTR(capabilities, 0444, caps_show, NULL);

static ssize_t
all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return __caps_show(kobj_to_engine(kobj), -1, buf, false);
}

static struct kobj_attribute all_caps_attr =
__ATTR(known_capabilities, 0444, all_caps_show, NULL);

static ssize_t
max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
	       const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration, clamped;
	int err;

	/*
	 * When waiting for a request, if it is currently being executed
	 * on the GPU, we busywait for a short while before sleeping. The
	 * premise is that most requests are short, and if it is already
	 * executing then there is a good chance that it will complete
	 * before we can set up the interrupt handler and go to sleep.
	 * We try to offset the cost of going to sleep by first spinning
	 * on the request -- if it completes in less time than it would take
	 * to go to sleep, process the interrupt and return to the client,
	 * then we have saved the client some latency, albeit at the cost
	 * of spinning on an expensive CPU core.
	 *
	 * While we try to avoid waiting at all for a request that is unlikely
	 * to complete, deciding how long it is worth spinning for is an
	 * arbitrary decision: trading off power vs latency.
	 */
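	/*
	 * Illustrative usage only (example path and value; the card index
	 * and engine name vary between systems, and out-of-range values
	 * are rejected with -EINVAL below):
	 *   echo 8000 > /sys/class/drm/card0/engine/rcs0/max_busywait_duration_ns
	 */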

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	clamped = intel_clamp_max_busywait_duration_ns(engine, duration);
	if (duration != clamped)
		return -EINVAL;

	WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);

	return count;
}

static ssize_t
max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.max_busywait_duration_ns);
}

static struct kobj_attribute max_spin_attr =
__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);

static ssize_t
max_spin_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
}

static struct kobj_attribute max_spin_def =
__ATTR(max_busywait_duration_ns, 0444, max_spin_default, NULL);

static ssize_t
timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration, clamped;
	int err;

	/*
	 * Execlists uses a scheduling quantum (a timeslice) to alternate
	 * execution between ready-to-run contexts of equal priority. This
	 * ensures that all users (though only if they are of equal importance)
	 * have the opportunity to run and prevents livelocks where contexts
	 * may have implicit ordering due to userspace semaphores.
	 */
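	/*
	 * Illustrative usage only (example path and value; card index and
	 * engine name differ between systems):
	 *   echo 5 > /sys/class/drm/card0/engine/rcs0/timeslice_duration_ms
	 */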

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	clamped = intel_clamp_timeslice_duration_ms(engine, duration);
	if (duration != clamped)
		return -EINVAL;

	WRITE_ONCE(engine->props.timeslice_duration_ms, duration);

	if (execlists_active(&engine->execlists))
		set_timer_ms(&engine->execlists.timer, duration);

	return count;
}

static ssize_t
timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.timeslice_duration_ms);
}

static struct kobj_attribute timeslice_duration_attr =
__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);

static ssize_t
timeslice_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
}

static struct kobj_attribute timeslice_duration_def =
__ATTR(timeslice_duration_ms, 0444, timeslice_default, NULL);

static ssize_t
stop_store(struct kobject *kobj, struct kobj_attribute *attr,
	   const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration, clamped;
	int err;

	/*
	 * Allowing ourselves to sleep before a GPU reset after disabling
	 * submission, even for a few milliseconds, gives an innocent context
	 * the opportunity to clear the GPU before the reset occurs. However,
	 * how long to sleep depends on the typical non-preemptible duration
	 * (a similar problem to determining the ideal preempt-reset timeout
	 * or even the heartbeat interval).
	 */
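	/*
	 * Illustrative usage only (example path and value):
	 *   echo 100 > /sys/class/drm/card0/engine/rcs0/stop_timeout_ms
	 */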

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	clamped = intel_clamp_stop_timeout_ms(engine, duration);
	if (duration != clamped)
		return -EINVAL;

	WRITE_ONCE(engine->props.stop_timeout_ms, duration);
	return count;
}

static ssize_t
stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.stop_timeout_ms);
}

static struct kobj_attribute stop_timeout_attr =
__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);

static ssize_t
stop_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->defaults.stop_timeout_ms);
}

static struct kobj_attribute stop_timeout_def =
__ATTR(stop_timeout_ms, 0444, stop_default, NULL);

static ssize_t
preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
		      const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long timeout, clamped;
	int err;

	/*
	 * After initialising a preemption request, we give the current
	 * resident a small amount of time to vacate the GPU. The preemption
	 * request is for a higher priority context and should be immediate to
	 * maintain high quality of service (and avoid priority inversion).
	 * However, the preemption granularity of the GPU can be quite coarse
	 * and so we need a compromise.
	 */
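	/*
	 * Illustrative usage only (example path and value):
	 *   echo 640 > /sys/class/drm/card0/engine/rcs0/preempt_timeout_ms
	 */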

	err = kstrtoull(buf, 0, &timeout);
	if (err)
		return err;

	clamped = intel_clamp_preempt_timeout_ms(engine, timeout);
	if (timeout != clamped)
		return -EINVAL;

	WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);

	if (READ_ONCE(engine->execlists.pending[0]))
		set_timer_ms(&engine->execlists.preempt, timeout);

	return count;
}

static ssize_t
preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
		     char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.preempt_timeout_ms);
}

static struct kobj_attribute preempt_timeout_attr =
__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);

static ssize_t
preempt_timeout_default(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
}

static struct kobj_attribute preempt_timeout_def =
__ATTR(preempt_timeout_ms, 0444, preempt_timeout_default, NULL);

static ssize_t
heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long delay, clamped;
	int err;

	/*
	 * We monitor the health of the system via periodic heartbeat pulses.
	 * The pulses also provide the opportunity to perform garbage
	 * collection. However, we interpret an incomplete pulse (a missed
	 * heartbeat) as an indication that the system is no longer responsive,
	 * i.e. hung, and perform an engine or full GPU reset. Given that the
	 * preemption granularity can be very coarse on a system, the optimal
	 * value for any workload is unknowable!
	 */
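	/*
	 * Illustrative usage only (example path and value):
	 *   echo 2500 > /sys/class/drm/card0/engine/rcs0/heartbeat_interval_ms
	 */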

	err = kstrtoull(buf, 0, &delay);
	if (err)
		return err;

	clamped = intel_clamp_heartbeat_interval_ms(engine, delay);
	if (delay != clamped)
		return -EINVAL;

	err = intel_engine_set_heartbeat(engine, delay);
	if (err)
		return err;

	return count;
}

static ssize_t
heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.heartbeat_interval_ms);
}

static struct kobj_attribute heartbeat_interval_attr =
__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);

static ssize_t
heartbeat_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
}

static struct kobj_attribute heartbeat_interval_def =
__ATTR(heartbeat_interval_ms, 0444, heartbeat_default, NULL);

static void kobj_engine_release(struct kobject *kobj)
{
	kfree(kobj);
}

static struct kobj_type kobj_engine_type = {
	.release = kobj_engine_release,
	.sysfs_ops = &kobj_sysfs_ops
};

static struct kobject *
kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
{
	struct kobj_engine *ke;

	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
	if (!ke)
		return NULL;

	kobject_init(&ke->base, &kobj_engine_type);
	ke->engine = engine;

	if (kobject_add(&ke->base, dir, "%s", engine->name)) {
		kobject_put(&ke->base);
		return NULL;
	}

	/* xfer ownership to sysfs tree */
	return &ke->base;
}

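/*
 * Each engine directory also gets a read-only ".defaults" subdirectory
 * exposing the compiled-in values (engine->defaults), so that userspace
 * can, for example, restore a property after tuning it.  Illustrative
 * path only:
 *   cat /sys/class/drm/card0/engine/rcs0/.defaults/stop_timeout_ms
 */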
static void add_defaults(struct kobj_engine *parent)
{
	static const struct attribute *files[] = {
		&max_spin_def.attr,
		&stop_timeout_def.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
		&heartbeat_interval_def.attr,
#endif
		NULL
	};
	struct kobj_engine *ke;

	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
	if (!ke)
		return;

	kobject_init(&ke->base, &kobj_engine_type);
	ke->engine = parent->engine;

	if (kobject_add(&ke->base, &parent->base, "%s", ".defaults")) {
		kobject_put(&ke->base);
		return;
	}

	if (sysfs_create_files(&ke->base, files))
		return;

	if (intel_engine_has_timeslices(ke->engine) &&
	    sysfs_create_file(&ke->base, &timeslice_duration_def.attr))
		return;

	if (intel_engine_has_preempt_reset(ke->engine) &&
	    sysfs_create_file(&ke->base, &preempt_timeout_def.attr))
		return;
}

#endif /* __linux__ */

void intel_engines_add_sysfs(struct drm_i915_private *i915)
{
#ifdef __linux__
	static const struct attribute *files[] = {
		&name_attr.attr,
		&class_attr.attr,
		&inst_attr.attr,
		&mmio_attr.attr,
		&caps_attr.attr,
		&all_caps_attr.attr,
		&max_spin_attr.attr,
		&stop_timeout_attr.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
		&heartbeat_interval_attr.attr,
#endif
		NULL
	};

	struct device *kdev = i915->drm.primary->kdev;
	struct intel_engine_cs *engine;
	struct kobject *dir;

	dir = kobject_create_and_add("engine", &kdev->kobj);
	if (!dir)
		return;

	for_each_uabi_engine(engine, i915) {
		struct kobject *kobj;

		kobj = kobj_engine(dir, engine);
		if (!kobj)
			goto err_engine;

		if (sysfs_create_files(kobj, files))
			goto err_object;

		if (intel_engine_has_timeslices(engine) &&
		    sysfs_create_file(kobj, &timeslice_duration_attr.attr))
			goto err_engine;

		if (intel_engine_has_preempt_reset(engine) &&
		    sysfs_create_file(kobj, &preempt_timeout_attr.attr))
			goto err_engine;

		add_defaults(container_of(kobj, struct kobj_engine, base));

		if (0) {
err_object:
			kobject_put(kobj);
err_engine:
			dev_err(kdev, "Failed to add sysfs engine '%s'\n",
				engine->name);
			break;
		}
	}
#endif /* __linux__ */
}