xref: /netbsd-src/sys/external/bsd/drm2/dist/drm/i915/i915_query.c (revision 41ec02673d281bbb3d38e6c78504ce6e30c228c1)
1 /*	$NetBSD: i915_query.c,v 1.2 2021/12/18 23:45:28 riastradh Exp $	*/
2 
3 /*
4  * SPDX-License-Identifier: MIT
5  *
6  * Copyright © 2018 Intel Corporation
7  */
8 
9 #include <sys/cdefs.h>
10 __KERNEL_RCSID(0, "$NetBSD: i915_query.c,v 1.2 2021/12/18 23:45:28 riastradh Exp $");
11 
12 #include <linux/nospec.h>
13 
14 #include "i915_drv.h"
15 #include "i915_perf.h"
16 #include "i915_query.h"
17 #include <uapi/drm/i915_drm.h>
18 
/*
 * Validate a query item's buffer and pull in its user-written header.
 *
 * Returns total_length when the caller passed a zero length (a size
 * probe), a negative errno on a bad or unreadable buffer, and 0 when
 * query_hdr has been filled and the whole buffer is accessible.
 */
static int copy_query_item(void *query_hdr, size_t query_sz,
			   u32 total_length,
			   struct drm_i915_query_item *query_item)
{
	void __user *data = u64_to_user_ptr(query_item->data_ptr);

	/* Zero length is a request for the required buffer size. */
	if (query_item->length == 0)
		return total_length;

	/* The caller's buffer must hold the complete reply. */
	if (query_item->length < total_length)
		return -EINVAL;

	/* Read back the header the caller prepared, for validation. */
	if (copy_from_user(query_hdr, data, query_sz))
		return -EFAULT;

	/* The full span must be accessible for the later write-out. */
	if (!access_ok(data, total_length))
		return -EFAULT;

	return 0;
}
39 
/*
 * query_topology_info: report the device's slice/subslice/EU topology
 * to userspace.
 *
 * The reply written at query_item->data_ptr is laid out as:
 *   struct drm_i915_query_topology_info, then the slice mask (one u8),
 *   then the subslice masks, then the EU masks, at the offsets/strides
 *   recorded in the header.
 *
 * Returns the total reply size in bytes (also for a zero-length size
 * probe), or a negative errno.
 */
static int query_topology_info(struct drm_i915_private *dev_priv,
			       struct drm_i915_query_item *query_item)
{
	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	struct drm_i915_query_topology_info topo;
	u32 slice_length, subslice_length, eu_length, total_length;
	int ret;

	/* No flags are defined for this query. */
	if (query_item->flags != 0)
		return -EINVAL;

	/* No slices means there is no topology to report. */
	if (sseu->max_slices == 0)
		return -ENODEV;

	/* The slice mask must fit the single byte copied out below. */
	BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));

	slice_length = sizeof(sseu->slice_mask);
	subslice_length = sseu->max_slices * sseu->ss_stride;
	eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride;
	total_length = sizeof(topo) + slice_length + subslice_length +
		       eu_length;

	/* Validates the buffer, copies in the user header, or returns
	 * total_length directly for a size probe. */
	ret = copy_query_item(&topo, sizeof(topo), total_length,
			      query_item);
	if (ret != 0)
		return ret;

	/* The caller must leave the header flags zeroed. */
	if (topo.flags != 0)
		return -EINVAL;

	memset(&topo, 0, sizeof(topo));
	topo.max_slices = sseu->max_slices;
	topo.max_subslices = sseu->max_subslices;
	topo.max_eus_per_subslice = sseu->max_eus_per_subslice;

	/* Offsets are relative to the end of struct topo. */
	topo.subslice_offset = slice_length;
	topo.subslice_stride = sseu->ss_stride;
	topo.eu_offset = slice_length + subslice_length;
	topo.eu_stride = sseu->eu_stride;

	/* access_ok() was done in copy_query_item(), so the unchecked
	 * __copy_to_user() variants are safe here. */
	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr),
			   &topo, sizeof(topo)))
		return -EFAULT;

	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
			   &sseu->slice_mask, slice_length))
		return -EFAULT;

	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
					   sizeof(topo) + slice_length),
			   sseu->subslice_mask, subslice_length))
		return -EFAULT;

	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
					   sizeof(topo) +
					   slice_length + subslice_length),
			   sseu->eu_mask, eu_length))
		return -EFAULT;

	return total_length;
}
101 
102 static int
query_engine_info(struct drm_i915_private * i915,struct drm_i915_query_item * query_item)103 query_engine_info(struct drm_i915_private *i915,
104 		  struct drm_i915_query_item *query_item)
105 {
106 	struct drm_i915_query_engine_info __user *query_ptr =
107 				u64_to_user_ptr(query_item->data_ptr);
108 	struct drm_i915_engine_info __user *info_ptr;
109 	struct drm_i915_query_engine_info query;
110 	struct drm_i915_engine_info info = { };
111 	unsigned int num_uabi_engines = 0;
112 	struct intel_engine_cs *engine;
113 	int len, ret;
114 
115 	if (query_item->flags)
116 		return -EINVAL;
117 
118 	for_each_uabi_engine(engine, i915)
119 		num_uabi_engines++;
120 
121 	len = sizeof(struct drm_i915_query_engine_info) +
122 	      num_uabi_engines * sizeof(struct drm_i915_engine_info);
123 
124 	ret = copy_query_item(&query, sizeof(query), len, query_item);
125 	if (ret != 0)
126 		return ret;
127 
128 	if (query.num_engines || query.rsvd[0] || query.rsvd[1] ||
129 	    query.rsvd[2])
130 		return -EINVAL;
131 
132 	info_ptr = &query_ptr->engines[0];
133 
134 	for_each_uabi_engine(engine, i915) {
135 		info.engine.engine_class = engine->uabi_class;
136 		info.engine.engine_instance = engine->uabi_instance;
137 		info.capabilities = engine->uabi_capabilities;
138 
139 		if (__copy_to_user(info_ptr, &info, sizeof(info)))
140 			return -EFAULT;
141 
142 		query.num_engines++;
143 		info_ptr++;
144 	}
145 
146 	if (__copy_to_user(query_ptr, &query, sizeof(query)))
147 		return -EFAULT;
148 
149 	return len;
150 }
151 
/*
 * Check whether a user-supplied register array can receive the kernel
 * configuration's registers.
 *
 * A zero user count is allowed and means only the number of registers
 * will be reported, with no register data copied.  Returns 0 on
 * success, -EINVAL for a too-small array, -EFAULT for an inaccessible
 * one.
 */
static int can_copy_perf_config_registers_or_number(u32 user_n_regs,
						    u64 user_regs_ptr,
						    u32 kernel_n_regs)
{
	/* Count-only request: nothing to validate. */
	if (user_n_regs == 0)
		return 0;

	if (user_n_regs < kernel_n_regs)
		return -EINVAL;

	/* Each register is an (offset, value) pair of u32s. */
	return access_ok(u64_to_user_ptr(user_regs_ptr),
			 2 * sizeof(u32) * kernel_n_regs) ? 0 : -EFAULT;
}
172 
/*
 * Copy a kernel register list out to userspace as (offset, value) u32
 * pairs, or — when the caller passed a zero count — just report the
 * number of registers.  *user_n_regs is updated to the kernel count
 * in both cases.  Returns 0 on success, -EFAULT on a faulting write.
 */
static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel_regs,
						u32 kernel_n_regs,
						u64 user_regs_ptr,
						u32 *user_n_regs)
{
	u32 i;

	/* Count-only request: report how many registers exist. */
	if (*user_n_regs == 0) {
		*user_n_regs = kernel_n_regs;
		return 0;
	}

	*user_n_regs = kernel_n_regs;

	for (i = 0; i < kernel_n_regs; i++) {
		/* Pair i lives at user_regs_ptr + i * 2 * sizeof(u32). */
		u64 pair = user_regs_ptr + 2 * sizeof(u32) * i;
		u32 __user *reg_ptr = u64_to_user_ptr(pair);
		u32 __user *val_ptr = u64_to_user_ptr(pair + sizeof(u32));

		/* access_ok() was checked by the can_copy_* helper. */
		if (__put_user(i915_mmio_reg_offset(kernel_regs[i].addr),
			       reg_ptr))
			return -EFAULT;

		if (__put_user(kernel_regs[i].value, val_ptr))
			return -EFAULT;
	}

	return 0;
}
207 
/*
 * query_perf_config_data: return the OA (perf) register configuration
 * for a single config, looked up either by UUID string (use_uuid) or
 * by numeric config id.
 *
 * The user buffer holds a struct drm_i915_query_perf_config followed
 * immediately by a struct drm_i915_perf_oa_config; the register
 * arrays the caller points at from the latter are filled in (or only
 * counted, when a zero count is passed).
 *
 * Returns the total header size consumed, or a negative errno.
 */
static int query_perf_config_data(struct drm_i915_private *i915,
				  struct drm_i915_query_item *query_item,
				  bool use_uuid)
{
	struct drm_i915_query_perf_config __user *user_query_config_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_perf_oa_config __user *user_config_ptr =
		u64_to_user_ptr(query_item->data_ptr +
				sizeof(struct drm_i915_query_perf_config));
	struct drm_i915_perf_oa_config user_config;
	struct i915_perf *perf = &i915->perf;
	struct i915_oa_config *oa_config;
	char uuid[UUID_STRING_LEN + 1];
	u64 config_id;
	u32 flags, total_size;
	int ret;

	/* Perf support may not be initialized on this device. */
	if (!perf->i915)
		return -ENODEV;

	total_size =
		sizeof(struct drm_i915_query_perf_config) +
		sizeof(struct drm_i915_perf_oa_config);

	/* Zero length is a request for the required size. */
	if (query_item->length == 0)
		return total_size;

	if (query_item->length < total_size) {
		DRM_DEBUG("Invalid query config data item size=%u expected=%u\n",
			  query_item->length, total_size);
		return -EINVAL;
	}

	/* One access_ok() covers the __get_user/__copy_* calls below. */
	if (!access_ok(user_query_config_ptr, total_size))
		return -EFAULT;

	if (__get_user(flags, &user_query_config_ptr->flags))
		return -EFAULT;

	/* No flags are defined in the query header. */
	if (flags != 0)
		return -EINVAL;

	if (use_uuid) {
		struct i915_oa_config *tmp;
		int id;

		/* The local buffer must hold the UUID plus a NUL. */
		BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid));

		memset(&uuid, 0, sizeof(uuid));
		if (__copy_from_user(uuid, user_query_config_ptr->uuid,
				     sizeof(user_query_config_ptr->uuid)))
			return -EFAULT;

		/*
		 * Walk the metrics idr under RCU looking for a matching
		 * UUID; grab a reference before dropping the read lock
		 * so the config cannot vanish underneath us.
		 */
		oa_config = NULL;
		rcu_read_lock();
		idr_for_each_entry(&perf->metrics_idr, tmp, id) {
			if (!strcmp(tmp->uuid, uuid)) {
				oa_config = i915_oa_config_get(tmp);
				break;
			}
		}
		rcu_read_unlock();
	} else {
		if (__get_user(config_id, &user_query_config_ptr->config))
			return -EFAULT;

		/* Returns a referenced config, or NULL. */
		oa_config = i915_perf_get_oa_config(perf, config_id);
	}
	if (!oa_config)
		return -ENOENT;

	if (__copy_from_user(&user_config, user_config_ptr,
			     sizeof(user_config))) {
		ret = -EFAULT;
		goto out;
	}

	/*
	 * Validate all three user register arrays (count and
	 * writability) before writing anything out, so a failure
	 * leaves userspace untouched.
	 */
	ret = can_copy_perf_config_registers_or_number(user_config.n_boolean_regs,
						       user_config.boolean_regs_ptr,
						       oa_config->b_counter_regs_len);
	if (ret)
		goto out;

	ret = can_copy_perf_config_registers_or_number(user_config.n_flex_regs,
						       user_config.flex_regs_ptr,
						       oa_config->flex_regs_len);
	if (ret)
		goto out;

	ret = can_copy_perf_config_registers_or_number(user_config.n_mux_regs,
						       user_config.mux_regs_ptr,
						       oa_config->mux_regs_len);
	if (ret)
		goto out;

	/* Copy out the registers, or just the counts when requested. */
	ret = copy_perf_config_registers_or_number(oa_config->b_counter_regs,
						   oa_config->b_counter_regs_len,
						   user_config.boolean_regs_ptr,
						   &user_config.n_boolean_regs);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->flex_regs,
						   oa_config->flex_regs_len,
						   user_config.flex_regs_ptr,
						   &user_config.n_flex_regs);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->mux_regs,
						   oa_config->mux_regs_len,
						   user_config.mux_regs_ptr,
						   &user_config.n_mux_regs);
	if (ret)
		goto out;

	memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid));

	/* Write the updated oa_config struct (counts + uuid) back. */
	if (__copy_to_user(user_config_ptr, &user_config,
			   sizeof(user_config))) {
		ret = -EFAULT;
		goto out;
	}

	ret = total_size;

out:
	/* Drop the reference taken by the lookup above. */
	i915_oa_config_put(oa_config);
	return ret;
}
338 
/* Reply size for a config list: header plus one u64 id per entry. */
static size_t sizeof_perf_config_list(size_t count)
{
	return sizeof(struct drm_i915_query_perf_config) +
	       count * sizeof(u64);
}
343 
sizeof_perf_metrics(struct i915_perf * perf)344 static size_t sizeof_perf_metrics(struct i915_perf *perf)
345 {
346 	struct i915_oa_config *tmp;
347 	size_t i;
348 	int id;
349 
350 	i = 1;
351 	rcu_read_lock();
352 	idr_for_each_entry(&perf->metrics_idr, tmp, id)
353 		i++;
354 	rcu_read_unlock();
355 
356 	return sizeof_perf_config_list(i);
357 }
358 
/*
 * query_perf_config_list: report the ids of all registered OA (perf)
 * configurations.
 *
 * The reply is a struct drm_i915_query_perf_config (with ->config set
 * to the entry count) followed by an array of u64 config ids; the id
 * 1 is reserved for the test config.  Returns the number of bytes
 * written (or the required size for a zero-length probe), or a
 * negative errno.
 */
static int query_perf_config_list(struct drm_i915_private *i915,
				  struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_perf_config __user *user_query_config_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct i915_perf *perf = &i915->perf;
	u64 *oa_config_ids = NULL;
	int alloc, n_configs;
	u32 flags;
	int ret;

	/* Perf support may not be initialized on this device. */
	if (!perf->i915)
		return -ENODEV;

	/* Zero length is a request for the required size. */
	if (query_item->length == 0)
		return sizeof_perf_metrics(perf);

	if (get_user(flags, &user_query_config_ptr->flags))
		return -EFAULT;

	/* No flags are defined in the query header. */
	if (flags != 0)
		return -EINVAL;

	/*
	 * Snapshot the id list without holding a lock across the
	 * allocation: count entries while filling, and retry with a
	 * larger array whenever a pass finds more configs than the
	 * previous pass allocated room for.
	 */
	n_configs = 1;
	do {
		struct i915_oa_config *tmp;
		u64 *ids;
		int id;

		ids = krealloc(oa_config_ids,
			       n_configs * sizeof(*oa_config_ids),
			       GFP_KERNEL);
		if (!ids)
			return -ENOMEM;

		/* Remember the capacity; restart the count from 0. */
		alloc = fetch_and_zero(&n_configs);

		ids[n_configs++] = 1ull; /* reserved for test_config */
		rcu_read_lock();
		idr_for_each_entry(&perf->metrics_idr, tmp, id) {
			/* Store only while in bounds, but keep counting. */
			if (n_configs < alloc)
				ids[n_configs] = id;
			n_configs++;
		}
		rcu_read_unlock();

		oa_config_ids = ids;
	} while (n_configs > alloc);

	if (query_item->length < sizeof_perf_config_list(n_configs)) {
		DRM_DEBUG("Invalid query config list item size=%u expected=%zu\n",
			  query_item->length,
			  sizeof_perf_config_list(n_configs));
		kfree(oa_config_ids);
		return -EINVAL;
	}

	if (put_user(n_configs, &user_query_config_ptr->config)) {
		kfree(oa_config_ids);
		return -EFAULT;
	}

	/* The id array follows immediately after the header. */
	ret = copy_to_user(user_query_config_ptr + 1,
			   oa_config_ids,
			   n_configs * sizeof(*oa_config_ids));
	kfree(oa_config_ids);
	if (ret)
		return -EFAULT;

	return sizeof_perf_config_list(n_configs);
}
430 
query_perf_config(struct drm_i915_private * i915,struct drm_i915_query_item * query_item)431 static int query_perf_config(struct drm_i915_private *i915,
432 			     struct drm_i915_query_item *query_item)
433 {
434 	switch (query_item->flags) {
435 	case DRM_I915_QUERY_PERF_CONFIG_LIST:
436 		return query_perf_config_list(i915, query_item);
437 	case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID:
438 		return query_perf_config_data(i915, query_item, true);
439 	case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID:
440 		return query_perf_config_data(i915, query_item, false);
441 	default:
442 		return -EINVAL;
443 	}
444 }
445 
/*
 * Dispatch table for DRM_IOCTL_I915_QUERY: a query_id of N selects
 * i915_query_funcs[N - 1], so the order here defines the uAPI ids.
 */
static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
					struct drm_i915_query_item *query_item) = {
	query_topology_info,
	query_engine_info,
	query_perf_config,
};
452 
/*
 * i915_query_ioctl: entry point for DRM_IOCTL_I915_QUERY.
 *
 * Walks the user-supplied array of query items, dispatches each to
 * the handler selected by its (1-based) query_id, and writes the
 * handler's result (a byte length, or a negative errno) back into the
 * item's length field when it differs from what the caller passed.
 *
 * Returns 0 on success, or a negative errno only for faults hit while
 * accessing the item array itself; per-item failures are reported
 * through the items.
 */
int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_query *args = data;
	struct drm_i915_query_item __user *user_item_ptr =
		u64_to_user_ptr(args->items_ptr);
	u32 i;

	/* No flags are defined for the top-level query struct. */
	if (args->flags != 0)
		return -EINVAL;

	for (i = 0; i < args->num_items; i++, user_item_ptr++) {
		struct drm_i915_query_item item;
		unsigned long func_idx;
		int ret;

		if (copy_from_user(&item, user_item_ptr, sizeof(item)))
			return -EFAULT;

		/* query_id is 1-based; 0 is invalid. */
		if (item.query_id == 0)
			return -EINVAL;

		/* Guard the u64 -> unsigned long narrowing below. */
		if (overflows_type(item.query_id - 1, unsigned long))
			return -EINVAL;

		func_idx = item.query_id - 1;

		ret = -EINVAL;
		if (func_idx < ARRAY_SIZE(i915_query_funcs)) {
			/* Clamp the index under speculation (Spectre v1). */
			func_idx = array_index_nospec(func_idx,
						      ARRAY_SIZE(i915_query_funcs));
			ret = i915_query_funcs[func_idx](dev_priv, &item);
		}

		/* Only write the length back to userspace if they differ. */
		if (ret != item.length && put_user(ret, &user_item_ptr->length))
			return -EFAULT;
	}

	return 0;
}
494