/*	$NetBSD: virtgpu_ioctl.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $	*/

/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtgpu_ioctl.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $");

#include <linux/file.h>
#include <linux/sync_file.h>

#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"

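/*
 * VIRTGPU_MAP: translate a GEM handle into the mmap offset that
 * userspace then passes to mmap(2) on the DRM fd.  This just reuses
 * the dumb-buffer mmap helper.
 */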
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
 */
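/*
 * Submission path, in order: wait on an optional "in" fence if it comes
 * from a foreign context, reserve an "out" fence fd if requested, look
 * up the referenced BOs and lock their reservations, copy the command
 * stream from userspace, then hand everything to virtio_gpu_cmd_submit().
 */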
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *drm_file)
{
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
	struct virtio_gpu_fence *out_fence;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct virtio_gpu_object_array *buflist = NULL;
	struct sync_file *sync_file;
	int in_fence_fd = exbuf->fence_fd;
	int out_fence_fd = -1;
	void *buf;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS))
		return -EINVAL;

	exbuf->fence_fd = -1;

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(in_fence_fd);

		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0)
			return out_fence_fd;
	}

	if (exbuf->num_bo_handles) {
		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
					    sizeof(uint32_t), GFP_KERNEL);
		if (!bo_handles) {
			ret = -ENOMEM;
			goto out_unused_fd;
		}

		user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			goto out_unused_fd;
		}

		buflist = virtio_gpu_array_from_handles(drm_file, bo_handles,
							exbuf->num_bo_handles);
		if (!buflist) {
			ret = -ENOENT;
			goto out_unused_fd;
		}
		kvfree(bo_handles);
		bo_handles = NULL;
	}

	if (buflist) {
		ret = virtio_gpu_array_lock_resv(buflist);
		if (ret)
			goto out_unused_fd;
	}

	buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unresv;
	}

	out_fence = virtio_gpu_fence_alloc(vgdev);
	if (!out_fence) {
		ret = -ENOMEM;
		goto out_memdup;
	}

	if (out_fence_fd >= 0) {
		sync_file = sync_file_create(&out_fence->f);
		if (!sync_file) {
			dma_fence_put(&out_fence->f);
			ret = -ENOMEM;
			goto out_memdup;
		}

		exbuf->fence_fd = out_fence_fd;
		fd_install(out_fence_fd, sync_file->file);
	}

	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, buflist, out_fence);
	return 0;

out_memdup:
	kvfree(buf);
out_unresv:
	if (buflist)
		virtio_gpu_array_unlock_resv(buflist);
out_unused_fd:
	kvfree(bo_handles);
	if (buflist)
		virtio_gpu_array_put_free(buflist);

	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

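/*
 * VIRTGPU_GETPARAM: report driver capabilities to userspace.  Only two
 * parameters are handled: whether virgl 3D is available, and the
 * capset-query fix (always reported as present).
 */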
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d == true ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
		value = 1;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
		return -EFAULT;

	return 0;
}

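/*
 * VIRTGPU_RESOURCE_CREATE: create a host resource plus a guest GEM
 * object backing it, and return both handles.  Without virgl 3D the
 * extra 3D parameters are rejected up front; with virgl they are
 * forwarded to the host.
 */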
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	struct virtio_gpu_fence *fence;
	int ret;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	struct virtio_gpu_object_params params = { 0 };

	if (vgdev->has_virgl_3d == false) {
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	params.format = rc->format;
	params.width = rc->width;
	params.height = rc->height;
	params.size = rc->size;
	if (vgdev->has_virgl_3d) {
		params.virgl = true;
		params.target = rc->target;
		params.bind = rc->bind;
		params.depth = rc->depth;
		params.array_size = rc->array_size;
		params.last_level = rc->last_level;
		params.nr_samples = rc->nr_samples;
		params.flags = rc->flags;
	}
	/* allocate a single page sized object */
	if (params.size == 0)
		params.size = PAGE_SIZE;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence)
		return -ENOMEM;
	ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
	dma_fence_put(&fence->f);
	if (ret < 0)
		return ret;
	obj = &qobj->base.base;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}
	drm_gem_object_put_unlocked(obj);

	rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
	rc->bo_handle = handle;
	return 0;
}

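/*
 * VIRTGPU_RESOURCE_INFO: look up a GEM handle and report its size and
 * host resource id.
 */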
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file_priv, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->base.base.size;
	ri->res_handle = qobj->hw_res_handle;
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

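/*
 * VIRTGPU_TRANSFER_FROM_HOST: read a box of a host resource back into
 * the guest object.  Only available with virgl 3D; the transfer is
 * fenced and our fence reference is dropped once the command is queued.
 */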
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	ret = virtio_gpu_array_lock_resv(objs);
	if (ret != 0)
		goto err_put_free;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence) {
		ret = -ENOMEM;
		goto err_unlock;
	}
	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, vfpriv->ctx_id, offset, args->level,
		 &args->box, objs, fence);
	dma_fence_put(&fence->f);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}

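/*
 * VIRTGPU_TRANSFER_TO_HOST: push guest object contents to the host
 * resource.  The 2D path sends the transfer unfenced; the virgl 3D
 * path locks the reservation and attaches a fence.
 */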
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, offset,
			 args->box.w, args->box.h, args->box.x, args->box.y,
			 objs, NULL);
	} else {
		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_free;

		ret = -ENOMEM;
		fence = virtio_gpu_fence_alloc(vgdev);
		if (!fence)
			goto err_unlock;

		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev,
			 vfpriv ? vfpriv->ctx_id : 0, offset,
			 args->level, &args->box, objs, fence);
		dma_fence_put(&fence->f);
	}
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}

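/*
 * VIRTGPU_WAIT: wait for the fences on a GEM object's reservation (or
 * just poll them with VIRTGPU_WAIT_NOWAIT), with a 15 second timeout.
 * Returns -EBUSY if the object is still busy.
 */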
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *obj;
	long timeout = 15 * HZ;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
		ret = dma_resv_test_signaled_rcu(obj->resv, true);
	} else {
		ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
						timeout);
	}
	if (ret == 0)
		ret = -EBUSY;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put_unlocked(obj);
	return ret;
}

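/*
 * VIRTGPU_GET_CAPS: copy one capability set out to userspace, fetching
 * it from the host and caching it on first use.  At most
 * min(guest buffer size, host capset size) bytes are copied.
 */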
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy to user the minimum of the host caps size or the guest caps size */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);

copy_exit:
	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	/* the is_valid check must precede the copy of the cache entry */
	smp_rmb();

	ptr = cache_ent->caps_cache;

	if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
		return -EFAULT;

	return 0;
}

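/*
 * Ioctl table wiring the VIRTGPU ioctl numbers to the handlers above;
 * all of them are allowed on render nodes.
 */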
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	 * thread these in the underlying GL
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_RENDER_ALLOW),
};