xref: /netbsd-src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_ioctl.c (revision 677ff2f013ca9d21511b6ed6b595f40920aa8b80)
1 /*	$NetBSD: vmwgfx_ioctl.c,v 1.4 2022/10/25 23:35:43 riastradh Exp $	*/
2 
3 // SPDX-License-Identifier: GPL-2.0 OR MIT
4 /**************************************************************************
5  *
6  * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the
10  * "Software"), to deal in the Software without restriction, including
11  * without limitation the rights to use, copy, modify, merge, publish,
12  * distribute, sub license, and/or sell copies of the Software, and to
13  * permit persons to whom the Software is furnished to do so, subject to
14  * the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the
17  * next paragraph) shall be included in all copies or substantial portions
18  * of the Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
23  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
24  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
25  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
26  * USE OR OTHER DEALINGS IN THE SOFTWARE.
27  *
28  **************************************************************************/
29 
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: vmwgfx_ioctl.c,v 1.4 2022/10/25 23:35:43 riastradh Exp $");
32 
33 #include "vmwgfx_drv.h"
34 #include <drm/vmwgfx_drm.h>
35 #include "vmwgfx_kms.h"
36 #include "device_include/svga3d_caps.h"
37 
/*
 * Legacy (pre guest-backed-objects) 3D capability record as handed to
 * user-space that is not gb_aware: a single SVGA3DCAPS_RECORD_DEVCAPS
 * header followed by one (capability index, value) pair per device
 * capability.  Filled in by vmw_fill_compat_cap().
 */
struct svga_3d_compat_cap {
	SVGA3dCapsRecordHeader header;
	SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX];
};
42 
/**
 * vmw_getparam_ioctl - Implement the DRM_VMW_GET_PARAM ioctl.
 *
 * @dev: DRM device.
 * @data: Ioctl argument, a struct drm_vmw_getparam_arg.  Its @param
 * member selects the parameter to query; the answer is written back
 * through its @value member.
 * @file_priv: DRM file private; carries the per-open gb_aware flag.
 *
 * Return: 0 on success, -EINVAL for an unrecognized parameter.
 */
int vmw_getparam_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_getparam_arg *param =
	    (struct drm_vmw_getparam_arg *)data;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	switch (param->param) {
	case DRM_VMW_PARAM_NUM_STREAMS:
		param->value = vmw_overlay_num_overlays(dev_priv);
		break;
	case DRM_VMW_PARAM_NUM_FREE_STREAMS:
		param->value = vmw_overlay_num_free_overlays(dev_priv);
		break;
	case DRM_VMW_PARAM_3D:
		param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
		break;
	case DRM_VMW_PARAM_HW_CAPS:
		param->value = dev_priv->capabilities;
		break;
	case DRM_VMW_PARAM_HW_CAPS2:
		param->value = dev_priv->capabilities2;
		break;
	case DRM_VMW_PARAM_FIFO_CAPS:
		param->value = dev_priv->fifo.capabilities;
		break;
	case DRM_VMW_PARAM_MAX_FB_SIZE:
		param->value = dev_priv->prim_bb_mem;
		break;
	case DRM_VMW_PARAM_FIFO_HW_VERSION:
	{
		u32 *fifo_mem = dev_priv->mmio_virt;
		const struct vmw_fifo_state *fifo = &dev_priv->fifo;

		/*
		 * With guest-backed objects, report a fixed 3D hardware
		 * version instead of reading the FIFO register.
		 */
		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
			param->value = SVGA3D_HWVERSION_WS8_B1;
			break;
		}

		/*
		 * Read the 3D HW version from FIFO MMIO, using the
		 * revised register offset when the device advertises it.
		 */
		param->value =
			vmw_mmio_read(fifo_mem +
				      ((fifo->capabilities &
					SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
				       SVGA_FIFO_3D_HWVERSION_REVISED :
				       SVGA_FIFO_3D_HWVERSION));
		break;
	}
	case DRM_VMW_PARAM_MAX_SURF_MEMORY:
		/*
		 * For clients that have not (yet) declared themselves
		 * gb_aware, report half the MOB memory as legacy surface
		 * memory on guest-backed-object devices.
		 */
		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
		    !vmw_fp->gb_aware)
			param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
		else
			param->value = dev_priv->memory_size;
		break;
	case DRM_VMW_PARAM_3D_CAPS_SIZE:
		/*
		 * Size of the buffer GET_3D_CAP will fill; depends on
		 * whether the client is gb_aware (raw devcap array),
		 * gb device but legacy client (compat record), or a
		 * non-gb device (FIFO caps range).
		 */
		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
		    vmw_fp->gb_aware)
			param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
		else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
			param->value = sizeof(struct svga_3d_compat_cap) +
				sizeof(uint32_t);
		else
			param->value = (SVGA_FIFO_3D_CAPS_LAST -
					SVGA_FIFO_3D_CAPS + 1) *
				sizeof(uint32_t);
		break;
	case DRM_VMW_PARAM_MAX_MOB_MEMORY:
		/*
		 * Side effect: querying MOB memory marks this client as
		 * guest-backed-object aware, which changes the answers
		 * for MAX_SURF_MEMORY and 3D_CAPS_SIZE above.
		 */
		vmw_fp->gb_aware = true;
		param->value = dev_priv->max_mob_pages * PAGE_SIZE;
		break;
	case DRM_VMW_PARAM_MAX_MOB_SIZE:
		param->value = dev_priv->max_mob_size;
		break;
	case DRM_VMW_PARAM_SCREEN_TARGET:
		param->value =
			(dev_priv->active_display_unit == vmw_du_screen_target);
		break;
	case DRM_VMW_PARAM_DX:
		param->value = dev_priv->has_dx;
		break;
	case DRM_VMW_PARAM_SM4_1:
		param->value = dev_priv->has_sm4_1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
133 
/*
 * vmw_mask_multisample - Filter capability values reported to user-space.
 *
 * Some user-space versions probe MULTISAMPLE_MASKABLESAMPLES to discover
 * the sample count supported by the virtual device.  Multisample counts
 * were never supported for backing MOBs, so that capability is always
 * reported as 0; every other capability value passes through unchanged.
 */
static u32 vmw_mask_multisample(unsigned int cap, u32 fmt_value)
{
	return (cap == SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES) ?
		0 : fmt_value;
}
146 
vmw_fill_compat_cap(struct vmw_private * dev_priv,void * bounce,size_t size)147 static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
148 			       size_t size)
149 {
150 	struct svga_3d_compat_cap *compat_cap =
151 		(struct svga_3d_compat_cap *) bounce;
152 	unsigned int i;
153 	size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
154 	unsigned int max_size;
155 
156 	if (size < pair_offset)
157 		return -EINVAL;
158 
159 	max_size = (size - pair_offset) / sizeof(SVGA3dCapPair);
160 
161 	if (max_size > SVGA3D_DEVCAP_MAX)
162 		max_size = SVGA3D_DEVCAP_MAX;
163 
164 	compat_cap->header.length =
165 		(pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
166 	compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
167 
168 	spin_lock(&dev_priv->cap_lock);
169 	for (i = 0; i < max_size; ++i) {
170 		vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
171 		compat_cap->pairs[i][0] = i;
172 		compat_cap->pairs[i][1] = vmw_mask_multisample
173 			(i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
174 	}
175 	spin_unlock(&dev_priv->cap_lock);
176 
177 	return 0;
178 }
179 
180 
/**
 * vmw_get_cap_3d_ioctl - Implement the DRM_VMW_GET_3D_CAP ioctl.
 *
 * @dev: DRM device.
 * @data: Ioctl argument, a struct drm_vmw_get_3d_cap_arg holding the
 * user buffer pointer and its maximum size.
 * @file_priv: DRM file private; carries the per-open gb_aware flag.
 *
 * Copies the device's 3D capability data to user-space via a zeroed
 * bounce buffer.  Three formats exist, matching DRM_VMW_PARAM_3D_CAPS_SIZE:
 * a raw devcap array (gb device, gb_aware client), a legacy compat
 * record (gb device, legacy client), or a snapshot of the FIFO caps
 * range (non-gb device).
 *
 * Return: 0 on success, -EINVAL on bad arguments, -ENOMEM on allocation
 * failure, -EFAULT if the copy to user-space fails.
 */
int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vmw_get_3d_cap_arg *arg =
		(struct drm_vmw_get_3d_cap_arg *) data;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t size;
	u32 *fifo_mem;
	void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
	void *bounce;
	int ret;
	bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
		VMW_DEBUG_USER("Illegal GET_3D_CAP argument.\n");
		return -EINVAL;
	}

	/* Full size of the caps data for the applicable format. */
	if (gb_objects && vmw_fp->gb_aware)
		size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
	else if (gb_objects)
		size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t);
	else
		size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
			sizeof(uint32_t);

	/* Never copy more than the caller's buffer can hold. */
	if (arg->max_size < size)
		size = arg->max_size;

	/* Zeroed bounce buffer: truncated tails read as zero caps. */
	bounce = vzalloc(size);
	if (unlikely(bounce == NULL)) {
		DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
		return -ENOMEM;
	}

	if (gb_objects && vmw_fp->gb_aware) {
		int i, num;
		uint32_t *bounce32 = (uint32_t *) bounce;

		num = size / sizeof(uint32_t);
		if (num > SVGA3D_DEVCAP_MAX)
			num = SVGA3D_DEVCAP_MAX;

		/* Read each devcap via the register pair under cap_lock. */
		spin_lock(&dev_priv->cap_lock);
		for (i = 0; i < num; ++i) {
			vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
			*bounce32++ = vmw_mask_multisample
				(i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
		}
		spin_unlock(&dev_priv->cap_lock);
	} else if (gb_objects) {
		ret = vmw_fill_compat_cap(dev_priv, bounce, size);
		if (unlikely(ret != 0))
			goto out_err;
	} else {
		/* Non-gb device: caps live directly in FIFO MMIO. */
		fifo_mem = dev_priv->mmio_virt;
		memcpy(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
	}

	/* copy_to_user returns bytes NOT copied; nonzero means fault. */
	ret = copy_to_user(buffer, bounce, size);
	if (ret)
		ret = -EFAULT;
out_err:
	vfree(bounce);

	if (unlikely(ret != 0))
		DRM_ERROR("Failed to report 3D caps info.\n");

	return ret;
}
252 
/**
 * vmw_present_ioctl - Implement the DRM_VMW_PRESENT ioctl.
 *
 * @dev: DRM device.
 * @data: Ioctl argument, a struct drm_vmw_present_arg: framebuffer id,
 * surface id, destination offset and a user-space clip rect array.
 * @file_priv: DRM file private, used for object lookups.
 *
 * Copies the clip rects in from user-space, looks up the target
 * framebuffer and source surface, and presents the surface through
 * vmw_kms_present().  Presenting with zero clips is a successful no-op.
 *
 * Return: 0 on success or a negative error code.
 */
int vmw_present_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_present_arg *arg =
		(struct drm_vmw_present_arg *)data;
	struct vmw_surface *surface;
	struct drm_vmw_rect __user *clips_ptr;
	struct drm_vmw_rect *clips = NULL;
	struct drm_framebuffer *fb;
	struct vmw_framebuffer *vfb;
	struct vmw_resource *res;
	uint32_t num_clips;
	int ret;

	num_clips = arg->num_clips;
	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;

	/* Nothing to present. */
	if (unlikely(num_clips == 0))
		return 0;

	if (clips_ptr == NULL) {
		VMW_DEBUG_USER("Variable clips_ptr must be specified.\n");
		ret = -EINVAL;
		goto out_clips;
	}

	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
	if (clips == NULL) {
		DRM_ERROR("Failed to allocate clip rect list.\n");
		ret = -ENOMEM;
		goto out_clips;
	}

	/* copy_from_user returns bytes NOT copied; nonzero means fault. */
	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
	if (ret) {
		DRM_ERROR("Failed to copy clip rects from userspace.\n");
		ret = -EFAULT;
		goto out_no_copy;
	}

	drm_modeset_lock_all(dev);

	/* Takes a framebuffer reference, dropped at out_no_ttm_lock. */
	fb = drm_framebuffer_lookup(dev, file_priv, arg->fb_id);
	if (!fb) {
		VMW_DEBUG_USER("Invalid framebuffer id.\n");
		ret = -ENOENT;
		goto out_no_fb;
	}
	vfb = vmw_framebuffer_to_vfb(fb);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out_no_ttm_lock;

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
					      user_surface_converter,
					      &res);
	if (ret)
		goto out_no_surface;

	surface = vmw_res_to_srf(res);
	ret = vmw_kms_present(dev_priv, file_priv,
			      vfb, surface, arg->sid,
			      arg->dest_x, arg->dest_y,
			      clips, num_clips);

	/* vmw_user_surface_lookup takes one ref so does new_fb */
	vmw_surface_unreference(&surface);

	/* Unwind in reverse order of acquisition. */
out_no_surface:
	ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock:
	drm_framebuffer_put(fb);
out_no_fb:
	drm_modeset_unlock_all(dev);
out_no_copy:
	kfree(clips);
out_clips:
	return ret;
}
335 
/**
 * vmw_present_readback_ioctl - Implement the DRM_VMW_PRESENT_READBACK ioctl.
 *
 * @dev: DRM device.
 * @data: Ioctl argument, a struct drm_vmw_present_readback_arg:
 * framebuffer id, optional fence representation pointer and a
 * user-space clip rect array.
 * @file_priv: DRM file private, used for object lookups.
 *
 * Reads screen contents back into a buffer-backed framebuffer for the
 * given clip rects via vmw_kms_readback().  A readback with zero clips
 * is a successful no-op.
 *
 * Return: 0 on success or a negative error code.
 */
int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_present_readback_arg *arg =
		(struct drm_vmw_present_readback_arg *)data;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)
		(unsigned long)arg->fence_rep;
	struct drm_vmw_rect __user *clips_ptr;
	struct drm_vmw_rect *clips = NULL;
	struct drm_framebuffer *fb;
	struct vmw_framebuffer *vfb;
	uint32_t num_clips;
	int ret;

	num_clips = arg->num_clips;
	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;

	/* Nothing to read back. */
	if (unlikely(num_clips == 0))
		return 0;

	if (clips_ptr == NULL) {
		VMW_DEBUG_USER("Argument clips_ptr must be specified.\n");
		ret = -EINVAL;
		goto out_clips;
	}

	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
	if (clips == NULL) {
		DRM_ERROR("Failed to allocate clip rect list.\n");
		ret = -ENOMEM;
		goto out_clips;
	}

	/* copy_from_user returns bytes NOT copied; nonzero means fault. */
	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
	if (ret) {
		DRM_ERROR("Failed to copy clip rects from userspace.\n");
		ret = -EFAULT;
		goto out_no_copy;
	}

	drm_modeset_lock_all(dev);

	/* Takes a framebuffer reference, dropped at out_no_ttm_lock. */
	fb = drm_framebuffer_lookup(dev, file_priv, arg->fb_id);
	if (!fb) {
		VMW_DEBUG_USER("Invalid framebuffer id.\n");
		ret = -ENOENT;
		goto out_no_fb;
	}

	vfb = vmw_framebuffer_to_vfb(fb);
	if (!vfb->bo) {
		VMW_DEBUG_USER("Framebuffer not buffer backed.\n");
		ret = -EINVAL;
		/* ttm lock not taken yet; only the fb ref needs dropping. */
		goto out_no_ttm_lock;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out_no_ttm_lock;

	ret = vmw_kms_readback(dev_priv, file_priv,
			       vfb, user_fence_rep,
			       clips, num_clips);

	ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock:
	drm_framebuffer_put(fb);
out_no_fb:
	drm_modeset_unlock_all(dev);
out_no_copy:
	kfree(clips);
out_clips:
	return ret;
}
412 
413 
414 #ifndef __NetBSD__		/* XXX vmwgfx fops ping */
415 
416 /**
417  * vmw_fops_poll - wrapper around the drm_poll function
418  *
419  * @filp: See the linux fops poll documentation.
420  * @wait: See the linux fops poll documentation.
421  *
422  * Wrapper around the drm_poll function that makes sure the device is
423  * processing the fifo if drm_poll decides to wait.
424  */
vmw_fops_poll(struct file * filp,struct poll_table_struct * wait)425 __poll_t vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
426 {
427 	struct drm_file *file_priv = filp->private_data;
428 	struct vmw_private *dev_priv =
429 		vmw_priv(file_priv->minor->dev);
430 
431 	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
432 	return drm_poll(filp, wait);
433 }
434 
435 
436 /**
437  * vmw_fops_read - wrapper around the drm_read function
438  *
439  * @filp: See the linux fops read documentation.
440  * @buffer: See the linux fops read documentation.
441  * @count: See the linux fops read documentation.
 * @offset: See the linux fops read documentation.
443  *
444  * Wrapper around the drm_read function that makes sure the device is
445  * processing the fifo if drm_read decides to wait.
446  */
vmw_fops_read(struct file * filp,char __user * buffer,size_t count,loff_t * offset)447 ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
448 		      size_t count, loff_t *offset)
449 {
450 	struct drm_file *file_priv = filp->private_data;
451 	struct vmw_private *dev_priv =
452 		vmw_priv(file_priv->minor->dev);
453 
454 	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
455 	return drm_read(filp, buffer, count, offset);
456 }
457 
458 #endif	/* __NetBSD__ */
459