/*	$NetBSD: vmwgfx_ioctl.c,v 1.2 2018/08/27 04:58:37 riastradh Exp $	*/

/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vmwgfx_ioctl.c,v 1.2 2018/08/27 04:58:37 riastradh Exp $");

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include "vmwgfx_kms.h"
#include "device_include/svga3d_caps.h"

struct svga_3d_compat_cap {
	SVGA3dCapsRecordHeader header;
	SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX];
};

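/**
 * vmw_getparam_ioctl - Ioctl used by user-space to query device parameters
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_getparam_arg naming the requested
 * parameter; its value member is filled in on success.
 * @file_priv: Identifies the caller.
 *
 * Reports device parameters such as hardware and FIFO capabilities,
 * maximum framebuffer, MOB and surface memory sizes, and the size of the
 * 3D capability record. Querying DRM_VMW_PARAM_MAX_MOB_MEMORY also marks
 * the calling client as guest-backed-object aware.
 */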
int vmw_getparam_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_getparam_arg *param =
	    (struct drm_vmw_getparam_arg *)data;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	switch (param->param) {
	case DRM_VMW_PARAM_NUM_STREAMS:
		param->value = vmw_overlay_num_overlays(dev_priv);
		break;
	case DRM_VMW_PARAM_NUM_FREE_STREAMS:
		param->value = vmw_overlay_num_free_overlays(dev_priv);
		break;
	case DRM_VMW_PARAM_3D:
		param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
		break;
	case DRM_VMW_PARAM_HW_CAPS:
		param->value = dev_priv->capabilities;
		break;
	case DRM_VMW_PARAM_FIFO_CAPS:
		param->value = dev_priv->fifo.capabilities;
		break;
	case DRM_VMW_PARAM_MAX_FB_SIZE:
		param->value = dev_priv->prim_bb_mem;
		break;
	case DRM_VMW_PARAM_FIFO_HW_VERSION:
	{
		u32 *fifo_mem = dev_priv->mmio_virt;
		const struct vmw_fifo_state *fifo = &dev_priv->fifo;

		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
			param->value = SVGA3D_HWVERSION_WS8_B1;
			break;
		}

		param->value =
			vmw_mmio_read(fifo_mem +
				      ((fifo->capabilities &
					SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
				       SVGA_FIFO_3D_HWVERSION_REVISED :
				       SVGA_FIFO_3D_HWVERSION));
		break;
	}
	case DRM_VMW_PARAM_MAX_SURF_MEMORY:
		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
		    !vmw_fp->gb_aware)
			param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
		else
			param->value = dev_priv->memory_size;
		break;
	case DRM_VMW_PARAM_3D_CAPS_SIZE:
		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
		    vmw_fp->gb_aware)
			param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
		else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
			param->value = sizeof(struct svga_3d_compat_cap) +
				sizeof(uint32_t);
		else
			param->value = (SVGA_FIFO_3D_CAPS_LAST -
					SVGA_FIFO_3D_CAPS + 1) *
				sizeof(uint32_t);
		break;
	case DRM_VMW_PARAM_MAX_MOB_MEMORY:
		vmw_fp->gb_aware = true;
		param->value = dev_priv->max_mob_pages * PAGE_SIZE;
		break;
	case DRM_VMW_PARAM_MAX_MOB_SIZE:
		param->value = dev_priv->max_mob_size;
		break;
	case DRM_VMW_PARAM_SCREEN_TARGET:
		param->value =
			(dev_priv->active_display_unit == vmw_du_screen_target);
		break;
	case DRM_VMW_PARAM_DX:
		param->value = dev_priv->has_dx;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static u32 vmw_mask_multisample(unsigned int cap, u32 fmt_value)
{
	/* If the header is updated, update the format test as well! */
	BUILD_BUG_ON(SVGA3D_DEVCAP_DXFMT_BC5_UNORM + 1 != SVGA3D_DEVCAP_MAX);

	if (cap >= SVGA3D_DEVCAP_DXFMT_X8R8G8B8 &&
	    cap <= SVGA3D_DEVCAP_DXFMT_BC5_UNORM)
		fmt_value &= ~(SVGADX_DXFMT_MULTISAMPLE_2 |
			       SVGADX_DXFMT_MULTISAMPLE_4 |
			       SVGADX_DXFMT_MULTISAMPLE_8);
	else if (cap == SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES)
		return 0;

	return fmt_value;
}

static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
			       size_t size)
{
	struct svga_3d_compat_cap *compat_cap =
		(struct svga_3d_compat_cap *) bounce;
	unsigned int i;
	size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
	unsigned int max_size;

	if (size < pair_offset)
		return -EINVAL;

	max_size = (size - pair_offset) / sizeof(SVGA3dCapPair);

	if (max_size > SVGA3D_DEVCAP_MAX)
		max_size = SVGA3D_DEVCAP_MAX;

	compat_cap->header.length =
		(pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
	compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;

	spin_lock(&dev_priv->cap_lock);
	for (i = 0; i < max_size; ++i) {
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
		compat_cap->pairs[i][0] = i;
		compat_cap->pairs[i][1] = vmw_mask_multisample
			(i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
	}
	spin_unlock(&dev_priv->cap_lock);

	return 0;
}

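/**
 * vmw_get_cap_3d_ioctl - Ioctl used by user-space to retrieve the 3D
 * capability table
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_get_3d_cap_arg identifying the
 * user-space buffer and its size.
 * @file_priv: Identifies the caller.
 *
 * Depending on whether the device uses guest-backed objects and whether
 * the caller has declared itself guest-backed aware, the capabilities are
 * either read entry by entry from SVGA_REG_DEV_CAP (with multisample bits
 * masked by vmw_mask_multisample()), assembled into a legacy-format record
 * by vmw_fill_compat_cap(), or copied directly from the FIFO. The result
 * is then copied to the user-space buffer via a bounce buffer.
 */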
int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vmw_get_3d_cap_arg *arg =
		(struct drm_vmw_get_3d_cap_arg *) data;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t size;
	u32 *fifo_mem;
	void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
	void *bounce;
	int ret;
	bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
		DRM_ERROR("Illegal GET_3D_CAP argument.\n");
		return -EINVAL;
	}

	if (gb_objects && vmw_fp->gb_aware)
		size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
	else if (gb_objects)
		size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t);
	else
		size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
			sizeof(uint32_t);

	if (arg->max_size < size)
		size = arg->max_size;

	bounce = vzalloc(size);
	if (unlikely(bounce == NULL)) {
		DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
		return -ENOMEM;
	}

	if (gb_objects && vmw_fp->gb_aware) {
		int i, num;
		uint32_t *bounce32 = (uint32_t *) bounce;

		num = size / sizeof(uint32_t);
		if (num > SVGA3D_DEVCAP_MAX)
			num = SVGA3D_DEVCAP_MAX;

		spin_lock(&dev_priv->cap_lock);
		for (i = 0; i < num; ++i) {
			vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
			*bounce32++ = vmw_mask_multisample
				(i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
		}
		spin_unlock(&dev_priv->cap_lock);
	} else if (gb_objects) {
		ret = vmw_fill_compat_cap(dev_priv, bounce, size);
		if (unlikely(ret != 0))
			goto out_err;
	} else {
		fifo_mem = dev_priv->mmio_virt;
		memcpy(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
	}

	ret = copy_to_user(buffer, bounce, size);
	if (ret)
		ret = -EFAULT;
out_err:
	vfree(bounce);

	if (unlikely(ret != 0))
		DRM_ERROR("Failed to report 3D caps info.\n");

	return ret;
}

int vmw_present_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_present_arg *arg =
		(struct drm_vmw_present_arg *)data;
	struct vmw_surface *surface;
	struct drm_vmw_rect __user *clips_ptr;
	struct drm_vmw_rect *clips = NULL;
	struct drm_framebuffer *fb;
	struct vmw_framebuffer *vfb;
	struct vmw_resource *res;
	uint32_t num_clips;
	int ret;

	num_clips = arg->num_clips;
	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;

	if (unlikely(num_clips == 0))
		return 0;

	if (clips_ptr == NULL) {
		DRM_ERROR("Variable clips_ptr must be specified.\n");
		ret = -EINVAL;
		goto out_clips;
	}

	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
	if (clips == NULL) {
		DRM_ERROR("Failed to allocate clip rect list.\n");
		ret = -ENOMEM;
		goto out_clips;
	}

	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
	if (ret) {
		DRM_ERROR("Failed to copy clip rects from userspace.\n");
		ret = -EFAULT;
		goto out_no_copy;
	}

	drm_modeset_lock_all(dev);

	fb = drm_framebuffer_lookup(dev, arg->fb_id);
	if (!fb) {
		DRM_ERROR("Invalid framebuffer id.\n");
		ret = -ENOENT;
		goto out_no_fb;
	}
	vfb = vmw_framebuffer_to_vfb(fb);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out_no_ttm_lock;

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
					      user_surface_converter,
					      &res);
	if (ret)
		goto out_no_surface;

	surface = vmw_res_to_srf(res);
	ret = vmw_kms_present(dev_priv, file_priv,
			      vfb, surface, arg->sid,
			      arg->dest_x, arg->dest_y,
			      clips, num_clips);

	/* vmw_user_surface_lookup takes one ref, and so does new_fb */
	vmw_surface_unreference(&surface);

out_no_surface:
	ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock:
	drm_framebuffer_unreference(fb);
out_no_fb:
	drm_modeset_unlock_all(dev);
out_no_copy:
	kfree(clips);
out_clips:
	return ret;
}

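/**
 * vmw_present_readback_ioctl - Ioctl used by user-space to read back the
 * contents of a dmabuf-backed framebuffer
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_present_readback_arg describing the
 * framebuffer, the clip rectangles and a user-space fence representation.
 * @file_priv: Identifies the caller.
 *
 * Copies the clip rectangles from user-space, looks up the framebuffer,
 * verifies that it is dmabuf backed, and then hands the readback off to
 * vmw_kms_readback() under the TTM read lock.
 */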
int
vmw_present_readback_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_present_readback_arg *arg =
		(struct drm_vmw_present_readback_arg *)data;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)
		(unsigned long)arg->fence_rep;
	struct drm_vmw_rect __user *clips_ptr;
	struct drm_vmw_rect *clips = NULL;
	struct drm_framebuffer *fb;
	struct vmw_framebuffer *vfb;
	uint32_t num_clips;
	int ret;

	num_clips = arg->num_clips;
	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;

	if (unlikely(num_clips == 0))
		return 0;

	if (clips_ptr == NULL) {
		DRM_ERROR("Argument clips_ptr must be specified.\n");
		ret = -EINVAL;
		goto out_clips;
	}

	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
	if (clips == NULL) {
		DRM_ERROR("Failed to allocate clip rect list.\n");
		ret = -ENOMEM;
		goto out_clips;
	}

	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
	if (ret) {
		DRM_ERROR("Failed to copy clip rects from userspace.\n");
		ret = -EFAULT;
		goto out_no_copy;
	}

	drm_modeset_lock_all(dev);

	fb = drm_framebuffer_lookup(dev, arg->fb_id);
	if (!fb) {
		DRM_ERROR("Invalid framebuffer id.\n");
		ret = -ENOENT;
		goto out_no_fb;
	}

	vfb = vmw_framebuffer_to_vfb(fb);
	if (!vfb->dmabuf) {
		DRM_ERROR("Framebuffer not dmabuf backed.\n");
		ret = -EINVAL;
		goto out_no_ttm_lock;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out_no_ttm_lock;

	ret = vmw_kms_readback(dev_priv, file_priv,
			       vfb, user_fence_rep,
			       clips, num_clips);

	ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock:
	drm_framebuffer_unreference(fb);
out_no_fb:
	drm_modeset_unlock_all(dev);
out_no_copy:
	kfree(clips);
out_clips:
	return ret;
}


/**
 * vmw_fops_poll - wrapper around the drm_poll function
 *
 * @filp: See the linux fops poll documentation.
 * @wait: See the linux fops poll documentation.
 *
 * Wrapper around the drm_poll function that makes sure the device is
 * processing the fifo if drm_poll decides to wait.
 */
unsigned int vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct drm_file *file_priv = filp->private_data;
	struct vmw_private *dev_priv =
		vmw_priv(file_priv->minor->dev);

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	return drm_poll(filp, wait);
}


/**
 * vmw_fops_read - wrapper around the drm_read function
 *
 * @filp: See the linux fops read documentation.
 * @buffer: See the linux fops read documentation.
 * @count: See the linux fops read documentation.
 * @offset: See the linux fops read documentation.
 *
 * Wrapper around the drm_read function that makes sure the device is
 * processing the fifo if drm_read decides to wait.
 */
ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
		      size_t count, loff_t *offset)
{
	struct drm_file *file_priv = filp->private_data;
	struct vmw_private *dev_priv =
		vmw_priv(file_priv->minor->dev);

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	return drm_read(filp, buffer, count, offset);
}