/* $NetBSD: virtgpu_gem.c,v 1.2 2018/08/27 04:58:37 riastradh Exp $ */

/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtgpu_gem.c,v 1.2 2018/08/27 04:58:37 riastradh Exp $");

#include <drm/drmP.h>
#include "virtgpu_drv.h"

/* GEM free callback: drop the driver's reference to the backing object. */
void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(gem_obj);

	if (obj)
		virtio_gpu_object_unref(&obj);
}

/* Allocate a virtio-gpu buffer object of the given size. */
struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
						  size_t size, bool kernel,
						  bool pinned)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object *obj;
	int ret;

	ret = virtio_gpu_object_create(vgdev, size, kernel, pinned, &obj);
	if (ret)
		return ERR_PTR(ret);

	return obj;
}

/* Allocate a buffer object and create a userspace handle for it. */
int virtio_gpu_gem_create(struct drm_file *file,
			  struct drm_device *dev,
			  uint64_t size,
			  struct drm_gem_object **obj_p,
			  uint32_t *handle_p)
{
	struct virtio_gpu_object *obj;
	int ret;
	u32 handle;

	obj = virtio_gpu_alloc_object(dev, size, false, false);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->gem_base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->gem_base);
		return ret;
	}

	*obj_p = &obj->gem_base;

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->gem_base);

	*handle_p = handle;
	return 0;
}

/*
 * Dumb-buffer ioctl: compute pitch and size, allocate a buffer object,
 * create a host resource for it, and attach the object as backing store.
 */
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
				struct drm_device *dev,
				struct drm_mode_create_dumb *args)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_gem_object *gobj;
	struct virtio_gpu_object *obj;
	int ret;
	uint32_t pitch;
	uint32_t resid;

	pitch = args->width * ((args->bpp + 1) / 8);
	args->size = pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	ret = virtio_gpu_gem_create(file_priv, dev, args->size, &gobj,
				    &args->handle);
	if (ret)
		goto fail;

	virtio_gpu_resource_id_get(vgdev, &resid);
	virtio_gpu_cmd_create_resource(vgdev, resid,
				       2, args->width, args->height);

	/* attach the object to the resource */
	obj = gem_to_virtio_gpu_obj(gobj);
	ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL);
	if (ret)
		goto fail;

	obj->dumb = true;
	args->pitch = pitch;
	return ret;

fail:
	return ret;
}

int virtio_gpu_mode_dumb_destroy(struct drm_file *file_priv,
				 struct drm_device *dev,
				 uint32_t handle)
{
	return drm_gem_handle_delete(file_priv, handle);
}

/* Look up a handle and return the fake mmap offset of its buffer object. */
int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
			      struct drm_device *dev,
			      uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct virtio_gpu_object *obj;

	BUG_ON(!offset_p);
	gobj = drm_gem_object_lookup(dev, file_priv, handle);
	if (gobj == NULL)
		return -ENOENT;
	obj = gem_to_virtio_gpu_obj(gobj);
	*offset_p = virtio_gpu_object_mmap_offset(obj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

/* On GEM open, attach the resource to the file's 3D context (virgl only). */
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
			       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
	int r;

	if (!vgdev->has_virgl_3d)
		return 0;

	r = virtio_gpu_object_reserve(qobj, false);
	if (r)
		return r;

	virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
					       qobj->hw_res_handle);
	virtio_gpu_object_unreserve(qobj);
	return 0;
}

/* On GEM close, detach the resource from the file's 3D context (virgl only). */
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
				 struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
	int r;

	if (!vgdev->has_virgl_3d)
		return;

	r = virtio_gpu_object_reserve(qobj, false);
	if (r)
		return;

	virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
					       qobj->hw_res_handle);
	virtio_gpu_object_unreserve(qobj);
}