/*	$NetBSD: virtgpu_gem.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $	*/

/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtgpu_gem.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $");

#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>

#include "virtgpu_drv.h"

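/*
 * Create a virtio-gpu backed GEM object from the given parameters and
 * register a handle for it in the caller's DRM file.  On success the
 * object is returned through obj_p and the handle through handle_p;
 * the handle holds the only remaining reference on the object.
 */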
int virtio_gpu_gem_create(struct drm_file *file,
			  struct drm_device *dev,
			  struct virtio_gpu_object_params *params,
			  struct drm_gem_object **obj_p,
			  uint32_t *handle_p)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object *obj;
	int ret;
	u32 handle;

	ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
	if (ret < 0)
		return ret;

	ret = drm_gem_handle_create(file, &obj->base.base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base.base);
		return ret;
	}

	*obj_p = &obj->base.base;

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(&obj->base.base);

	*handle_p = handle;
	return 0;
}

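/*
 * DRM "dumb buffer" create entry point: only 32 bpp is supported, the
 * pitch is width * 4 bytes, and the allocation is rounded up to a whole
 * number of pages before the object is created.
 */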
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
				struct drm_device *dev,
				struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gobj;
	struct virtio_gpu_object_params params = { 0 };
	int ret;
	uint32_t pitch;

	if (args->bpp != 32)
		return -EINVAL;

	pitch = args->width * 4;
	args->size = pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	params.format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
	params.width = args->width;
	params.height = args->height;
	params.size = args->size;
	params.dumb = true;
	ret = virtio_gpu_gem_create(file_priv, dev, &params, &gobj,
				    &args->handle);
	if (ret)
		goto fail;

	args->pitch = pitch;
	return ret;

fail:
	return ret;
}

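/*
 * Look up a dumb-buffer handle and report the fake mmap offset that
 * userland must pass to mmap() in order to map the object.
 */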
int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
			      struct drm_device *dev,
			      uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;

	BUG_ON(!offset_p);
	gobj = drm_gem_object_lookup(file_priv, handle);
	if (gobj == NULL)
		return -ENOENT;
	*offset_p = drm_vma_node_offset_addr(&gobj->vma_node);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

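/*
 * Called when a GEM object gains a handle in a DRM file.  With virgl 3D
 * enabled, attach the resource to the file's rendering context so the
 * host knows that context may use it.
 */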
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
			       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object_array *objs;

	if (!vgdev->has_virgl_3d)
		return 0;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return -ENOMEM;
	virtio_gpu_array_add_obj(objs, obj);

	virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
					       objs);
	return 0;
}

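/*
 * Called when the last handle on a GEM object in a DRM file goes away.
 * With virgl 3D enabled, detach the resource from the file's rendering
 * context again.
 */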
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
				 struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object_array *objs;

	if (!vgdev->has_virgl_3d)
		return;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return;
	virtio_gpu_array_add_obj(objs, obj);

	virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
					       objs);
}

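/*
 * Object arrays collect the GEM objects referenced by a virtio command
 * so that references, reservation locks and fences can be handled as a
 * group.  Allocate an array with room for nents objects.
 */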
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
{
	struct virtio_gpu_object_array *objs;
	size_t size = sizeof(*objs) + sizeof(objs->objs[0]) * nents;

	objs = kmalloc(size, GFP_KERNEL);
	if (!objs)
		return NULL;

	objs->nents = 0;
	objs->total = nents;
	return objs;
}

static void virtio_gpu_array_free(struct virtio_gpu_object_array *objs)
{
	kfree(objs);
}

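/*
 * Build an object array by looking up nents GEM handles in a DRM file.
 * Returns NULL, dropping any references already taken, if a handle does
 * not resolve.
 */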
struct virtio_gpu_object_array*
virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents)
{
	struct virtio_gpu_object_array *objs;
	u32 i;

	objs = virtio_gpu_array_alloc(nents);
	if (!objs)
		return NULL;

	for (i = 0; i < nents; i++) {
		objs->objs[i] = drm_gem_object_lookup(drm_file, handles[i]);
		if (!objs->objs[i]) {
			objs->nents = i;
			virtio_gpu_array_put_free(objs);
			return NULL;
		}
	}
	objs->nents = i;
	return objs;
}

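/*
 * Append an object to the array, taking a reference on it.  Adding more
 * objects than the array was allocated for only triggers a warning and
 * is otherwise ignored.
 */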
void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
			      struct drm_gem_object *obj)
{
	if (WARN_ON_ONCE(objs->nents == objs->total))
		return;

	drm_gem_object_get(obj);
	objs->objs[objs->nents] = obj;
	objs->nents++;
}

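/*
 * Lock the reservation objects of all entries, using the ww-mutex based
 * drm_gem_lock_reservations() helper when more than one object is
 * involved.
 */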
int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
{
	int ret;

	if (objs->nents == 1) {
		ret = dma_resv_lock_interruptible(objs->objs[0]->resv, NULL);
	} else {
		ret = drm_gem_lock_reservations(objs->objs, objs->nents,
						&objs->ticket);
	}
	return ret;
}

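/* Undo virtio_gpu_array_lock_resv(). */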
void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs)
{
	if (objs->nents == 1) {
		dma_resv_unlock(objs->objs[0]->resv);
	} else {
		drm_gem_unlock_reservations(objs->objs, objs->nents,
					    &objs->ticket);
	}
}

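/*
 * Install the given fence as the exclusive fence on every object in the
 * array.
 */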
void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
				struct dma_fence *fence)
{
	int i;

	for (i = 0; i < objs->nents; i++)
		dma_resv_add_excl_fence(objs->objs[i]->resv, fence);
}

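/*
 * Drop the references held by the array and free the array itself.
 */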
void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
{
	u32 i;

	for (i = 0; i < objs->nents; i++)
		drm_gem_object_put_unlocked(objs->objs[i]);
	virtio_gpu_array_free(objs);
}

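/*
 * Queue an array on the device's obj_free_list and schedule the worker
 * that will drop its references later, for callers that cannot release
 * the objects directly.
 */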
void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_object_array *objs)
{
	spin_lock(&vgdev->obj_free_lock);
	list_add_tail(&objs->next, &vgdev->obj_free_list);
	spin_unlock(&vgdev->obj_free_lock);
	schedule_work(&vgdev->obj_free_work);
}

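/*
 * Work handler that drains obj_free_list, releasing each queued object
 * array with the list lock dropped.
 */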
void virtio_gpu_array_put_free_work(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device, obj_free_work);
	struct virtio_gpu_object_array *objs;

	spin_lock(&vgdev->obj_free_lock);
	while (!list_empty(&vgdev->obj_free_list)) {
		objs = list_first_entry(&vgdev->obj_free_list,
					struct virtio_gpu_object_array, next);
		list_del(&objs->next);
		spin_unlock(&vgdev->obj_free_lock);
		virtio_gpu_array_put_free(objs);
		spin_lock(&vgdev->obj_free_lock);
	}
	spin_unlock(&vgdev->obj_free_lock);
}