/*	$NetBSD: virtgpu_vq.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $	*/

/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtgpu_vq.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $");

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

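/*
 * Commands and small responses live inline, immediately after the
 * struct virtio_gpu_vbuffer header, in a single cache allocation:
 *
 *   [ struct virtio_gpu_vbuffer | cmd (<= 96 bytes) | resp (<= 24 bytes) ]
 *
 * Larger responses use a separately allocated buffer supplied by the caller.
 */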
#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)

static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

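/*
 * Virtqueue interrupt callbacks.  Completed buffers are not reclaimed here;
 * the work is deferred to the per-queue dequeue work items.
 */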
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

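/*
 * Create the kmem cache backing vbuffer allocations; every object is large
 * enough for the vbuffer header plus the inline command and response areas.
 */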
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

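/*
 * Allocate a vbuffer.  The command always uses the inline area; the response
 * uses the inline area when it fits, otherwise the caller-supplied resp_buf.
 */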
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
	if (!vbuf)
		return ERR_PTR(-ENOMEM);

	BUG_ON(size > MAX_INLINE_CMD_SIZE);
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, size,
				   sizeof(struct virtio_gpu_ctrl_hdr),
				   NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kvfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

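/*
 * Move all completed buffers from the virtqueue onto reclaim_list.
 * Called with the corresponding queue lock held.
 */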
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

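/*
 * Control queue work item: drain completed buffers, log error responses,
 * advance the last completed fence id, run per-buffer response callbacks,
 * signal fences and free the vbuffers.
 */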
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
				DRM_ERROR("response 0x%x (command 0x%x)\n",
					  le32_to_cpu(resp->type),
					  le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		if (entry->objs)
			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
	int ret, s, i;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *pg;

	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return NULL;
	}

	for_each_sg(sgt->sgl, sg, *sg_ents, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}

		s = min_t(int, PAGE_SIZE, size);
		sg_set_page(sg, pg, s, 0);

		size -= s;
		data += s;
	}

	return sgt;
}

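/*
 * Add one command to the control virtqueue: the inline command buffer, an
 * optional data scatterlist (vout) and an optional response buffer.  Called
 * with ctrlq.qlock held; the lock is dropped and retaken while waiting for
 * ring space.  Returns true when the host needs to be notified.
 */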
static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
						struct virtio_gpu_vbuffer *vbuf,
						struct scatterlist *vout)
		__releases(&vgdev->ctrlq.qlock)
		__acquires(&vgdev->ctrlq.qlock)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *sgs[3], vcmd, vresp;
	int outcnt = 0, incnt = 0;
	bool notify = false;
	int ret;

	if (!vgdev->vqs_ready)
		return notify;

	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	sgs[outcnt + incnt] = &vcmd;
	outcnt++;

	if (vout) {
		sgs[outcnt + incnt] = vout;
		outcnt++;
	}

	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
		spin_lock(&vgdev->ctrlq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
			(struct virtio_gpu_ctrl_hdr *)vbuf->buf);

		notify = virtqueue_kick_prepare(vq);
	}
	return notify;
}

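/*
 * Queue a control command, optionally emitting a fence into its header.
 * Ring space is reserved before the fence id is emitted so that fence ids
 * reach the host in order.  Data buffers in vmalloc space are converted to
 * a scatterlist first.
 */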
static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
						struct virtio_gpu_vbuffer *vbuf,
						struct virtio_gpu_ctrl_hdr *hdr,
						struct virtio_gpu_fence *fence)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *vout = NULL, sg;
	struct sg_table *sgt = NULL;
	bool notify;
	int outcnt = 0;

	if (vbuf->data_size) {
		if (is_vmalloc_addr(vbuf->data_buf)) {
			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
					     &outcnt);
			if (!sgt)
				return;
			vout = sgt->sgl;
		} else {
			sg_init_one(&sg, vbuf->data_buf, vbuf->data_size);
			vout = &sg;
			outcnt = 1;
		}
	}

again:
	spin_lock(&vgdev->ctrlq.qlock);

	/*
	 * Make sure we have enough space in the virtqueue.  If not,
	 * wait here until we have.
	 *
	 * Without that virtio_gpu_queue_ctrl_buffer_locked might have
	 * to wait for free space, which can result in fence ids being
	 * submitted out-of-order.
	 */
	if (vq->num_free < 2 + outcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
		goto again;
	}

	if (hdr && fence) {
		virtio_gpu_fence_emit(vgdev, hdr, fence);
		if (vbuf->objs) {
			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
			virtio_gpu_array_unlock_resv(vbuf->objs);
		}
	}
	notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout);
	spin_unlock(&vgdev->ctrlq.qlock);
	if (notify) {
		if (vgdev->disable_notify)
			vgdev->pending_notify = true;
		else
			virtqueue_notify(vgdev->ctrlq.vq);
	}

	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
}

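/*
 * Batch host notifications: while notifications are disabled, remember that
 * a kick is pending and issue it once they are re-enabled.
 */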
void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev)
{
	vgdev->disable_notify = true;
}

void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev)
{
	vgdev->disable_notify = false;

	if (!vgdev->pending_notify)
		return;
	vgdev->pending_notify = false;
	virtqueue_notify(vgdev->ctrlq.vq);
}

static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					 struct virtio_gpu_vbuffer *vbuf)
{
	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL, NULL);
}

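/* Queue an update on the cursor virtqueue and kick the host if needed. */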
static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	bool notify;
	int ret;
	int outcnt;

	if (!vgdev->vqs_ready)
		return;

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
			(struct virtio_gpu_ctrl_hdr *)vbuf->buf);

		notify = virtqueue_kick_prepare(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (notify)
		virtqueue_notify(vq);
}

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
	bo->created = true;
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
						  uint32_t resource_id,
						  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}

static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -1;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

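/*
 * Fetch a capability set from the host and cache the response.  If another
 * task already created a cache entry for the same id/version, reuse it and
 * drop the one allocated here.
 */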
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

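/*
 * Request the EDID for every scanout; the responses are handled by
 * virtio_gpu_cmd_get_edid_cb().
 */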
int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct drm_virtgpu_3d_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct drm_virtgpu_3d_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

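/*
 * Pin the object's shmem pages, map them for DMA when the transport uses
 * the DMA API, and attach the resulting memory entries as backing store
 * for the host resource.
 */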
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     struct virtio_gpu_fence *fence)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si, nents, ret;

	if (WARN_ON_ONCE(!obj->created))
		return -EINVAL;
	if (WARN_ON_ONCE(obj->pages))
		return -EINVAL;

	ret = drm_gem_shmem_pin(&obj->base.base);
	if (ret < 0)
		return -EINVAL;

	obj->pages = drm_gem_shmem_get_sg_table(&obj->base.base);
	if (obj->pages == NULL) {
		drm_gem_shmem_unpin(&obj->base.base);
		return -EINVAL;
	}

	if (use_dma_api) {
		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
					 obj->pages->sgl, obj->pages->nents,
					 DMA_TO_DEVICE);
		nents = obj->mapped;
	} else {
		nents = obj->pages->nents;
	}

	/* gets freed when the ring has consumed it */
	ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
			     GFP_KERNEL);
	if (!ents) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(obj->pages->sgl, sg, nents, si) {
		ents[si].addr = cpu_to_le64(use_dma_api
					    ? sg_dma_address(sg)
					    : sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}

	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents,
					       fence);
	return 0;
}

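/*
 * Detach the backing store from the host resource.  When DMA mappings
 * exist, wait for the host to process the detach before tearing them down.
 */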
void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (WARN_ON_ONCE(!obj->pages))
		return;

	if (use_dma_api && obj->mapped) {
		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
		/* detach backing and wait for the host to process it ... */
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
		dma_fence_wait(&fence->f, true);
		dma_fence_put(&fence->f);

		/* ... then tear down iommu mappings */
		dma_unmap_sg(vgdev->vdev->dev.parent,
			     obj->pages->sgl, obj->mapped,
			     DMA_TO_DEVICE);
		obj->mapped = 0;
	} else {
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
	}

	sg_free_table(obj->pages);
	obj->pages = NULL;

	drm_gem_shmem_unpin(&obj->base.base);
}

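/* Push the current cursor state for an output to the host via the cursor queue. */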
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}
1177