/*	$NetBSD: dmabuf.c,v 1.3 2021/12/19 11:06:55 riastradh Exp $	*/

/*
 * Copyright 2017 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Xiaoguang Chen
 *    Tina Zhang <tina.zhang@intel.com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dmabuf.c,v 1.3 2021/12/19 11:06:55 riastradh Exp $");

#include <linux/dma-buf.h>
#include <linux/vfio.h>

#include "i915_drv.h"
#include "gvt.h"

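/*
 * Extract the page address from a gen8+ GGTT PTE by masking off the
 * low flag bits; the remaining bits 63:12 are the page's DMA address.
 */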
#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))

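/*
 * Pin/unpin helpers: take and release a reference on the guest page
 * backing @dma_addr through the hypervisor layer, so the page cannot
 * be unmapped while the exported framebuffer object is using it.
 * The @size argument is currently unused; pinning is per 4K page.
 */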
static int vgpu_pin_dma_address(struct intel_vgpu *vgpu,
				unsigned long size,
				dma_addr_t dma_addr)
{
	int ret = 0;

	if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr))
		ret = -EINVAL;

	return ret;
}

static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu,
				   dma_addr_t dma_addr)
{
	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
}

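/*
 * Back the proxy GEM object with the guest framebuffer pages: walk the
 * vGPU's GGTT entries covering the framebuffer, decode each PTE into a
 * DMA address, pin the guest page and record it in a new sg_table.
 * On failure, every page pinned so far is unpinned again.
 */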
static int vgpu_gem_get_pages(
		struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_vgpu *vgpu;
	struct sg_table *st;
	struct scatterlist *sg;
	int i, j, ret;
	gen8_pte_t __iomem *gtt_entries;
	struct intel_vgpu_fb_info *fb_info;
	u32 page_num;

	fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
	if (WARN_ON(!fb_info))
		return -ENODEV;

	vgpu = fb_info->obj->vgpu;
	if (WARN_ON(!vgpu))
		return -ENODEV;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (unlikely(!st))
		return -ENOMEM;

	page_num = obj->base.size >> PAGE_SHIFT;
	ret = sg_alloc_table(st, page_num, GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ret;
	}
	gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
		(fb_info->start >> PAGE_SHIFT);
	for_each_sg(st->sgl, sg, page_num, i) {
		dma_addr_t dma_addr =
			GEN8_DECODE_PTE(readq(&gtt_entries[i]));
		if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) {
			ret = -EINVAL;
			goto out;
		}

		sg->offset = 0;
		sg->length = PAGE_SIZE;
		sg_dma_len(sg) = PAGE_SIZE;
		sg_dma_address(sg) = dma_addr;
	}

	__i915_gem_object_set_pages(obj, st, PAGE_SIZE);
out:
	if (ret) {
		dma_addr_t dma_addr;

		for_each_sg(st->sgl, sg, i, j) {
			dma_addr = sg_dma_address(sg);
			if (dma_addr)
				vgpu_unpin_dma_address(vgpu, dma_addr);
		}
		sg_free_table(st);
		kfree(st);
	}

	return ret;
}

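/*
 * Drop the backing pages of the proxy object: unpin every guest page
 * referenced by the sg_table (only if the object was actually exported
 * as a dma-buf), then free the table itself.
 */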
static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
		struct sg_table *pages)
{
	struct scatterlist *sg;

	if (obj->base.dma_buf) {
		struct intel_vgpu_fb_info *fb_info = obj->gvt_info;
		struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
		struct intel_vgpu *vgpu = obj->vgpu;
		int i;

		for_each_sg(pages->sgl, sg, fb_info->size, i)
			vgpu_unpin_dma_address(vgpu,
					sg_dma_address(sg));
	}

	sg_free_table(pages);
	kfree(pages);
}

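/*
 * kref release callback for a dmabuf_obj.  If the owning vGPU is still
 * active, unlink the object from the vGPU's dmabuf list, release the
 * VFIO device reference and the idr slot, and free it; otherwise the
 * vGPU is already gone and only the orphaned object needs to be freed.
 */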
static void dmabuf_gem_object_free(struct kref *kref)
{
	struct intel_vgpu_dmabuf_obj *obj =
		container_of(kref, struct intel_vgpu_dmabuf_obj, kref);
	struct intel_vgpu *vgpu = obj->vgpu;
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
		list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
			dmabuf_obj = container_of(pos,
					struct intel_vgpu_dmabuf_obj, list);
			if (dmabuf_obj == obj) {
				/* Unlink before the node's memory is freed. */
				list_del(pos);
				intel_gvt_hypervisor_put_vfio_device(vgpu);
				idr_remove(&vgpu->object_idr,
						dmabuf_obj->dmabuf_id);
				kfree(dmabuf_obj->info);
				kfree(dmabuf_obj);
				break;
			}
		}
	} else {
		/* Free the orphan dmabuf_objs here */
		kfree(obj->info);
		kfree(obj);
	}
}

static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_get(&obj->kref);
}

static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_put(&obj->kref, dmabuf_gem_object_free);
}

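/*
 * GEM release hook for the proxy object: break the link to the dma-buf
 * and drop the dmabuf_obj reference taken at export time, holding the
 * vGPU's dmabuf_lock while the vGPU still exists.
 */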
static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
{
	struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info;
	struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
	struct intel_vgpu *vgpu = obj->vgpu;

	if (vgpu) {
		mutex_lock(&vgpu->dmabuf_lock);
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
		mutex_unlock(&vgpu->dmabuf_lock);
	} else {
		/* vgpu is NULL, as it has been removed already */
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
	}
}

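/*
 * The exported framebuffer is a proxy object: it has no backing store
 * of its own, its pages come straight from the guest framebuffer via
 * the get_pages/put_pages hooks above.
 */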
static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
	.flags = I915_GEM_OBJECT_IS_PROXY,
	.get_pages = vgpu_gem_get_pages,
	.put_pages = vgpu_gem_put_pages,
	.release = vgpu_gem_release,
};

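/*
 * Allocate a proxy GEM object describing the guest framebuffer in
 * @info and translate the guest's DRM format modifier into i915
 * tiling/stride state.  Returns NULL on allocation failure.
 */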
static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
		struct intel_vgpu_fb_info *info)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base,
		roundup(info->size, PAGE_SIZE));
	i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class);
	i915_gem_object_set_readonly(obj);

	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;
	if (INTEL_GEN(dev_priv) >= 9) {
		unsigned int tiling_mode = 0;
		unsigned int stride = 0;

		switch (info->drm_format_mod) {
		case DRM_FORMAT_MOD_LINEAR:
			tiling_mode = I915_TILING_NONE;
			break;
		case I915_FORMAT_MOD_X_TILED:
			tiling_mode = I915_TILING_X;
			stride = info->stride;
			break;
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
			tiling_mode = I915_TILING_Y;
			stride = info->stride;
			break;
		default:
			gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
				     info->drm_format_mod);
		}
		obj->tiling_and_stride = tiling_mode | stride;
	} else {
		obj->tiling_and_stride = info->drm_format_mod ?
					I915_TILING_X : 0;
	}

	return obj;
}

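/* A cursor hotspot is only meaningful if it lies inside the cursor plane. */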
static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
{
	if (c && c->x_hot <= c->width && c->y_hot <= c->height)
		return true;
	else
		return false;
}

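/*
 * Decode the guest's current primary or cursor plane into @info:
 * format, tiling, geometry and the GGTT start address, which must be
 * page aligned and fall inside a valid GGTT range.  The framebuffer
 * size is rounded up to a whole number of tile rows.
 */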
static int vgpu_get_plane_info(struct drm_device *dev,
		struct intel_vgpu *vgpu,
		struct intel_vgpu_fb_info *info,
		int plane_id)
{
	struct intel_vgpu_primary_plane_format p;
	struct intel_vgpu_cursor_plane_format c;
	int ret, tile_height = 1;

	memset(info, 0, sizeof(*info));

	if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
		ret = intel_vgpu_decode_primary_plane(vgpu, &p);
		if (ret)
			return ret;
		info->start = p.base;
		info->start_gpa = p.base_gpa;
		info->width = p.width;
		info->height = p.height;
		info->stride = p.stride;
		info->drm_format = p.drm_format;

		switch (p.tiled) {
		case PLANE_CTL_TILED_LINEAR:
			info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
			break;
		case PLANE_CTL_TILED_X:
			info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
			tile_height = 8;
			break;
		case PLANE_CTL_TILED_Y:
			info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
			tile_height = 32;
			break;
		case PLANE_CTL_TILED_YF:
			info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
			tile_height = 32;
			break;
		default:
			gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
		}
	} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
		ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
		if (ret)
			return ret;
		info->start = c.base;
		info->start_gpa = c.base_gpa;
		info->width = c.width;
		info->height = c.height;
		info->stride = c.width * (c.bpp / 8);
		info->drm_format = c.drm_format;
		info->drm_format_mod = 0;
		info->x_pos = c.x_pos;
		info->y_pos = c.y_pos;

		if (validate_hotspot(&c)) {
			info->x_hot = c.x_hot;
			info->y_hot = c.y_hot;
		} else {
			info->x_hot = UINT_MAX;
			info->y_hot = UINT_MAX;
		}
	} else {
		gvt_vgpu_err("invalid plane id:%d\n", plane_id);
		return -EINVAL;
	}

	info->size = info->stride * roundup(info->height, tile_height);
	if (info->size == 0) {
		gvt_vgpu_err("fb size is zero\n");
		return -EINVAL;
	}

	if (info->start & (PAGE_SIZE - 1)) {
		gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
		return -EFAULT;
	}

	if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
		gvt_vgpu_err("invalid gma addr\n");
		return -EFAULT;
	}

	return 0;
}

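/*
 * Look for an already exposed dmabuf_obj whose decoded framebuffer
 * description matches @latest_info, so the same plane is not exported
 * twice.  Returns NULL if no match exists.
 */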
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_info(struct intel_vgpu *vgpu,
		struct intel_vgpu_fb_info *latest_info)
{
	struct list_head *pos;
	struct intel_vgpu_fb_info *fb_info;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
						list);
		if ((dmabuf_obj == NULL) ||
		    (dmabuf_obj->info == NULL))
			continue;

		fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info;
		if ((fb_info->start == latest_info->start) &&
		    (fb_info->start_gpa == latest_info->start_gpa) &&
		    (fb_info->size == latest_info->size) &&
		    (fb_info->drm_format_mod == latest_info->drm_format_mod) &&
		    (fb_info->drm_format == latest_info->drm_format) &&
		    (fb_info->width == latest_info->width) &&
		    (fb_info->height == latest_info->height)) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}

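/* Look up an exposed dmabuf_obj by the id handed out in query_plane. */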
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id)
{
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
						list);
		if (!dmabuf_obj)
			continue;

		if (dmabuf_obj->dmabuf_id == id) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}

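/* Copy the decoded plane description into the VFIO query result. */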
static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
		struct intel_vgpu_fb_info *fb_info)
{
	gvt_dmabuf->drm_format = fb_info->drm_format;
	gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
	gvt_dmabuf->width = fb_info->width;
	gvt_dmabuf->height = fb_info->height;
	gvt_dmabuf->stride = fb_info->stride;
	gvt_dmabuf->size = fb_info->size;
	gvt_dmabuf->x_pos = fb_info->x_pos;
	gvt_dmabuf->y_pos = fb_info->y_pos;
	gvt_dmabuf->x_hot = fb_info->x_hot;
	gvt_dmabuf->y_hot = fb_info->y_hot;
}

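/*
 * Backend for the VFIO graphics plane query (VFIO_GFX_PLANE_TYPE_DMABUF):
 * decode the requested plane and either reuse a matching dmabuf_obj
 * that is already exposed or create a new one, assign it an id and
 * report the plane description back to userspace, which can then fetch
 * an fd for that id via intel_vgpu_get_dmabuf().  -ENODEV from plane
 * decoding is mapped to success, since the plane may simply not be
 * ready yet.
 */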
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
{
	struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
	struct vfio_device_gfx_plane_info *gfx_plane_info = args;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct intel_vgpu_fb_info fb_info;
	int ret = 0;

	if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF |
				       VFIO_GFX_PLANE_TYPE_PROBE))
		return ret;
	else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) ||
			(!gfx_plane_info->flags))
		return -EINVAL;

	ret = vgpu_get_plane_info(dev, vgpu, &fb_info,
				  gfx_plane_info->drm_plane_type);
	if (ret != 0)
		goto out;

	mutex_lock(&vgpu->dmabuf_lock);
	/* If exists, pick up the exposed dmabuf_obj */
	dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info);
	if (dmabuf_obj) {
		update_fb_info(gfx_plane_info, &fb_info);
		gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id;

		/* This buffer may be released between query_plane ioctl and
		 * get_dmabuf ioctl. Add the refcount to make sure it won't
		 * be released between the two ioctls.
		 */
		if (!dmabuf_obj->initref) {
			dmabuf_obj->initref = true;
			dmabuf_obj_get(dmabuf_obj);
		}
		ret = 0;
		gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n",
			    vgpu->id, kref_read(&dmabuf_obj->kref),
			    gfx_plane_info->dmabuf_id);
		mutex_unlock(&vgpu->dmabuf_lock);
		goto out;
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	/* Need to allocate a new one */
	dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL);
	if (unlikely(!dmabuf_obj)) {
		gvt_vgpu_err("alloc dmabuf_obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info),
				   GFP_KERNEL);
	if (unlikely(!dmabuf_obj->info)) {
		gvt_vgpu_err("allocate intel vgpu fb info failed\n");
		ret = -ENOMEM;
		goto out_free_dmabuf;
	}
	memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info));

	((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj;

	dmabuf_obj->vgpu = vgpu;

	idr_preload(GFP_NOWAIT); /* XXX ??? */
	ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT);
	idr_preload_end();
	if (ret < 0)
		goto out_free_info;
	gfx_plane_info->dmabuf_id = ret;
	dmabuf_obj->dmabuf_id = ret;

	dmabuf_obj->initref = true;

	kref_init(&dmabuf_obj->kref);

	mutex_lock(&vgpu->dmabuf_lock);
	if (intel_gvt_hypervisor_get_vfio_device(vgpu)) {
		gvt_vgpu_err("get vfio device failed\n");
		mutex_unlock(&vgpu->dmabuf_lock);
		goto out_free_info;
	}
	mutex_unlock(&vgpu->dmabuf_lock);

	update_fb_info(gfx_plane_info, &fb_info);

	INIT_LIST_HEAD(&dmabuf_obj->list);
	mutex_lock(&vgpu->dmabuf_lock);
	list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id,
		    __func__, kref_read(&dmabuf_obj->kref), ret);

	return 0;

out_free_info:
	kfree(dmabuf_obj->info);
out_free_dmabuf:
	kfree(dmabuf_obj);
out:
	/* ENODEV means plane isn't ready, which might be a normal case. */
	return (ret == -ENODEV) ? 0 : ret;
}

/*
 * To associate an exposed dmabuf with the dmabuf_obj: create a proxy
 * GEM object for the framebuffer described by @dmabuf_id, export it as
 * a dma-buf and return a file descriptor for it.  The exported dma-buf
 * holds a reference on the dmabuf_obj; the initial reference taken in
 * query_plane is dropped here.
 */
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
{
	struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	int dmabuf_fd;
	int ret = 0;

	mutex_lock(&vgpu->dmabuf_lock);

	dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id);
	if (dmabuf_obj == NULL) {
		gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id);
		ret = -EINVAL;
		goto out;
	}

	obj = vgpu_create_gem(dev, dmabuf_obj->info);
	if (obj == NULL) {
		gvt_vgpu_err("create gvt gem obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	obj->gvt_info = dmabuf_obj->info;

	dmabuf = i915_gem_prime_export(&obj->base, DRM_CLOEXEC | DRM_RDWR);
	if (IS_ERR(dmabuf)) {
		gvt_vgpu_err("export dma-buf failed\n");
		ret = PTR_ERR(dmabuf);
		goto out_free_gem;
	}

	ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
	if (ret < 0) {
		gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
		goto out_free_dmabuf;
	}
	dmabuf_fd = ret;

	dmabuf_obj_get(dmabuf_obj);

	if (dmabuf_obj->initref) {
		dmabuf_obj->initref = false;
		dmabuf_obj_put(dmabuf_obj);
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n"
		    " file count: %ld, GEM ref: %d\n",
		    vgpu->id, dmabuf_obj->dmabuf_id,
		    kref_read(&dmabuf_obj->kref),
		    dmabuf_fd,
		    file_count(dmabuf->file),
		    kref_read(&obj->base.refcount));

	i915_gem_object_put(obj);

	return dmabuf_fd;

out_free_dmabuf:
	dma_buf_put(dmabuf);
out_free_gem:
	i915_gem_object_put(obj);
out:
	mutex_unlock(&vgpu->dmabuf_lock);
	return ret;
}

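/*
 * Tear down every dmabuf_obj still tracked by @vgpu: detach it from
 * the vGPU, release its id and the VFIO device reference, and drop the
 * initial reference if userspace never collected the dma-buf.  Objects
 * still referenced by an exported dma-buf are freed later as orphans
 * in dmabuf_gem_object_free().
 */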
void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	mutex_lock(&vgpu->dmabuf_lock);
	list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
						list);
		dmabuf_obj->vgpu = NULL;

		idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
		intel_gvt_hypervisor_put_vfio_device(vgpu);
		list_del(pos);

		/* dmabuf_obj might be freed in dmabuf_obj_put */
		if (dmabuf_obj->initref) {
			dmabuf_obj->initref = false;
			dmabuf_obj_put(dmabuf_obj);
		}
	}
	mutex_unlock(&vgpu->dmabuf_lock);
}