/*	$NetBSD: mock_dmabuf.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */
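
/*
 * Mock dma-buf exporter used by driver selftests: it backs a dma_buf
 * with ordinary kernel pages so the import/export paths can be
 * exercised without touching real device memory.
 */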

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mock_dmabuf.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $");

#include "mock_dmabuf.h"

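/*
 * ->map_dma_buf(): build a scatterlist covering the mock pages and map
 * it for DMA by the attached device.
 */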
static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment,
					 enum dma_data_direction dir)
{
	struct mock_dmabuf *mock = to_mock(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *sg;
	int i, err;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(st, mock->npages, GFP_KERNEL);
	if (err)
		goto err_free;

	sg = st->sgl;
	for (i = 0; i < mock->npages; i++) {
		sg_set_page(sg, mock->pages[i], PAGE_SIZE, 0);
		sg = sg_next(sg);
	}

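	/* dma_map_sg() returns the number of mapped entries, 0 on failure. */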
	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		err = -ENOMEM;
		goto err_st;
	}

	return st;

err_st:
	sg_free_table(st);
err_free:
	kfree(st);
	return ERR_PTR(err);
}

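/*
 * ->unmap_dma_buf(): undo mock_map_dma_buf() and release the table.
 */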
static void mock_unmap_dma_buf(struct dma_buf_attachment *attachment,
			       struct sg_table *st,
			       enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, st->sgl, st->nents, dir);
	sg_free_table(st);
	kfree(st);
}

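/*
 * ->release(): drop the page references taken in mock_dmabuf() and free
 * the wrapper once the last reference to the dma_buf goes away.
 */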
static void mock_dmabuf_release(struct dma_buf *dma_buf)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);
	int i;

	for (i = 0; i < mock->npages; i++)
		put_page(mock->pages[i]);

	kfree(mock);
}

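/*
 * ->vmap(): give the importer a contiguous kernel mapping of the pages.
 */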
static void *mock_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);

	return vm_map_ram(mock->pages, mock->npages, 0, PAGE_KERNEL);
}

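/*
 * ->vunmap(): tear down the mapping created by mock_dmabuf_vmap().
 */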
static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);

	vm_unmap_ram(vaddr, mock->npages);
}

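/*
 * ->mmap(): userspace mappings are deliberately unsupported by the mock.
 */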
static int mock_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -ENODEV;
}

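/* Exporter ops wired into the dma_buf handed out by mock_dmabuf(). */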
static const struct dma_buf_ops mock_dmabuf_ops = {
	.map_dma_buf = mock_map_dma_buf,
	.unmap_dma_buf = mock_unmap_dma_buf,
	.release = mock_dmabuf_release,
	.mmap = mock_dmabuf_mmap,
	.vmap = mock_dmabuf_vmap,
	.vunmap = mock_dmabuf_vunmap,
};

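/*
 * Allocate npages backing pages and export them as a dma_buf.  Returns
 * the new dma_buf, or an ERR_PTR on failure.
 */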
static struct dma_buf *mock_dmabuf(int npages)
{
	struct mock_dmabuf *mock;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;
	int i, err;

	mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *),
		       GFP_KERNEL);
	if (!mock)
		return ERR_PTR(-ENOMEM);

	mock->npages = npages;
	for (i = 0; i < npages; i++) {
		mock->pages[i] = alloc_page(GFP_KERNEL);
		if (!mock->pages[i]) {
			err = -ENOMEM;
			goto err;
		}
	}

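	/* Describe the buffer to the dma-buf core and export it. */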
	exp_info.ops = &mock_dmabuf_ops;
	exp_info.size = npages * PAGE_SIZE;
	exp_info.flags = O_CLOEXEC;
	exp_info.priv = mock;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		/* Propagate the export error instead of forcing -ENOMEM. */
		err = PTR_ERR(dmabuf);
		goto err;
	}

	return dmabuf;

err:
	while (i--)
		put_page(mock->pages[i]);
	kfree(mock);
	return ERR_PTR(err);
}
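
/*
 * Example usage (a minimal sketch, not part of this file): a selftest
 * could import the mock buffer through the ordinary dma-buf API.  The
 * "dev" pointer below is a hypothetical struct device supplied by the
 * test harness, and error handling for attach/map is elided.
 *
 *	struct dma_buf *dmabuf = mock_dmabuf(4);
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *st;
 *
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	attach = dma_buf_attach(dmabuf, dev);
 *	st = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	...
 *	dma_buf_unmap_attachment(attach, st, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */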