/*	$NetBSD: linux_dma_buf.c,v 1.3 2018/08/27 15:24:40 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_dma_buf.c,v 1.3 2018/08/27 15:24:40 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kmem.h>
#include <sys/mutex.h>

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/fence.h>
#include <linux/reservation.h>

static int	dmabuf_fop_poll(struct file *, int);
static int	dmabuf_fop_close(struct file *);
static int	dmabuf_fop_kqfilter(struct file *, struct knote *);
static int	dmabuf_fop_mmap(struct file *, off_t *, size_t, int, int *,
		    int *, struct uvm_object **, int *);

static const struct fileops dmabuf_fileops = {
	.fo_name = "dmabuf",
	.fo_read = fbadop_read,
	.fo_write = fbadop_write,
	.fo_ioctl = fbadop_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = dmabuf_fop_poll,
	.fo_stat = fbadop_stat,
	.fo_close = dmabuf_fop_close,
	.fo_kqfilter = dmabuf_fop_kqfilter,
	.fo_restart = fnullop_restart,
	.fo_mmap = dmabuf_fop_mmap,
};

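/*
 * dma_buf_export(info)
 *
 *	Create and return a new dma-buf with the caller's private data
 *	info->priv, exporter operations info->ops, size info->size, and
 *	reservation object info->resv.  If info->resv is null, the
 *	dma-buf uses an internally allocated reservation object
 *	instead.  The new dma-buf starts with one reference.
 */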
struct dma_buf *
dma_buf_export(struct dma_buf_export_info *info)
{
	struct dma_buf *dmabuf;

	if (info->resv == NULL) {
		dmabuf = kmem_zalloc(offsetof(struct dma_buf, db_resv_int[1]),
		    KM_SLEEP);
	} else {
		dmabuf = kmem_zalloc(sizeof(*dmabuf), KM_SLEEP);
	}

	dmabuf->priv = info->priv;
	dmabuf->ops = info->ops;
	dmabuf->size = info->size;
	dmabuf->resv = info->resv;

	mutex_init(&dmabuf->db_lock, MUTEX_DEFAULT, IPL_NONE);
	dmabuf->db_refcnt = 1;

	if (dmabuf->resv == NULL) {
		dmabuf->resv = &dmabuf->db_resv_int[0];
		reservation_object_init(dmabuf->resv);
	}

	return dmabuf;
}

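/*
 * dma_buf_fd(dmabuf, flags)
 *
 *	Allocate a file descriptor for dmabuf and take a new reference
 *	to it.  flags may include O_CLOEXEC.  Return the file
 *	descriptor on success, or a negative error number on failure.
 */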
int
dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	struct file *file;
	int fd;
	unsigned refcnt __diagused;
	int ret;

	ret = -fd_allocfile(&file, &fd);
	if (ret)
		goto out0;

	refcnt = atomic_inc_uint_nv(&dmabuf->db_refcnt);
	KASSERT(refcnt > 1);

	file->f_type = DTYPE_MISC;
	file->f_flag = 0;	/* XXX DRM code allows only O_CLOEXEC.  */
	file->f_ops = &dmabuf_fileops;
	file->f_data = dmabuf;
	fd_set_exclose(curlwp, fd, (flags & O_CLOEXEC) != 0);
	fd_affix(curproc, file, fd);

	fd_putfile(fd);
	ret = fd;
out0:	return ret;
}

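/*
 * dma_buf_get(fd)
 *
 *	Return the dma-buf underlying the file descriptor fd, taking a
 *	new reference to it, or an ERR_PTR-encoded error: -EBADF if fd
 *	is not open, -EINVAL if it does not name a dma-buf.
 */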
struct dma_buf *
dma_buf_get(int fd)
{
	struct file *file;
	struct dma_buf *dmabuf;
	unsigned refcnt __diagused;
	int error;

	if ((file = fd_getfile(fd)) == NULL) {
		error = EBADF;
		goto fail0;
	}
	if (file->f_type != DTYPE_MISC || file->f_ops != &dmabuf_fileops) {
		error = EINVAL;
		goto fail1;
	}

	dmabuf = file->f_data;
	refcnt = atomic_inc_uint_nv(&dmabuf->db_refcnt);
	KASSERT(refcnt > 1);
	fd_putfile(fd);
	return dmabuf;

fail1:	fd_putfile(fd);
fail0:	KASSERT(error);
	return ERR_PTR(-error);
}

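/*
 * get_dma_buf(dmabuf)
 *
 *	Take a new reference to dmabuf.  The caller must already hold
 *	a reference.
 */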
void
get_dma_buf(struct dma_buf *dmabuf)
{
	unsigned refcnt __diagused;

	refcnt = atomic_inc_uint_nv(&dmabuf->db_refcnt);
	KASSERT(refcnt > 1);
}

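/*
 * dma_buf_put(dmabuf)
 *
 *	Release a reference to dmabuf.  On the last reference, destroy
 *	its lock, finalize its internal reservation object if it has
 *	one, and free it.
 */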
void
dma_buf_put(struct dma_buf *dmabuf)
{

	if (atomic_dec_uint_nv(&dmabuf->db_refcnt) != 0)
		return;

	mutex_destroy(&dmabuf->db_lock);
	if (dmabuf->resv == &dmabuf->db_resv_int[0]) {
		reservation_object_fini(dmabuf->resv);
		kmem_free(dmabuf, offsetof(struct dma_buf, db_resv_int[1]));
	} else {
		kmem_free(dmabuf, sizeof(*dmabuf));
	}
}

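/*
 * dma_buf_attach(dmabuf, dev)
 *
 *	Attach the device dev to dmabuf, calling the exporter's attach
 *	method, if any, under the dma-buf lock.  Return the new
 *	attachment on success, or an ERR_PTR-encoded error if the
 *	attach method fails.
 */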
struct dma_buf_attachment *
dma_buf_attach(struct dma_buf *dmabuf, struct device *dev)
{
	struct dma_buf_attachment *attach;
	int ret = 0;

	attach = kmem_zalloc(sizeof(*attach), KM_SLEEP);
	attach->dmabuf = dmabuf;

	mutex_enter(&dmabuf->db_lock);
	if (dmabuf->ops->attach)
		ret = dmabuf->ops->attach(dmabuf, dev, attach);
	mutex_exit(&dmabuf->db_lock);
	if (ret)
		goto fail0;

	return attach;

fail0:	kmem_free(attach, sizeof(*attach));
	return ERR_PTR(ret);
}

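/*
 * dma_buf_detach(dmabuf, attach)
 *
 *	Undo dma_buf_attach: call the exporter's detach method, if any,
 *	under the dma-buf lock, and free the attachment.
 */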
void
dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{

	mutex_enter(&dmabuf->db_lock);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);
	mutex_exit(&dmabuf->db_lock);

	kmem_free(attach, sizeof(*attach));
}

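/*
 * dma_buf_map_attachment(attach, dir)
 *
 *	Map the attached dma-buf for access by the attached device in
 *	direction dir, by delegating to the exporter's map_dma_buf
 *	method, and return the resulting scatter/gather table.
 */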
struct sg_table *
dma_buf_map_attachment(struct dma_buf_attachment *attach,
    enum dma_data_direction dir)
{

	return attach->dmabuf->ops->map_dma_buf(attach, dir);
}

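/*
 * dma_buf_unmap_attachment(attach, sg, dir)
 *
 *	Unmap the scatter/gather table sg previously returned by
 *	dma_buf_map_attachment, by delegating to the exporter's
 *	unmap_dma_buf method.
 */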
void
dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
    struct sg_table *sg, enum dma_data_direction dir)
{

	return attach->dmabuf->ops->unmap_dma_buf(attach, sg, dir);
}

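/*
 * dmabuf_fop_close(file)
 *
 *	Called when the last file descriptor referencing the dma-buf
 *	file is closed: release the file's reference to the dma-buf.
 */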
static int
dmabuf_fop_close(struct file *file)
{
	struct dma_buf *dmabuf = file->f_data;

	dma_buf_put(dmabuf);
	return 0;
}

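/*
 * dmabuf_fop_poll(file, events)
 *
 *	Poll for events on the dma-buf's reservation object.  Not yet
 *	implemented; currently returns -ENOSYS.
 */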
static int
dmabuf_fop_poll(struct file *file, int events)
{
#ifdef notyet
	struct dma_buf *dmabuf = file->f_data;

	return reservation_object_poll(dmabuf->resv, events);
#else
	return -ENOSYS;
#endif
}

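/*
 * dmabuf_fop_kqfilter(file, knote)
 *
 *	Attach a knote to the dma-buf's reservation object.  Not yet
 *	implemented; currently returns -ENOSYS.
 */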
static int
dmabuf_fop_kqfilter(struct file *file, struct knote *knote)
{
#ifdef notyet
	struct dma_buf *dmabuf = file->f_data;

	return reservation_object_kqfilter(dmabuf->resv, knote);
#else
	return -ENOSYS;
#endif
}

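/*
 * dmabuf_fop_mmap(file, offp, size, prot, flagsp, advicep, uobjp,
 *	maxprotp)
 *
 *	Map size bytes of the dma-buf into userspace by delegating to
 *	the exporter's mmap method, after checking that the request
 *	does not exceed the dma-buf's size.
 */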
static int
dmabuf_fop_mmap(struct file *file, off_t *offp, size_t size, int prot,
    int *flagsp, int *advicep, struct uvm_object **uobjp, int *maxprotp)
{
	struct dma_buf *dmabuf = file->f_data;

	if (size > dmabuf->size)
		return EINVAL;

	return dmabuf->ops->mmap(dmabuf, offp, size, prot, flagsp, advicep,
	    uobjp, maxprotp);
}
278