xref: /netbsd-src/sys/external/bsd/drm2/dist/drm/drm_prime.c (revision 1dc3d718d716aff4eaf075016d00c57b56a9ea97)
1*1dc3d718Sriastradh /*	$NetBSD: drm_prime.c,v 1.20 2022/07/06 01:12:45 riastradh Exp $	*/
2efa246c0Sriastradh 
3fcd0cb28Sriastradh /*
4fcd0cb28Sriastradh  * Copyright © 2012 Red Hat
5fcd0cb28Sriastradh  *
6fcd0cb28Sriastradh  * Permission is hereby granted, free of charge, to any person obtaining a
7fcd0cb28Sriastradh  * copy of this software and associated documentation files (the "Software"),
8fcd0cb28Sriastradh  * to deal in the Software without restriction, including without limitation
9fcd0cb28Sriastradh  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10fcd0cb28Sriastradh  * and/or sell copies of the Software, and to permit persons to whom the
11fcd0cb28Sriastradh  * Software is furnished to do so, subject to the following conditions:
12fcd0cb28Sriastradh  *
13fcd0cb28Sriastradh  * The above copyright notice and this permission notice (including the next
14fcd0cb28Sriastradh  * paragraph) shall be included in all copies or substantial portions of the
15fcd0cb28Sriastradh  * Software.
16fcd0cb28Sriastradh  *
17fcd0cb28Sriastradh  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18fcd0cb28Sriastradh  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19fcd0cb28Sriastradh  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20fcd0cb28Sriastradh  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21fcd0cb28Sriastradh  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22fcd0cb28Sriastradh  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
23fcd0cb28Sriastradh  * IN THE SOFTWARE.
24fcd0cb28Sriastradh  *
25fcd0cb28Sriastradh  * Authors:
26fcd0cb28Sriastradh  *      Dave Airlie <airlied@redhat.com>
27fcd0cb28Sriastradh  *      Rob Clark <rob.clark@linaro.org>
28fcd0cb28Sriastradh  *
29fcd0cb28Sriastradh  */
30fcd0cb28Sriastradh 
31efa246c0Sriastradh #include <sys/cdefs.h>
32*1dc3d718Sriastradh __KERNEL_RCSID(0, "$NetBSD: drm_prime.c,v 1.20 2022/07/06 01:12:45 riastradh Exp $");
33efa246c0Sriastradh 
34fcd0cb28Sriastradh #include <linux/export.h>
35fcd0cb28Sriastradh #include <linux/dma-buf.h>
3641ec0267Sriastradh #include <linux/rbtree.h>
3741ec0267Sriastradh 
3841ec0267Sriastradh #include <drm/drm.h>
3941ec0267Sriastradh #include <drm/drm_drv.h>
4041ec0267Sriastradh #include <drm/drm_file.h>
4141ec0267Sriastradh #include <drm/drm_framebuffer.h>
42efa246c0Sriastradh #include <drm/drm_gem.h>
4341ec0267Sriastradh #include <drm/drm_prime.h>
44efa246c0Sriastradh 
45efa246c0Sriastradh #include "drm_internal.h"
46fcd0cb28Sriastradh 
4740e1d52aSriastradh #ifdef __NetBSD__
4840e1d52aSriastradh 
4976f0c10cSriastradh #include <sys/file.h>
5076f0c10cSriastradh 
51eb0c859bSriastradh #include <drm/bus_dma_hacks.h>
52eb0c859bSriastradh 
531b46a69aSriastradh #include <linux/nbsd-namespace.h>
541b46a69aSriastradh 
5540e1d52aSriastradh #endif	/* __NetBSD__ */
5640e1d52aSriastradh 
5741ec0267Sriastradh /**
5841ec0267Sriastradh  * DOC: overview and lifetime rules
59fcd0cb28Sriastradh  *
6041ec0267Sriastradh  * Similar to GEM global names, PRIME file descriptors are also used to share
6141ec0267Sriastradh  * buffer objects across processes. They offer additional security: as file
6241ec0267Sriastradh  * descriptors must be explicitly sent over UNIX domain sockets to be shared
6341ec0267Sriastradh  * between applications, they can't be guessed like the globally unique GEM
6441ec0267Sriastradh  * names.
65fcd0cb28Sriastradh  *
6641ec0267Sriastradh  * Drivers that support the PRIME API implement the
6741ec0267Sriastradh  * &drm_driver.prime_handle_to_fd and &drm_driver.prime_fd_to_handle operations.
6841ec0267Sriastradh  * GEM based drivers must use drm_gem_prime_handle_to_fd() and
6941ec0267Sriastradh  * drm_gem_prime_fd_to_handle() to implement these. For GEM based drivers the
 7041ec0267Sriastradh  * actual driver interface is provided through the &drm_gem_object_funcs.export
7141ec0267Sriastradh  * and &drm_driver.gem_prime_import hooks.
72fcd0cb28Sriastradh  *
7341ec0267Sriastradh  * &dma_buf_ops implementations for GEM drivers are all individually exported
7441ec0267Sriastradh  * for drivers which need to overwrite or reimplement some of them.
75fcd0cb28Sriastradh  *
7641ec0267Sriastradh  * Reference Counting for GEM Drivers
7741ec0267Sriastradh  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7841ec0267Sriastradh  *
7941ec0267Sriastradh  * On the export the &dma_buf holds a reference to the exported buffer object,
8041ec0267Sriastradh  * usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD
8141ec0267Sriastradh  * IOCTL, when it first calls &drm_gem_object_funcs.export
8241ec0267Sriastradh  * and stores the exporting GEM object in the &dma_buf.priv field. This
8341ec0267Sriastradh  * reference needs to be released when the final reference to the &dma_buf
8441ec0267Sriastradh  * itself is dropped and its &dma_buf_ops.release function is called.  For
8541ec0267Sriastradh  * GEM-based drivers, the &dma_buf should be exported using
8641ec0267Sriastradh  * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release().
8741ec0267Sriastradh  *
8841ec0267Sriastradh  * Thus the chain of references always flows in one direction, avoiding loops:
8941ec0267Sriastradh  * importing GEM object -> dma-buf -> exported GEM bo. A further complication
9041ec0267Sriastradh  * are the lookup caches for import and export. These are required to guarantee
 9141ec0267Sriastradh  * that any given object will always have only one unique userspace handle. This
9241ec0267Sriastradh  * is required to allow userspace to detect duplicated imports, since some GEM
9341ec0267Sriastradh  * drivers do fail command submissions if a given buffer object is listed more
9441ec0267Sriastradh  * than once. These import and export caches in &drm_prime_file_private only
9541ec0267Sriastradh  * retain a weak reference, which is cleaned up when the corresponding object is
9641ec0267Sriastradh  * released.
9741ec0267Sriastradh  *
9841ec0267Sriastradh  * Self-importing: If userspace is using PRIME as a replacement for flink then
9941ec0267Sriastradh  * it will get a fd->handle request for a GEM object that it created.  Drivers
10041ec0267Sriastradh  * should detect this situation and return back the underlying object from the
10141ec0267Sriastradh  * dma-buf private. For GEM based drivers this is handled in
10241ec0267Sriastradh  * drm_gem_prime_import() already.
103fcd0cb28Sriastradh  */
104fcd0cb28Sriastradh 
/*
 * One entry in a drm_file's PRIME lookup caches: a (dma_buf, handle)
 * pair, linked into two rbtrees so it can be found by either key.
 */
struct drm_prime_member {
	struct dma_buf *dma_buf;	/* cached dma-buf (cache holds a ref) */
	uint32_t handle;		/* GEM handle in the owning file */

	struct rb_node dmabuf_rb;	/* linkage in prime.dmabufs, keyed by dma_buf */
	struct rb_node handle_rb;	/* linkage in prime.handles, keyed by handle */
};
1129d20d926Sriastradh 
113ec231e8fSriastradh #ifdef __NetBSD__
114ec231e8fSriastradh static int
compare_dmabufs(void * cookie,const void * va,const void * vb)115ec231e8fSriastradh compare_dmabufs(void *cookie, const void *va, const void *vb)
116ec231e8fSriastradh {
117ec231e8fSriastradh 	const struct drm_prime_member *ma = va;
118ec231e8fSriastradh 	const struct drm_prime_member *mb = vb;
119ec231e8fSriastradh 
120ec231e8fSriastradh 	if (ma->dma_buf < mb->dma_buf)
121ec231e8fSriastradh 		return -1;
122ec231e8fSriastradh 	if (ma->dma_buf > mb->dma_buf)
123ec231e8fSriastradh 		return +1;
124ec231e8fSriastradh 	return 0;
125ec231e8fSriastradh }
126ec231e8fSriastradh 
127ec231e8fSriastradh static int
compare_dmabuf_key(void * cookie,const void * vm,const void * vk)128ec231e8fSriastradh compare_dmabuf_key(void *cookie, const void *vm, const void *vk)
129ec231e8fSriastradh {
130ec231e8fSriastradh 	const struct drm_prime_member *m = vm;
131ec231e8fSriastradh 	const struct dma_buf *const *kp = vk;
132ec231e8fSriastradh 
133ec231e8fSriastradh 	if (m->dma_buf < *kp)
134ec231e8fSriastradh 		return -1;
135ec231e8fSriastradh 	if (m->dma_buf > *kp)
136ec231e8fSriastradh 		return +1;
137ec231e8fSriastradh 	return 0;
138ec231e8fSriastradh }
139ec231e8fSriastradh 
140ec231e8fSriastradh static int
compare_handles(void * cookie,const void * va,const void * vb)141ec231e8fSriastradh compare_handles(void *cookie, const void *va, const void *vb)
142ec231e8fSriastradh {
143ec231e8fSriastradh 	const struct drm_prime_member *ma = va;
144ec231e8fSriastradh 	const struct drm_prime_member *mb = vb;
145ec231e8fSriastradh 
146ec231e8fSriastradh 	if (ma->handle < mb->handle)
147ec231e8fSriastradh 		return -1;
148ec231e8fSriastradh 	if (ma->handle > mb->handle)
149ec231e8fSriastradh 		return +1;
150ec231e8fSriastradh 	return 0;
151ec231e8fSriastradh }
152ec231e8fSriastradh 
153ec231e8fSriastradh static int
compare_handle_key(void * cookie,const void * vm,const void * vk)154ec231e8fSriastradh compare_handle_key(void *cookie, const void *vm, const void *vk)
155ec231e8fSriastradh {
156ec231e8fSriastradh 	const struct drm_prime_member *m = vm;
157ec231e8fSriastradh 	const uint32_t *kp = vk;
158ec231e8fSriastradh 
159ec231e8fSriastradh 	if (m->handle < *kp)
160ec231e8fSriastradh 		return -1;
161ec231e8fSriastradh 	if (m->handle > *kp)
162ec231e8fSriastradh 		return +1;
163ec231e8fSriastradh 	return 0;
164ec231e8fSriastradh }
165ec231e8fSriastradh 
/* NetBSD rb_tree(3) ops for the dma_buf-keyed cache tree. */
static const rb_tree_ops_t dmabuf_ops = {
	.rbto_compare_nodes = compare_dmabufs,
	.rbto_compare_key = compare_dmabuf_key,
	.rbto_node_offset = offsetof(struct drm_prime_member, dmabuf_rb),
};
171ec231e8fSriastradh 
/* NetBSD rb_tree(3) ops for the handle-keyed cache tree. */
static const rb_tree_ops_t handle_ops = {
	.rbto_compare_nodes = compare_handles,
	.rbto_compare_key = compare_handle_key,
	.rbto_node_offset = offsetof(struct drm_prime_member, handle_rb),
};
177ec231e8fSriastradh #endif
178ec231e8fSriastradh 
/*
 * Record the (dma_buf, handle) pair in both of this file's PRIME
 * lookup caches.  Takes an extra reference on @dma_buf, which is
 * dropped again when the entry is removed.
 *
 * Returns 0 on success or -ENOMEM if the member allocation fails.
 * NOTE(review): callers appear to hold file_priv->prime.lock and/or
 * dev->object_name_lock when inserting -- confirm against call sites.
 */
static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;
#ifdef __NetBSD__
	struct drm_prime_member *collision __diagused;
#else
	struct rb_node **p, *rb;
#endif

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	/* The cache keeps its own reference on the dma-buf. */
	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;

	/* Insert into the dma_buf-keyed tree. */
#ifdef __NetBSD__
	collision = rb_tree_insert_node(&prime_fpriv->dmabufs.rbr_tree,
	    member);
	/* rb_tree_insert_node returns the colliding node; assert none. */
	KASSERT(collision == member);
#else
	rb = NULL;
	p = &prime_fpriv->dmabufs.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (dma_buf > pos->dma_buf)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->dmabuf_rb, rb, p);
	rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);
#endif

	/* Insert the same member into the handle-keyed tree. */
#ifdef __NetBSD__
	collision = rb_tree_insert_node(&prime_fpriv->handles.rbr_tree,
	    member);
	KASSERT(collision == member);
#else
	rb = NULL;
	p = &prime_fpriv->handles.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (handle > pos->handle)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->handle_rb, rb, p);
	rb_insert_color(&member->handle_rb, &prime_fpriv->handles);
#endif

	return 0;
}
241fcd0cb28Sriastradh 
/*
 * Look up the dma-buf cached under GEM handle @handle for this file.
 * Returns the dma-buf WITHOUT taking a new reference, or NULL if the
 * handle is not in the cache.
 */
static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
						      uint32_t handle)
{
#ifdef __NetBSD__
	struct drm_prime_member *member;

	/* NetBSD: keyed lookup in the handle-indexed rb_tree. */
	member = rb_tree_find_node(&prime_fpriv->handles.rbr_tree, &handle);
	if (member == NULL)
		return NULL;
	return member->dma_buf;
#else
	struct rb_node *rb;

	/* Linux: manual binary search over the handle-keyed rbtree. */
	rb = prime_fpriv->handles.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (member->handle == handle)
			return member->dma_buf;
		else if (member->handle < handle)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
#endif
}
2719d20d926Sriastradh 
/*
 * Reverse lookup: find the GEM handle cached for @dma_buf in this
 * file's PRIME cache.  On success stores the handle in *@handle and
 * returns 0; returns -ENOENT if the dma-buf has no cached handle.
 */
static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				       struct dma_buf *dma_buf,
				       uint32_t *handle)
{
#ifdef __NetBSD__
	struct drm_prime_member *member;

	/* NetBSD: keyed lookup in the dma_buf-indexed rb_tree. */
	member = rb_tree_find_node(&prime_fpriv->dmabufs.rbr_tree, &dma_buf);
	if (member == NULL)
		return -ENOENT;
	*handle = member->handle;
	return 0;
#else
	struct rb_node *rb;

	/* Linux: manual binary search, ordered by dma_buf address. */
	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}

	return -ENOENT;
#endif
}
3059d20d926Sriastradh 
/*
 * Remove the cache entry for @dma_buf (if any) from both per-file
 * trees, drop the cache's reference on the dma-buf, and free the
 * member.  A dma-buf with no cache entry is silently ignored.
 * The "_locked" suffix: caller holds prime_fpriv->lock.
 */
void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
					struct dma_buf *dma_buf)
{
#ifdef __NetBSD__
	struct drm_prime_member *member;

	member = rb_tree_find_node(&prime_fpriv->dmabufs.rbr_tree, &dma_buf);
	if (member != NULL) {
		/* Unlink from both trees before freeing the member. */
		rb_tree_remove_node(&prime_fpriv->handles.rbr_tree, member);
		rb_tree_remove_node(&prime_fpriv->dmabufs.rbr_tree, member);
		dma_buf_put(dma_buf);
		kfree(member);
	}
#else
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			rb_erase(&member->handle_rb, &prime_fpriv->handles);
			rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);

			dma_buf_put(dma_buf);
			kfree(member);
			return;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}
#endif
}
3429d20d926Sriastradh 
/*
 * Initialize the per-file PRIME state: the lock protecting the caches
 * and the two (initially empty) lookup trees.
 */
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	mutex_init(&prime_fpriv->lock);
#ifdef __NetBSD__
	rb_tree_init(&prime_fpriv->dmabufs.rbr_tree, &dmabuf_ops);
	rb_tree_init(&prime_fpriv->handles.rbr_tree, &handle_ops);
#else
	prime_fpriv->dmabufs = RB_ROOT;
	prime_fpriv->handles = RB_ROOT;
#endif
}
35441ec0267Sriastradh 
/*
 * Tear down the per-file PRIME state.  Both caches are expected to be
 * empty already; leftover entries would indicate a handle leak.
 */
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	mutex_destroy(&prime_fpriv->lock);
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->handles));
}
3629d20d926Sriastradh 
3639d20d926Sriastradh /**
36441ec0267Sriastradh  * drm_gem_dmabuf_export - &dma_buf export implementation for GEM
36541ec0267Sriastradh  * @dev: parent device for the exported dmabuf
36641ec0267Sriastradh  * @exp_info: the export information used by dma_buf_export()
36741ec0267Sriastradh  *
36841ec0267Sriastradh  * This wraps dma_buf_export() for use by generic GEM drivers that are using
36941ec0267Sriastradh  * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
37041ec0267Sriastradh  * a reference to the &drm_device and the exported &drm_gem_object (stored in
37141ec0267Sriastradh  * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
37241ec0267Sriastradh  *
37341ec0267Sriastradh  * Returns the new dmabuf.
37441ec0267Sriastradh  */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
				      struct dma_buf_export_info *exp_info)
{
	struct drm_gem_object *obj = exp_info->priv;
	struct dma_buf *dma_buf;

	dma_buf = dma_buf_export(exp_info);
	if (IS_ERR(dma_buf))
		return dma_buf;

	/*
	 * Take the two references that drm_gem_dmabuf_release() drops:
	 * one on the device, one on the exported GEM object.
	 */
	drm_dev_get(dev);
	drm_gem_object_get(obj);
#ifndef __NetBSD__		/* XXX dmabuf share */
	/* Linux only: share the device's anon-inode mapping. */
	dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;
#endif

	return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);
39441ec0267Sriastradh 
39541ec0267Sriastradh /**
39641ec0267Sriastradh  * drm_gem_dmabuf_release - &dma_buf release implementation for GEM
3979d20d926Sriastradh  * @dma_buf: buffer to be released
3989d20d926Sriastradh  *
3999d20d926Sriastradh  * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
40041ec0267Sriastradh  * must use this in their &dma_buf_ops structure as the release callback.
40141ec0267Sriastradh  * drm_gem_dmabuf_release() should be used in conjunction with
40241ec0267Sriastradh  * drm_gem_dmabuf_export().
4039d20d926Sriastradh  */
drm_gem_dmabuf_release(struct dma_buf * dma_buf)4049d20d926Sriastradh void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
4059d20d926Sriastradh {
4069d20d926Sriastradh 	struct drm_gem_object *obj = dma_buf->priv;
40741ec0267Sriastradh 	struct drm_device *dev = obj->dev;
4089d20d926Sriastradh 
4099d20d926Sriastradh 	/* drop the reference on the export fd holds */
41041ec0267Sriastradh 	drm_gem_object_put_unlocked(obj);
41141ec0267Sriastradh 
41241ec0267Sriastradh 	drm_dev_put(dev);
4139d20d926Sriastradh }
4149d20d926Sriastradh EXPORT_SYMBOL(drm_gem_dmabuf_release);
4159d20d926Sriastradh 
41641ec0267Sriastradh /**
41741ec0267Sriastradh  * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
41841ec0267Sriastradh  * @dev: dev to export the buffer from
41941ec0267Sriastradh  * @file_priv: drm file-private structure
42041ec0267Sriastradh  * @prime_fd: fd id of the dma-buf which should be imported
42141ec0267Sriastradh  * @handle: pointer to storage for the handle of the imported buffer object
42241ec0267Sriastradh  *
42341ec0267Sriastradh  * This is the PRIME import function which must be used mandatorily by GEM
42441ec0267Sriastradh  * drivers to ensure correct lifetime management of the underlying GEM object.
42541ec0267Sriastradh  * The actual importing of GEM object from the dma-buf is done through the
42641ec0267Sriastradh  * &drm_driver.gem_prime_import driver callback.
42741ec0267Sriastradh  *
42841ec0267Sriastradh  * Returns 0 on success or a negative error code on failure.
42941ec0267Sriastradh  */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	/* dma_buf_get takes a reference; dropped on every exit path. */
	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	/* Fast path: this dma-buf was imported before; reuse its handle. */
	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	if (dev->driver->gem_prime_import)
		obj = dev->driver->gem_prime_import(dev, dma_buf);
	else
		obj = drm_gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	/*
	 * Link the GEM object back to the dma-buf; a pre-existing
	 * obj->dma_buf must be this very dma-buf (self-import case).
	 */
	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_put_unlocked(obj);
	if (ret)
		goto out_put;

	/* Cache the new (dma_buf, handle) pair for future lookups. */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	mutex_unlock(&file_priv->prime.lock);
	if (ret)
		goto fail;

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_handle_delete(file_priv, *handle);
	dma_buf_put(dma_buf);
	return ret;

out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	mutex_unlock(&file_priv->prime.lock);
	dma_buf_put(dma_buf);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
4999d20d926Sriastradh 
drm_prime_fd_to_handle_ioctl(struct drm_device * dev,void * data,struct drm_file * file_priv)50041ec0267Sriastradh int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
50141ec0267Sriastradh 				 struct drm_file *file_priv)
5029d20d926Sriastradh {
50341ec0267Sriastradh 	struct drm_prime_handle *args = data;
5049d20d926Sriastradh 
50541ec0267Sriastradh 	if (!dev->driver->prime_fd_to_handle)
5069d20d926Sriastradh 		return -ENOSYS;
5079d20d926Sriastradh 
50841ec0267Sriastradh 	return dev->driver->prime_fd_to_handle(dev, file_priv,
50941ec0267Sriastradh 			args->fd, &args->handle);
5109d20d926Sriastradh }
5119d20d926Sriastradh 
/*
 * Export @obj as a dma-buf via the most specific available hook and
 * cache the result in obj->dma_buf (taking a reference for the cache).
 * Returns the dma-buf or an ERR_PTR.
 */
static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/*
	 * Prevent races with concurrent gem_close: an object whose last
	 * userspace handle is already gone must not be exported.
	 */
	if (obj->handle_count == 0)
		return ERR_PTR(-ENOENT);

	/* Per-object hook first, then the driver hook, then the generic. */
	if (obj->funcs && obj->funcs->export)
		dmabuf = obj->funcs->export(obj, flags);
	else if (dev->driver->gem_prime_export)
		dmabuf = dev->driver->gem_prime_export(obj, flags);
	else
		dmabuf = drm_gem_prime_export(obj, flags);

	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);

	return dmabuf;
}
5479d20d926Sriastradh 
5489d20d926Sriastradh /**
5499d20d926Sriastradh  * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
5509d20d926Sriastradh  * @dev: dev to export the buffer from
5519d20d926Sriastradh  * @file_priv: drm file-private structure
5529d20d926Sriastradh  * @handle: buffer handle to export
5539d20d926Sriastradh  * @flags: flags like DRM_CLOEXEC
5549d20d926Sriastradh  * @prime_fd: pointer to storage for the fd id of the create dma-buf
5559d20d926Sriastradh  *
5569d20d926Sriastradh  * This is the PRIME export function which must be used mandatorily by GEM
5579d20d926Sriastradh  * drivers to ensure correct lifetime management of the underlying GEM object.
5589d20d926Sriastradh  * The actual exporting from GEM object to a dma-buf is done through the
55941ec0267Sriastradh  * &drm_driver.gem_prime_export driver callback.
5609d20d926Sriastradh  */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj)  {
		ret = -ENOENT;
		goto out_unlock;
	}

	/* Fast path: this handle was already exported; reuse the dma-buf. */
	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	/* Object was exported before (under another handle); reuse it. */
	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	/* First export of this object: create and cache the dma-buf. */
	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't miss to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	/* Install a file descriptor for the dma-buf; returns fd or error. */
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_put_unlocked(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
647fcd0cb28Sriastradh 
drm_prime_handle_to_fd_ioctl(struct drm_device * dev,void * data,struct drm_file * file_priv)648fcd0cb28Sriastradh int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
649fcd0cb28Sriastradh 				 struct drm_file *file_priv)
650fcd0cb28Sriastradh {
651fcd0cb28Sriastradh 	struct drm_prime_handle *args = data;
652fcd0cb28Sriastradh 
653fcd0cb28Sriastradh 	if (!dev->driver->prime_handle_to_fd)
654fcd0cb28Sriastradh 		return -ENOSYS;
655fcd0cb28Sriastradh 
656fcd0cb28Sriastradh 	/* check flags are valid */
65741ec0267Sriastradh 	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
658fcd0cb28Sriastradh 		return -EINVAL;
659fcd0cb28Sriastradh 
660fcd0cb28Sriastradh 	return dev->driver->prime_handle_to_fd(dev, file_priv,
66141ec0267Sriastradh 			args->handle, args->flags, &args->fd);
662fcd0cb28Sriastradh }
663fcd0cb28Sriastradh 
66441ec0267Sriastradh /**
66541ec0267Sriastradh  * DOC: PRIME Helpers
66641ec0267Sriastradh  *
66741ec0267Sriastradh  * Drivers can implement &drm_gem_object_funcs.export and
66841ec0267Sriastradh  * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
66941ec0267Sriastradh  * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions
67041ec0267Sriastradh  * implement dma-buf support in terms of some lower-level helpers, which are
67141ec0267Sriastradh  * again exported for drivers to use individually:
67241ec0267Sriastradh  *
67341ec0267Sriastradh  * Exporting buffers
67441ec0267Sriastradh  * ~~~~~~~~~~~~~~~~~
67541ec0267Sriastradh  *
67641ec0267Sriastradh  * Optional pinning of buffers is handled at dma-buf attach and detach time in
67741ec0267Sriastradh  * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
67841ec0267Sriastradh  * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which relies on
67941ec0267Sriastradh  * &drm_gem_object_funcs.get_sg_table.
68041ec0267Sriastradh  *
68141ec0267Sriastradh  * For kernel-internal access there's drm_gem_dmabuf_vmap() and
68241ec0267Sriastradh  * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
68341ec0267Sriastradh  * drm_gem_dmabuf_mmap().
68441ec0267Sriastradh  *
68541ec0267Sriastradh  * Note that these export helpers can only be used if the underlying backing
68641ec0267Sriastradh  * storage is fully coherent and either permanently pinned, or it is safe to pin
68741ec0267Sriastradh  * it indefinitely.
68841ec0267Sriastradh  *
68941ec0267Sriastradh  * FIXME: The underlying helper functions are named rather inconsistently.
69041ec0267Sriastradh  *
 * Importing buffers
 * ~~~~~~~~~~~~~~~~~
69341ec0267Sriastradh  *
69441ec0267Sriastradh  * Importing dma-bufs using drm_gem_prime_import() relies on
69541ec0267Sriastradh  * &drm_driver.gem_prime_import_sg_table.
69641ec0267Sriastradh  *
69741ec0267Sriastradh  * Note that similarly to the export helpers this permanently pins the
69841ec0267Sriastradh  * underlying backing storage. Which is ok for scanout, but is not the best
69941ec0267Sriastradh  * option for sharing lots of buffers for rendering.
70041ec0267Sriastradh  */
70141ec0267Sriastradh 
70241ec0267Sriastradh /**
70341ec0267Sriastradh  * drm_gem_map_attach - dma_buf attach implementation for GEM
70441ec0267Sriastradh  * @dma_buf: buffer to attach device to
70541ec0267Sriastradh  * @attach: buffer attachment data
70641ec0267Sriastradh  *
70741ec0267Sriastradh  * Calls &drm_gem_object_funcs.pin for device specific handling. This can be
70841ec0267Sriastradh  * used as the &dma_buf_ops.attach callback. Must be used together with
70941ec0267Sriastradh  * drm_gem_map_detach().
71041ec0267Sriastradh  *
71141ec0267Sriastradh  * Returns 0 on success, negative error code on failure.
71241ec0267Sriastradh  */
drm_gem_map_attach(struct dma_buf * dma_buf,struct dma_buf_attachment * attach)71341ec0267Sriastradh int drm_gem_map_attach(struct dma_buf *dma_buf,
71441ec0267Sriastradh 		       struct dma_buf_attachment *attach)
715fcd0cb28Sriastradh {
71641ec0267Sriastradh 	struct drm_gem_object *obj = dma_buf->priv;
717fcd0cb28Sriastradh 
71841ec0267Sriastradh 	return drm_gem_pin(obj);
71941ec0267Sriastradh }
72041ec0267Sriastradh EXPORT_SYMBOL(drm_gem_map_attach);
721fcd0cb28Sriastradh 
/**
 * drm_gem_map_detach - dma_buf detach implementation for GEM
 * @dma_buf: buffer to detach from
 * @attach: attachment to be detached
 *
 * Calls &drm_gem_object_funcs.unpin for device specific handling,
 * dropping the pin taken by drm_gem_map_attach().  Cleans up
 * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the
 * &dma_buf_ops.detach callback.
 */
void drm_gem_map_detach(struct dma_buf *dma_buf,
			struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;

	drm_gem_unpin(obj);
}
EXPORT_SYMBOL(drm_gem_map_detach);
73941ec0267Sriastradh 
74041ec0267Sriastradh /**
74141ec0267Sriastradh  * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
74241ec0267Sriastradh  * @attach: attachment whose scatterlist is to be returned
74341ec0267Sriastradh  * @dir: direction of DMA transfer
74441ec0267Sriastradh  *
74541ec0267Sriastradh  * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This
74641ec0267Sriastradh  * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together
74741ec0267Sriastradh  * with drm_gem_unmap_dma_buf().
74841ec0267Sriastradh  *
74941ec0267Sriastradh  * Returns:sg_table containing the scatterlist to be returned; returns ERR_PTR
75041ec0267Sriastradh  * on error. May return -EINTR if it is interrupted by a signal.
75141ec0267Sriastradh  */
drm_gem_map_dma_buf(struct dma_buf_attachment * attach,enum dma_data_direction dir)75241ec0267Sriastradh struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
75341ec0267Sriastradh 				     enum dma_data_direction dir)
75441ec0267Sriastradh {
75541ec0267Sriastradh 	struct drm_gem_object *obj = attach->dmabuf->priv;
75641ec0267Sriastradh 	struct sg_table *sgt;
75741ec0267Sriastradh 
75841ec0267Sriastradh 	if (WARN_ON(dir == DMA_NONE))
75941ec0267Sriastradh 		return ERR_PTR(-EINVAL);
76041ec0267Sriastradh 
76141ec0267Sriastradh 	if (obj->funcs)
76241ec0267Sriastradh 		sgt = obj->funcs->get_sg_table(obj);
76341ec0267Sriastradh 	else
76441ec0267Sriastradh 		sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
76541ec0267Sriastradh 
76641ec0267Sriastradh 	if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
76741ec0267Sriastradh 			      DMA_ATTR_SKIP_CPU_SYNC)) {
76841ec0267Sriastradh 		sg_free_table(sgt);
76941ec0267Sriastradh 		kfree(sgt);
77041ec0267Sriastradh 		sgt = ERR_PTR(-ENOMEM);
77141ec0267Sriastradh 	}
77241ec0267Sriastradh 
77341ec0267Sriastradh 	return sgt;
77441ec0267Sriastradh }
77541ec0267Sriastradh EXPORT_SYMBOL(drm_gem_map_dma_buf);
77641ec0267Sriastradh 
77741ec0267Sriastradh /**
77841ec0267Sriastradh  * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
77941ec0267Sriastradh  * @attach: attachment to unmap buffer from
78041ec0267Sriastradh  * @sgt: scatterlist info of the buffer to unmap
78141ec0267Sriastradh  * @dir: direction of DMA transfer
78241ec0267Sriastradh  *
78341ec0267Sriastradh  * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
78441ec0267Sriastradh  */
drm_gem_unmap_dma_buf(struct dma_buf_attachment * attach,struct sg_table * sgt,enum dma_data_direction dir)78541ec0267Sriastradh void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
78641ec0267Sriastradh 			   struct sg_table *sgt,
78741ec0267Sriastradh 			   enum dma_data_direction dir)
78841ec0267Sriastradh {
78941ec0267Sriastradh 	if (!sgt)
79041ec0267Sriastradh 		return;
79141ec0267Sriastradh 
79241ec0267Sriastradh 	dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
79341ec0267Sriastradh 			   DMA_ATTR_SKIP_CPU_SYNC);
79441ec0267Sriastradh 	sg_free_table(sgt);
79541ec0267Sriastradh 	kfree(sgt);
79641ec0267Sriastradh }
79741ec0267Sriastradh EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
79841ec0267Sriastradh 
79941ec0267Sriastradh /**
80041ec0267Sriastradh  * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
80141ec0267Sriastradh  * @dma_buf: buffer to be mapped
80241ec0267Sriastradh  *
80341ec0267Sriastradh  * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
80441ec0267Sriastradh  * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
80541ec0267Sriastradh  *
80641ec0267Sriastradh  * Returns the kernel virtual address or NULL on failure.
80741ec0267Sriastradh  */
drm_gem_dmabuf_vmap(struct dma_buf * dma_buf)80841ec0267Sriastradh void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
80941ec0267Sriastradh {
81041ec0267Sriastradh 	struct drm_gem_object *obj = dma_buf->priv;
81141ec0267Sriastradh 	void *vaddr;
81241ec0267Sriastradh 
81341ec0267Sriastradh 	vaddr = drm_gem_vmap(obj);
81441ec0267Sriastradh 	if (IS_ERR(vaddr))
81541ec0267Sriastradh 		vaddr = NULL;
81641ec0267Sriastradh 
81741ec0267Sriastradh 	return vaddr;
81841ec0267Sriastradh }
81941ec0267Sriastradh EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
82041ec0267Sriastradh 
82141ec0267Sriastradh /**
82241ec0267Sriastradh  * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
82341ec0267Sriastradh  * @dma_buf: buffer to be unmapped
82441ec0267Sriastradh  * @vaddr: the virtual address of the buffer
82541ec0267Sriastradh  *
82641ec0267Sriastradh  * Releases a kernel virtual mapping. This can be used as the
82741ec0267Sriastradh  * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for device specific handling.
82841ec0267Sriastradh  */
drm_gem_dmabuf_vunmap(struct dma_buf * dma_buf,void * vaddr)82941ec0267Sriastradh void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
83041ec0267Sriastradh {
83141ec0267Sriastradh 	struct drm_gem_object *obj = dma_buf->priv;
83241ec0267Sriastradh 
83341ec0267Sriastradh 	drm_gem_vunmap(obj, vaddr);
83441ec0267Sriastradh }
83541ec0267Sriastradh EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
83641ec0267Sriastradh 
/**
 * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
 * @obj: GEM object
 * @vma: Virtual address range
 *
 * This function sets up a userspace mapping for PRIME exported buffers using
 * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
 * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
 * called to set up the mapping.
 *
 * On NetBSD the signature differs: the mapping request is described by
 * an offset/size/protection tuple and the result is returned as a UVM
 * object via @uobjp instead of a vm_area_struct.
 *
 * Drivers can use this as their &drm_driver.gem_prime_mmap callback.
 */
#ifdef __NetBSD__
int drm_gem_prime_mmap(struct drm_gem_object *obj, off_t *offp, size_t size,
    int prot, int *flagsp, int *advicep, struct uvm_object **uobjp,
    int *maxprotp)
#else
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
#endif
{
	struct drm_file *priv;
	struct file *fil;
	int ret;

	/* Add the fake offset */
#ifdef __NetBSD__
	*offp += drm_vma_node_start(&obj->vma_node);
#else
	vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
#endif

	/* Prefer the object's own mmap hook when the driver provides one. */
	if (obj->funcs && obj->funcs->mmap) {
#ifdef __NetBSD__
		ret = obj->funcs->mmap(obj, offp, size, prot, flagsp, advicep,
		    uobjp, maxprotp);
#else
		ret = obj->funcs->mmap(obj, vma);
#endif
		if (ret)
			return ret;
#ifndef __NetBSD__
		vma->vm_private_data = obj;
#endif
		/* The mapping holds a reference on the GEM object. */
		drm_gem_object_get(obj);
		return 0;
	}

	/*
	 * Otherwise fabricate a temporary drm_file/file pair so the
	 * regular DRM-fd mmap path can be reused for this object.
	 */
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	fil = kzalloc(sizeof(*fil), GFP_KERNEL);
	if (!priv || !fil) {
		ret = -ENOMEM;
		goto out;
	}

	/* Used by drm_gem_mmap() to lookup the GEM object */
	priv->minor = obj->dev->primary;
#ifdef __NetBSD__
	fil->f_data = priv;
#else
	fil->private_data = priv;
#endif

	/* Temporarily authorize this fake file to map the object's node. */
	ret = drm_vma_node_allow(&obj->vma_node, priv);
	if (ret)
		goto out;

#ifdef __NetBSD__
	KASSERT(size > 0);
	ret = obj->dev->driver->mmap_object(obj->dev, *offp, size, prot, uobjp,
	    offp, fil);
#else
	ret = obj->dev->driver->fops->mmap(fil, vma);
#endif

	/* Revoke the temporary authorization; the mapping itself persists. */
	drm_vma_node_revoke(&obj->vma_node, priv);
out:
	kfree(priv);
	kfree(fil);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_mmap);
91941ec0267Sriastradh 
/**
 * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
 * @dma_buf: buffer to be mapped
 * @vma: virtual address range
 *
 * Provides memory mapping for the buffer. This can be used as the
 * &dma_buf_ops.mmap callback. It just forwards to &drm_driver.gem_prime_mmap,
 * which should be set to drm_gem_prime_mmap().
 *
 * On NetBSD the request arrives as an offset/size/protection tuple and
 * the resulting UVM object is returned through @uobjp.
 *
 * FIXME: There's really no point to this wrapper, drivers which need anything
 * else but drm_gem_prime_mmap can roll their own &dma_buf_ops.mmap callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
#ifdef __NetBSD__
int
drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, off_t *offp, size_t size,
    int prot, int *flagsp, int *advicep, struct uvm_object **uobjp,
    int *maxprotp)
#else
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
#endif
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

#ifdef __NetBSD__
	/* A zero-length mapping request is a caller bug. */
	KASSERT(size > 0);
	return dev->driver->gem_prime_mmap(obj, offp, size, prot, flagsp,
	    advicep, uobjp, maxprotp);
#else
	return dev->driver->gem_prime_mmap(obj, vma);
#endif
}
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
95841ec0267Sriastradh 
/*
 * Default dma_buf_ops installed by drm_gem_prime_export(): every
 * callback forwards to the corresponding GEM PRIME helper above.
 */
static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
	.cache_sgt_mapping = true,
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
970fcd0cb28Sriastradh 
9719d20d926Sriastradh /**
9729d20d926Sriastradh  * drm_prime_pages_to_sg - converts a page array into an sg list
9739d20d926Sriastradh  * @pages: pointer to the array of page pointers to convert
9749d20d926Sriastradh  * @nr_pages: length of the page vector
975fcd0cb28Sriastradh  *
9769d20d926Sriastradh  * This helper creates an sg table object from a set of pages
977fcd0cb28Sriastradh  * the driver is responsible for mapping the pages into the
9789d20d926Sriastradh  * importers address space for use with dma_buf itself.
97941ec0267Sriastradh  *
98041ec0267Sriastradh  * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
981fcd0cb28Sriastradh  */
drm_prime_pages_to_sg(struct page ** pages,unsigned int nr_pages)982efa246c0Sriastradh struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
983fcd0cb28Sriastradh {
984fcd0cb28Sriastradh 	struct sg_table *sg = NULL;
985fcd0cb28Sriastradh 	int ret;
986fcd0cb28Sriastradh 
987fcd0cb28Sriastradh 	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
9889d20d926Sriastradh 	if (!sg) {
9899d20d926Sriastradh 		ret = -ENOMEM;
990fcd0cb28Sriastradh 		goto out;
9919d20d926Sriastradh 	}
992fcd0cb28Sriastradh 
9939d20d926Sriastradh 	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
9949d20d926Sriastradh 				nr_pages << PAGE_SHIFT, GFP_KERNEL);
995fcd0cb28Sriastradh 	if (ret)
996fcd0cb28Sriastradh 		goto out;
997fcd0cb28Sriastradh 
998fcd0cb28Sriastradh 	return sg;
999fcd0cb28Sriastradh out:
1000fcd0cb28Sriastradh 	kfree(sg);
10019d20d926Sriastradh 	return ERR_PTR(ret);
1002fcd0cb28Sriastradh }
1003fcd0cb28Sriastradh EXPORT_SYMBOL(drm_prime_pages_to_sg);
1004fcd0cb28Sriastradh 
/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * This is the implementation of the &drm_gem_object_funcs.export functions for GEM drivers
 * using the PRIME helpers. It is used as the default in
 * drm_gem_prime_handle_to_fd().
 */
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
				     int flags)
{
	struct drm_device *dev = obj->dev;
	struct dma_buf_export_info exp_info = {
#ifndef __NetBSD__
		.exp_name = KBUILD_MODNAME, /* white lie for debug */
		.owner = dev->driver->fops->owner,
#endif
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
		/* Share the GEM object's reservation object with the dma-buf. */
		.resv = obj->resv,
	};

	return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);
103341ec0267Sriastradh 
/**
 * drm_gem_prime_import_dev - core implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 * @attach_dev: struct device to dma_buf attach (bus_dma tag on NetBSD)
 *
 * This is the core of drm_gem_prime_import(). It's designed to be called by
 * drivers who want to use a different device structure than &drm_device.dev for
 * attaching via dma_buf. This function calls
 * &drm_driver.gem_prime_import_sg_table internally.
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
 */
#ifdef __NetBSD__
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
					    struct dma_buf *dma_buf,
					    bus_dma_tag_t attach_dev)
#else
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
					    struct dma_buf *dma_buf,
					    struct device *attach_dev)
#endif
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	/* Short-circuit self-import: this dma-buf wraps one of our own objects. */
	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, attach_dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	/* Hold the dma-buf for the lifetime of the import; dropped on failure. */
	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	/* Let the driver wrap the scatterlist in its own GEM object. */
	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;
	obj->resv = dma_buf->resv;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import_dev);
111041ec0267Sriastradh 
/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import functions for GEM drivers
 * using the PRIME helpers. Drivers can use this as their
 * &drm_driver.gem_prime_import implementation. It is used as the default
 * implementation in drm_gem_prime_fd_to_handle().
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
#ifdef __NetBSD__
	/* On NetBSD the attachment device is the drm_device's bus_dma tag. */
	return drm_gem_prime_import_dev(dev, dma_buf, dev->dmat);
#else
	return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
#endif
}
EXPORT_SYMBOL(drm_gem_prime_import);
113441ec0267Sriastradh 
1135f2331d95Sriastradh #ifdef __NetBSD__
1136f2331d95Sriastradh 
1137f2331d95Sriastradh struct sg_table *
drm_prime_bus_dmamem_to_sg(bus_dma_tag_t dmat,const bus_dma_segment_t * segs,int nsegs)1138eb0c859bSriastradh drm_prime_bus_dmamem_to_sg(bus_dma_tag_t dmat, const bus_dma_segment_t *segs,
1139eb0c859bSriastradh     int nsegs)
114040e1d52aSriastradh {
114140e1d52aSriastradh 	struct sg_table *sg;
114240e1d52aSriastradh 	int ret;
114340e1d52aSriastradh 
114440e1d52aSriastradh 	sg = kmalloc(sizeof(*sg), GFP_KERNEL);
114540e1d52aSriastradh 	if (sg == NULL) {
114640e1d52aSriastradh 		ret = -ENOMEM;
114740e1d52aSriastradh 		goto out;
114840e1d52aSriastradh 	}
114940e1d52aSriastradh 
1150eb0c859bSriastradh 	ret = sg_alloc_table_from_bus_dmamem(sg, dmat, segs, nsegs,
1151eb0c859bSriastradh 	    GFP_KERNEL);
115240e1d52aSriastradh 	if (ret)
115340e1d52aSriastradh 		goto out;
115440e1d52aSriastradh 
115540e1d52aSriastradh 	return sg;
115640e1d52aSriastradh out:
115740e1d52aSriastradh 	kfree(sg);
115840e1d52aSriastradh 	return ERR_PTR(ret);
115940e1d52aSriastradh }
116040e1d52aSriastradh 
116140e1d52aSriastradh bus_size_t
drm_prime_sg_size(struct sg_table * sg)116240e1d52aSriastradh drm_prime_sg_size(struct sg_table *sg)
116340e1d52aSriastradh {
116440e1d52aSriastradh 
11659ff13907Sriastradh 	return sg->sgl->sg_npgs << PAGE_SHIFT;
116640e1d52aSriastradh }
116740e1d52aSriastradh 
/*
 * Release an sg_table created by drm_prime_bus_dmamem_to_sg() or
 * drm_prime_pages_to_sg: free the table's internal storage, then the
 * table itself.
 */
void
drm_prime_sg_free(struct sg_table *sg)
{

	sg_free_table(sg);
	kfree(sg);
}
1175f2331d95Sriastradh 
1176f2331d95Sriastradh int
drm_prime_sg_to_bus_dmamem(bus_dma_tag_t dmat,bus_dma_segment_t * segs,int nsegs,int * rsegs,const struct sg_table * sgt)1177eb0c859bSriastradh drm_prime_sg_to_bus_dmamem(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
1178eb0c859bSriastradh     int nsegs, int *rsegs, const struct sg_table *sgt)
1179f2331d95Sriastradh {
1180f2331d95Sriastradh 
1181f2331d95Sriastradh 	/* XXX errno NetBSD->Linux */
11829ff13907Sriastradh 	return -bus_dmamem_import_pages(dmat, segs, nsegs, rsegs,
11839ff13907Sriastradh 	    sgt->sgl->sg_pgs, sgt->sgl->sg_npgs);
1184f2331d95Sriastradh }
1185f2331d95Sriastradh 
118640e1d52aSriastradh int
drm_prime_bus_dmamap_load_sgt(bus_dma_tag_t dmat,bus_dmamap_t map,struct sg_table * sgt)1187eb0c859bSriastradh drm_prime_bus_dmamap_load_sgt(bus_dma_tag_t dmat, bus_dmamap_t map,
1188eb0c859bSriastradh     struct sg_table *sgt)
118940e1d52aSriastradh {
1190eb0c859bSriastradh 	bus_dma_segment_t *segs;
1191eb0c859bSriastradh 	bus_size_t size = drm_prime_sg_size(sgt);
11929ff13907Sriastradh 	int nsegs = sgt->sgl->sg_npgs;
1193eb0c859bSriastradh 	int ret;
119440e1d52aSriastradh 
11959ff13907Sriastradh 	segs = kcalloc(sgt->sgl->sg_npgs, sizeof(segs[0]), GFP_KERNEL);
1196eb0c859bSriastradh 	if (segs == NULL) {
1197eb0c859bSriastradh 		ret = -ENOMEM;
1198eb0c859bSriastradh 		goto out0;
1199eb0c859bSriastradh 	}
1200eb0c859bSriastradh 
1201eb0c859bSriastradh 	ret = drm_prime_sg_to_bus_dmamem(dmat, segs, nsegs, &nsegs, sgt);
1202eb0c859bSriastradh 	if (ret)
1203eb0c859bSriastradh 		goto out1;
12049ff13907Sriastradh 	KASSERT(nsegs <= sgt->sgl->sg_npgs);
1205eb0c859bSriastradh 
1206eb0c859bSriastradh 	/* XXX errno NetBSD->Linux */
1207eb0c859bSriastradh 	ret = -bus_dmamap_load_raw(dmat, map, segs, nsegs, size,
1208eb0c859bSriastradh 	    BUS_DMA_NOWAIT);
1209eb0c859bSriastradh 	if (ret)
1210eb0c859bSriastradh 		goto out1;
1211eb0c859bSriastradh 
1212eb0c859bSriastradh out1:	kfree(segs);
1213eb0c859bSriastradh out0:	return ret;
121440e1d52aSriastradh }
121540e1d52aSriastradh 
1216b0f9d14dSriastradh bool
drm_prime_sg_importable(bus_dma_tag_t dmat,struct sg_table * sgt)1217b0f9d14dSriastradh drm_prime_sg_importable(bus_dma_tag_t dmat, struct sg_table *sgt)
1218b0f9d14dSriastradh {
1219b0f9d14dSriastradh 	unsigned i;
1220b0f9d14dSriastradh 
12219ff13907Sriastradh 	for (i = 0; i < sgt->sgl->sg_npgs; i++) {
12229ff13907Sriastradh 		if (bus_dmatag_bounces_paddr(dmat,
12239ff13907Sriastradh 			VM_PAGE_TO_PHYS(&sgt->sgl->sg_pgs[i]->p_vmp)))
1224b0f9d14dSriastradh 			return false;
1225b0f9d14dSriastradh 	}
1226b0f9d14dSriastradh 	return true;
1227b0f9d14dSriastradh }
1228b0f9d14dSriastradh 
1229f2331d95Sriastradh #else  /* !__NetBSD__ */
1230f2331d95Sriastradh 
12319d20d926Sriastradh /**
12329d20d926Sriastradh  * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
12339d20d926Sriastradh  * @sgt: scatter-gather table to convert
123441ec0267Sriastradh  * @pages: optional array of page pointers to store the page array in
12359d20d926Sriastradh  * @addrs: optional array to store the dma bus address of each page
123641ec0267Sriastradh  * @max_entries: size of both the passed-in arrays
12379d20d926Sriastradh  *
12389d20d926Sriastradh  * Exports an sg table into an array of pages and addresses. This is currently
12399d20d926Sriastradh  * required by the TTM driver in order to do correct fault handling.
124041ec0267Sriastradh  *
124141ec0267Sriastradh  * Drivers can use this in their &drm_driver.gem_prime_import_sg_table
124241ec0267Sriastradh  * implementation.
12439d20d926Sriastradh  */
drm_prime_sg_to_page_addr_arrays(struct sg_table * sgt,struct page ** pages,dma_addr_t * addrs,int max_entries)1244fcd0cb28Sriastradh int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
124541ec0267Sriastradh 				     dma_addr_t *addrs, int max_entries)
1246fcd0cb28Sriastradh {
1247fcd0cb28Sriastradh 	unsigned count;
1248fcd0cb28Sriastradh 	struct scatterlist *sg;
1249fcd0cb28Sriastradh 	struct page *page;
125041ec0267Sriastradh 	u32 len, index;
1251fcd0cb28Sriastradh 	dma_addr_t addr;
1252fcd0cb28Sriastradh 
125341ec0267Sriastradh 	index = 0;
1254fcd0cb28Sriastradh 	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
1255fcd0cb28Sriastradh 		len = sg->length;
1256fcd0cb28Sriastradh 		page = sg_page(sg);
1257fcd0cb28Sriastradh 		addr = sg_dma_address(sg);
1258fcd0cb28Sriastradh 
1259fcd0cb28Sriastradh 		while (len > 0) {
126041ec0267Sriastradh 			if (WARN_ON(index >= max_entries))
1261fcd0cb28Sriastradh 				return -1;
126241ec0267Sriastradh 			if (pages)
126341ec0267Sriastradh 				pages[index] = page;
1264fcd0cb28Sriastradh 			if (addrs)
126541ec0267Sriastradh 				addrs[index] = addr;
1266fcd0cb28Sriastradh 
1267fcd0cb28Sriastradh 			page++;
1268fcd0cb28Sriastradh 			addr += PAGE_SIZE;
1269fcd0cb28Sriastradh 			len -= PAGE_SIZE;
127041ec0267Sriastradh 			index++;
1271fcd0cb28Sriastradh 		}
1272fcd0cb28Sriastradh 	}
1273fcd0cb28Sriastradh 	return 0;
1274fcd0cb28Sriastradh }
1275fcd0cb28Sriastradh EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
12769d20d926Sriastradh 
1277f2331d95Sriastradh #endif	/* __NetBSD__ */
1278f2331d95Sriastradh 
12799d20d926Sriastradh /**
12809d20d926Sriastradh  * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
12819d20d926Sriastradh  * @obj: GEM object which was created from a dma-buf
12829d20d926Sriastradh  * @sg: the sg-table which was pinned at import time
12839d20d926Sriastradh  *
12849d20d926Sriastradh  * This is the cleanup functions which GEM drivers need to call when they use
128541ec0267Sriastradh  * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
12869d20d926Sriastradh  */
drm_prime_gem_destroy(struct drm_gem_object * obj,struct sg_table * sg)1287fcd0cb28Sriastradh void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
1288fcd0cb28Sriastradh {
1289fcd0cb28Sriastradh 	struct dma_buf_attachment *attach;
1290fcd0cb28Sriastradh 	struct dma_buf *dma_buf;
1291fcd0cb28Sriastradh 	attach = obj->import_attach;
1292fcd0cb28Sriastradh 	if (sg)
1293fcd0cb28Sriastradh 		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
1294fcd0cb28Sriastradh 	dma_buf = attach->dmabuf;
1295fcd0cb28Sriastradh 	dma_buf_detach(attach->dmabuf, attach);
1296fcd0cb28Sriastradh 	/* remove the reference */
1297fcd0cb28Sriastradh 	dma_buf_put(dma_buf);
1298fcd0cb28Sriastradh }
1299fcd0cb28Sriastradh EXPORT_SYMBOL(drm_prime_gem_destroy);
1300