/* $NetBSD: drm_gem_cma_helper.c,v 1.15 2023/08/15 04:57:36 mrg Exp $ */

/*-
 * Copyright (c) 2015-2017 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_gem_cma_helper.c,v 1.15 2023/08/15 04:57:36 mrg Exp $");

#include <linux/err.h>

#include <drm/bus_dma_hacks.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

#include <uvm/uvm_extern.h>

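/*
 * drm_gem_cma_create_internal --
 *
 *	Allocate and map the backing store for a CMA GEM object of the
 *	given size.  If an sg_table is supplied (PRIME import), it is
 *	wrapped into a single bus_dma segment; otherwise the memory is
 *	carved out of the device's CMA vmem pool when one is configured,
 *	falling back to bus_dmamem_alloc().  The memory is then mapped
 *	into kernel virtual address space and loaded into a DMA map.
 *	Returns NULL on failure.
 */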
static struct drm_gem_cma_object *
drm_gem_cma_create_internal(struct drm_device *ddev, size_t size,
    struct sg_table *sgt)
{
	struct drm_gem_cma_object *obj;
	int error = EINVAL, nsegs;

	obj = kmem_zalloc(sizeof(*obj), KM_SLEEP);
	obj->dmat = ddev->dmat;
	obj->dmasize = size;

	if (sgt) {
		error = -drm_prime_sg_to_bus_dmamem(obj->dmat, obj->dmasegs, 1,
		    &nsegs, sgt);
	} else {
		if (ddev->cma_pool != NULL) {
			error = vmem_xalloc(ddev->cma_pool, obj->dmasize,
			    PAGE_SIZE, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
			    VM_BESTFIT | VM_NOSLEEP, &obj->vmem_addr);
			if (!error) {
				obj->vmem_pool = ddev->cma_pool;
				obj->dmasegs[0].ds_addr =
				    PHYS_TO_BUS_MEM(obj->dmat, obj->vmem_addr);
				obj->dmasegs[0].ds_len =
				    roundup(obj->dmasize, PAGE_SIZE);
				nsegs = 1;
			}
		}
		if (obj->vmem_pool == NULL) {
			error = bus_dmamem_alloc(obj->dmat, obj->dmasize,
			    PAGE_SIZE, 0, obj->dmasegs, 1, &nsegs,
			    BUS_DMA_WAITOK);
		}
	}
	if (error)
		goto failed;
	error = bus_dmamem_map(obj->dmat, obj->dmasegs, nsegs,
	    obj->dmasize, &obj->vaddr,
	    BUS_DMA_WAITOK | BUS_DMA_PREFETCHABLE);
	if (error)
		goto free;
	error = bus_dmamap_create(obj->dmat, obj->dmasize, 1,
	    obj->dmasize, 0, BUS_DMA_WAITOK, &obj->dmamap);
	if (error)
		goto unmap;
	error = bus_dmamap_load(obj->dmat, obj->dmamap, obj->vaddr,
	    obj->dmasize, NULL, BUS_DMA_WAITOK);
	if (error)
		goto destroy;

	if (!sgt)
		memset(obj->vaddr, 0, obj->dmasize);

	drm_gem_private_object_init(ddev, &obj->base, size);

	return obj;

destroy:
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
unmap:
	bus_dmamem_unmap(obj->dmat, obj->vaddr, obj->dmasize);
free:
	if (obj->sgt)
		drm_prime_sg_free(obj->sgt);
	else if (obj->vmem_pool)
		vmem_xfree(obj->vmem_pool, obj->vmem_addr, obj->dmasize);
	else
		bus_dmamem_free(obj->dmat, obj->dmasegs, nsegs);
failed:
	kmem_free(obj, sizeof(*obj));

	return NULL;
}

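/*
 * drm_gem_cma_create --
 *
 *	Allocate a new CMA GEM object backed by freshly allocated,
 *	zero-filled DMA memory.
 */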
struct drm_gem_cma_object *
drm_gem_cma_create(struct drm_device *ddev, size_t size)
{

	return drm_gem_cma_create_internal(ddev, size, NULL);
}

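/*
 * drm_gem_cma_obj_free --
 *
 *	Release the DMA resources backing a CMA GEM object: unload and
 *	destroy the DMA map, drop the kernel virtual mapping, return the
 *	memory to its origin (imported sg_table, CMA vmem pool, or
 *	bus_dmamem), and free the object itself.
 */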
static void
drm_gem_cma_obj_free(struct drm_gem_cma_object *obj)
{

	bus_dmamap_unload(obj->dmat, obj->dmamap);
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
	bus_dmamem_unmap(obj->dmat, obj->vaddr, obj->dmasize);
	if (obj->sgt)
		drm_prime_sg_free(obj->sgt);
	else if (obj->vmem_pool)
		vmem_xfree(obj->vmem_pool, obj->vmem_addr, obj->dmasize);
	else
		bus_dmamem_free(obj->dmat, obj->dmasegs, 1);
	kmem_free(obj, sizeof(*obj));
}

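/*
 * drm_gem_cma_free_object --
 *
 *	GEM object destructor: release the mmap offset and the GEM core
 *	state, then free the backing DMA memory.
 */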
void
drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *obj = to_drm_gem_cma_obj(gem_obj);

	drm_gem_free_mmap_offset(gem_obj);
	drm_gem_object_release(gem_obj);
	drm_gem_cma_obj_free(obj);
}

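/*
 * drm_gem_cma_dumb_create --
 *
 *	Dumb-buffer creation hook: compute the pitch and page-aligned size
 *	for the requested dimensions, allocate a CMA GEM object of that
 *	size, and hand a handle for it back to the caller.
 */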
int
drm_gem_cma_dumb_create(struct drm_file *file_priv, struct drm_device *ddev,
    struct drm_mode_create_dumb *args)
{
	struct drm_gem_cma_object *obj;
	uint32_t handle;
	int error;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;
	args->size = roundup(args->size, PAGE_SIZE);
	args->handle = 0;

	obj = drm_gem_cma_create(ddev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	error = drm_gem_handle_create(file_priv, &obj->base, &handle);
	drm_gem_object_put_unlocked(&obj->base);
	if (error) {
		drm_gem_cma_obj_free(obj);
		return error;
	}

	args->handle = handle;

	return 0;
}

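/*
 * drm_gem_cma_fault --
 *
 *	uvm pager fault handler for mmapped CMA GEM objects.  For each
 *	requested page, translate the object offset into a machine-dependent
 *	mmap cookie with bus_dmamem_mmap() and enter the resulting physical
 *	address into the faulting pmap.  Copy-on-write mappings are refused.
 */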
static int
drm_gem_cma_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr,
    struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct vm_map_entry *entry = ufi->entry;
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct drm_gem_object *gem_obj =
	    container_of(uobj, struct drm_gem_object, gemo_uvmobj);
	struct drm_gem_cma_object *obj = to_drm_gem_cma_obj(gem_obj);
	off_t curr_offset;
	vaddr_t curr_va;
	paddr_t paddr, mdpgno;
	u_int mmapflags;
	int lcv, retval;
	vm_prot_t mapprot;

	if (UVM_ET_ISCOPYONWRITE(entry))
		return EIO;

	curr_offset = entry->offset + (vaddr - entry->start);
	curr_va = vaddr;

	retval = 0;
	for (lcv = 0; lcv < npages; lcv++, curr_offset += PAGE_SIZE,
	    curr_va += PAGE_SIZE) {
		if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx)
			continue;
		if (pps[lcv] == PGO_DONTCARE)
			continue;

		mdpgno = bus_dmamem_mmap(obj->dmat, obj->dmasegs, 1,
		    curr_offset, access_type, BUS_DMA_PREFETCHABLE);
		if (mdpgno == -1) {
			retval = EIO;
			break;
		}
		paddr = pmap_phys_address(mdpgno);
		mmapflags = pmap_mmap_flags(mdpgno);
		mapprot = ufi->entry->protection;

		if (pmap_enter(ufi->orig_map->pmap, curr_va, paddr, mapprot,
		    PMAP_CANFAIL | mapprot | mmapflags) != 0) {
			pmap_update(ufi->orig_map->pmap);
			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
			return ENOMEM;
		}
	}

	pmap_update(ufi->orig_map->pmap);
	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);

	return retval;
}

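/*
 * uvm pager operations used when a CMA GEM object is mmapped into
 * userspace; faults are resolved by drm_gem_cma_fault() above.
 */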
const struct uvm_pagerops drm_gem_cma_uvm_ops = {
	.pgo_reference = drm_gem_pager_reference,
	.pgo_detach = drm_gem_pager_detach,
	.pgo_fault = drm_gem_cma_fault,
};

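/*
 * drm_gem_cma_prime_get_sg_table --
 *
 *	PRIME export hook: describe the object's single bus_dma segment as
 *	an sg_table for sharing with other drivers.
 */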
struct sg_table *
drm_gem_cma_prime_get_sg_table(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *obj = to_drm_gem_cma_obj(gem_obj);

	return drm_prime_bus_dmamem_to_sg(obj->dmat, obj->dmasegs, 1);
}

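/*
 * drm_gem_cma_prime_import_sg_table --
 *
 *	PRIME import hook: wrap an imported sg_table in a new CMA GEM
 *	object instead of allocating fresh DMA memory.
 */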
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *ddev,
    struct dma_buf_attachment *attach, struct sg_table *sgt)
{
	size_t size = drm_prime_sg_size(sgt);
	struct drm_gem_cma_object *obj;

	obj = drm_gem_cma_create_internal(ddev, size, sgt);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	return &obj->base;
}

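/*
 * drm_gem_cma_prime_vmap --
 *
 *	Return the existing kernel virtual mapping of the object; the
 *	buffer stays mapped for its whole lifetime, so no extra work is
 *	needed here.
 */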
void *
drm_gem_cma_prime_vmap(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *obj = to_drm_gem_cma_obj(gem_obj);

	return obj->vaddr;
}

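/*
 * drm_gem_cma_prime_vunmap --
 *
 *	Counterpart of drm_gem_cma_prime_vmap(); the mapping persists, so
 *	only sanity-check that the address being unmapped is ours.
 */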
void
drm_gem_cma_prime_vunmap(struct drm_gem_object *gem_obj, void *vaddr)
{
	struct drm_gem_cma_object *obj __diagused =
	    to_drm_gem_cma_obj(gem_obj);

	KASSERT(vaddr == obj->vaddr);
}