/*	$NetBSD: drm_memory.c,v 1.17 2021/12/19 10:47:13 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_memory.c,v 1.17 2021/12/19 10:47:13 riastradh Exp $");

#if defined(__i386__) || defined(__x86_64__)

# ifdef _KERNEL_OPT
#  include "agp.h"
#  if NAGP > 0
#   include "agp_i810.h"
#  else
#   define NAGP_I810	0
#  endif
#  include "genfb.h"
# else
#  define NAGP_I810	1
#  define NGENFB	0
# endif

#else

# ifdef _KERNEL_OPT
#  define NAGP_I810	0
#  include "genfb.h"
# else
#  define NAGP_I810	0
#  define NGENFB	0
# endif

#endif

#include <sys/bus.h>

#if NAGP_I810 > 0
/* XXX include order botch -- shouldn't need to include pcivar.h */
#include <dev/pci/pcivar.h>
#include <dev/pci/agpvar.h>
#endif

#if NGENFB > 0
#include <dev/wsfb/genfbvar.h>
#endif

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_cache.h>
#include <drm/drm_legacy.h>
#include <drm/drm_pci.h>
#include <drm/drm_print.h>

/*
 * XXX drm_bus_borrow is a horrible kludge!
 */
static bool
drm_bus_borrow(bus_addr_t base, bus_size_t size, bus_space_handle_t *handlep)
{

#if NAGP_I810 > 0
	if (agp_i810_borrow(base, size, handlep))
		return true;
#endif

#if NGENFB > 0
	if (genfb_borrow(base, handlep))
		return true;
#endif

	return false;
}

void
drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev)
{
	const bus_space_tag_t bst = dev->bst;
	unsigned int unit;

	/*
	 * Search dev's bus maps for a match.
	 */
	for (unit = 0; unit < dev->bus_nmaps; unit++) {
		struct drm_bus_map *const bm = &dev->bus_maps[unit];
		int flags = bm->bm_flags;

		/* Reject maps starting after the request.  */
		if (map->offset < bm->bm_base)
			continue;

		/* Reject maps smaller than the request.  */
		if (bm->bm_size < map->size)
			continue;

		/* Reject maps that the request doesn't fit in.  */
		if ((bm->bm_size - map->size) <
		    (map->offset - bm->bm_base))
			continue;

		/* Ensure we can map the space into virtual memory.  */
		if (!ISSET(flags, BUS_SPACE_MAP_LINEAR))
			continue;

		/* Reflect requested flags in the bus_space map.  */
		if (ISSET(map->flags, _DRM_WRITE_COMBINING))
			flags |= BUS_SPACE_MAP_PREFETCHABLE;

		/* Map it.  */
		if (bus_space_map(bst, map->offset, map->size, flags,
			&map->lm_data.bus_space.bsh))
			break;

		map->lm_data.bus_space.bus_map = bm;
		goto win;
	}

	/* Couldn't map it.  Try borrowing from someone else.  */
	if (drm_bus_borrow(map->offset, map->size,
		&map->lm_data.bus_space.bsh)) {
		map->lm_data.bus_space.bus_map = NULL;
		goto win;
	}

	/* Failure!  */
	return;

win:	map->lm_data.bus_space.bst = bst;
	map->handle = bus_space_vaddr(bst, map->lm_data.bus_space.bsh);
}

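/*
 * Note on the containment test above: rejecting when
 * (bm->bm_size - map->size) < (map->offset - bm->bm_base) is the
 * overflow-safe form of requiring
 *
 *	map->offset + map->size <= bm->bm_base + bm->bm_size,
 *
 * given that the earlier checks already guarantee
 * map->offset >= bm->bm_base and bm->bm_size >= map->size.  For
 * example, a request at offset 0x2000 of size 0x2000 fits a bus map
 * with base 0x1000 and size 0x4000, since 0x4000 - 0x2000 = 0x2000 is
 * at least 0x2000 - 0x1000 = 0x1000.
 */
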
void
drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
{
	if (map->lm_data.bus_space.bus_map != NULL) {
		bus_space_unmap(map->lm_data.bus_space.bst,
		    map->lm_data.bus_space.bsh, map->size);
		map->lm_data.bus_space.bus_map = NULL;
		map->handle = NULL;
	}
}

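/*
 * A minimal usage sketch for the pair above (hypothetical caller;
 * drm_legacy_ioremap returns void, so the only failure indication is
 * that map->handle is never set):
 *
 *	map->handle = NULL;
 *	drm_legacy_ioremap(map, dev);
 *	if (map->handle == NULL)
 *		return -ENOMEM;		(hypothetical error choice)
 *	... access the region through map->handle ...
 *	drm_legacy_ioremapfree(map, dev);
 *
 * Note that drm_legacy_ioremapfree only unmaps mappings it created
 * itself (bus_map != NULL); space borrowed via drm_bus_borrow is left
 * to its owner.
 */
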
/*
 * Allocate a drm dma handle, allocate memory fit for DMA, and map it.
 *
 * XXX This is called drm_pci_alloc for hysterical raisins; it is not
 * specific to PCI.
 *
 * XXX For now, we use non-blocking allocations because this is called
 * by ioctls with the drm global mutex held.
 *
 * XXX Error information is lost because this returns NULL on failure,
 * not even an error embedded in a pointer.
 */
struct drm_dma_handle *
drm_pci_alloc(struct drm_device *dev, size_t size, size_t align)
{
	int nsegs;
	int error;

	/*
	 * Allocate a drm_dma_handle record.
	 */
	struct drm_dma_handle *const dmah = kmem_alloc(sizeof(*dmah),
	    KM_NOSLEEP);
	if (dmah == NULL) {
		error = -ENOMEM;
		goto out;
	}
	dmah->dmah_tag = dev->dmat;

	/*
	 * Allocate the requested amount of DMA-safe memory.
	 */
	/* XXX errno NetBSD->Linux */
	error = -bus_dmamem_alloc(dmah->dmah_tag, size, align, 0,
	    &dmah->dmah_seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto fail0;
	KASSERT(nsegs == 1);

	/*
	 * Map the DMA-safe memory into kernel virtual address space.
	 */
	/* XXX errno NetBSD->Linux */
	error = -bus_dmamem_map(dmah->dmah_tag, &dmah->dmah_seg, 1, size,
	    &dmah->vaddr,
	    (BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_NOCACHE));
	if (error)
		goto fail1;
	dmah->size = size;

	/*
	 * Create a map for DMA transfers.
	 */
	/* XXX errno NetBSD->Linux */
	error = -bus_dmamap_create(dmah->dmah_tag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &dmah->dmah_map);
	if (error)
		goto fail2;

	/*
	 * Load the kva buffer into the map for DMA transfers.
	 */
	/* XXX errno NetBSD->Linux */
	error = -bus_dmamap_load(dmah->dmah_tag, dmah->dmah_map, dmah->vaddr,
	    size, NULL, (BUS_DMA_NOWAIT | BUS_DMA_NOCACHE));
	if (error)
		goto fail3;

	/* Record the bus address for convenient reference.  */
	dmah->busaddr = dmah->dmah_map->dm_segs[0].ds_addr;

	/* Zero the DMA buffer.  XXX Yikes!  Is this necessary?  */
	memset(dmah->vaddr, 0, size);

	/* Success!  */
	return dmah;

fail3:	bus_dmamap_destroy(dmah->dmah_tag, dmah->dmah_map);
fail2:	bus_dmamem_unmap(dmah->dmah_tag, dmah->vaddr, dmah->size);
fail1:	bus_dmamem_free(dmah->dmah_tag, &dmah->dmah_seg, 1);
fail0:	dmah->dmah_tag = NULL;	/* XXX paranoia */
	kmem_free(dmah, sizeof(*dmah));
out:	DRM_DEBUG("drm_pci_alloc failed: %d\n", error);
	return NULL;
}

/*
 * Release the bus DMA mappings and memory in dmah, and deallocate it.
 */
void
drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah)
{

	bus_dmamap_unload(dmah->dmah_tag, dmah->dmah_map);
	bus_dmamap_destroy(dmah->dmah_tag, dmah->dmah_map);
	bus_dmamem_unmap(dmah->dmah_tag, dmah->vaddr, dmah->size);
	bus_dmamem_free(dmah->dmah_tag, &dmah->dmah_seg, 1);
	dmah->dmah_tag = NULL;	/* XXX paranoia */
	kmem_free(dmah, sizeof(*dmah));
}

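/*
 * A minimal usage sketch for drm_pci_alloc/drm_pci_free (hypothetical
 * caller with an attached dev; the exact error is unavailable because
 * drm_pci_alloc only returns NULL on failure):
 *
 *	struct drm_dma_handle *dmah;
 *
 *	dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
 *	if (dmah == NULL)
 *		return -ENOMEM;
 *	... program the device with dmah->busaddr, access the buffer
 *	    through dmah->vaddr ...
 *	drm_pci_free(dev, dmah);
 */
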
/*
 * Make sure the DMA-safe memory allocated for dev lies between
 * min_addr and max_addr.  Can be used multiple times to restrict the
 * bounds further, but never to expand the bounds again.
 *
 * XXX Caller must guarantee nobody has used the tag yet,
 * i.e. allocated any DMA memory.
 */
int
drm_limit_dma_space(struct drm_device *dev, resource_size_t min_addr,
    resource_size_t max_addr)
{
	int ret;

	KASSERT(min_addr <= max_addr);

	/*
	 * Limit it further if we have already limited it, and destroy
	 * the old subregion DMA tag.
	 */
	if (dev->dmat_subregion_p) {
		min_addr = MAX(min_addr, dev->dmat_subregion_min);
		max_addr = MIN(max_addr, dev->dmat_subregion_max);
		bus_dmatag_destroy(dev->dmat);
	}

	/*
	 * If our limit contains the 32-bit space but for some reason
	 * we can't use a subregion, either because the bus doesn't
	 * support >32-bit DMA or because bus_dma(9) on this platform
	 * lacks bus_dmatag_subregion, just use the 32-bit space.
	 */
	if (min_addr == 0 && max_addr >= UINT32_C(0xffffffff) &&
	    dev->bus_dmat == dev->bus_dmat32) {
dma32:		dev->dmat = dev->bus_dmat32;
		dev->dmat_subregion_p = false;
		dev->dmat_subregion_min = 0;
		dev->dmat_subregion_max = UINT32_C(0xffffffff);
		return 0;
	}

	/*
	 * Create a DMA tag for a subregion from the bus's DMA tag.  If
	 * that fails, restore dev->dmat to the whole region so that we
	 * need not worry about dev->dmat being uninitialized (not that
	 * the caller should try to allocate DMA-safe memory on failure
	 * anyway, but...paranoia).
	 */
	/* XXX errno NetBSD->Linux */
	ret = -bus_dmatag_subregion(dev->bus_dmat, min_addr, max_addr,
	    &dev->dmat, BUS_DMA_WAITOK);
	if (ret) {
		/*
		 * bus_dmatag_subregion may fail.  If so, and if the
		 * subregion contains the 32-bit space, just use the
		 * 32-bit DMA tag.
		 */
		if (ret == -EOPNOTSUPP && dev->bus_dmat32 &&
		    min_addr == 0 && max_addr >= UINT32_C(0xffffffff))
			goto dma32;
		/* XXX Back out?  */
		dev->dmat = dev->bus_dmat;
		dev->dmat_subregion_p = false;
		dev->dmat_subregion_min = 0;
		dev->dmat_subregion_max = __type_max(bus_addr_t);
		return ret;
	}

	/*
	 * Remember that we have a subregion tag so that we know to
	 * destroy it later, and record the bounds in case we need to
	 * limit them again.
	 */
	dev->dmat_subregion_p = true;
	dev->dmat_subregion_min = min_addr;
	dev->dmat_subregion_max = max_addr;

	/* Success!  */
	return 0;
}

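/*
 * A minimal usage sketch for drm_limit_dma_space (hypothetical
 * bounds): a device limited to 32-bit bus addresses, later tightened
 * for a hypothetical 30-bit-limited engine.  Per the comment above,
 * repeated calls can only shrink the bounds, never widen them.
 *
 *	if (drm_limit_dma_space(dev, 0, UINT32_C(0xffffffff)))
 *		... handle error ...
 *	...
 *	if (drm_limit_dma_space(dev, 0, UINT32_C(0x3fffffff)))
 *		... handle error ...
 */
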
bool
drm_need_swiotlb(int dma_bits)
{

	return false;
}
357