xref: /netbsd-src/sys/arch/x86/x86/bus_dma.c (revision 47bfd0ee808ed4081131bc76192fa7efb0c9c0c3)
1 /*	$NetBSD: bus_dma.c,v 1.91 2024/06/04 21:42:58 riastradh Exp $	*/
2 
3 /*-
4  * Copyright (c) 1996, 1997, 1998, 2007, 2020 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
9  * Simulation Facility NASA Ames Research Center, and by Andrew Doran.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.91 2024/06/04 21:42:58 riastradh Exp $");
35 
36 /*
37  * The following is included because _bus_dma_uiomove is derived from
38  * uiomove() in kern_subr.c.
39  */
40 
41 /*
42  * Copyright (c) 1982, 1986, 1991, 1993
43  *	The Regents of the University of California.  All rights reserved.
44  * (c) UNIX System Laboratories, Inc.
45  * All or some portions of this file are derived from material licensed
46  * to the University of California by American Telephone and Telegraph
47  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
48  * the permission of UNIX System Laboratories, Inc.
49  *
50  * Copyright (c) 1992, 1993
51  *	The Regents of the University of California.  All rights reserved.
52  *
53  * This software was developed by the Computer Systems Engineering group
54  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
55  * contributed to Berkeley.
56  *
57  * All advertising materials mentioning features or use of this software
58  * must display the following acknowledgement:
59  *	This product includes software developed by the University of
60  *	California, Lawrence Berkeley Laboratory.
61  *
62  * Redistribution and use in source and binary forms, with or without
63  * modification, are permitted provided that the following conditions
64  * are met:
65  * 1. Redistributions of source code must retain the above copyright
66  *    notice, this list of conditions and the following disclaimer.
67  * 2. Redistributions in binary form must reproduce the above copyright
68  *    notice, this list of conditions and the following disclaimer in the
69  *    documentation and/or other materials provided with the distribution.
70  * 3. Neither the name of the University nor the names of its contributors
71  *    may be used to endorse or promote products derived from this software
72  *    without specific prior written permission.
73  *
74  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
75  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
76  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
77  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
78  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
79  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
80  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
81  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
82  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
83  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
84  * SUCH DAMAGE.
85  */
86 
87 #include "ioapic.h"
88 #include "isa.h"
89 #include "opt_mpbios.h"
90 
91 #include <sys/param.h>
92 #include <sys/systm.h>
93 #include <sys/kernel.h>
94 #include <sys/kmem.h>
95 #include <sys/malloc.h>
96 #include <sys/mbuf.h>
97 #include <sys/proc.h>
98 #include <sys/asan.h>
99 #include <sys/msan.h>
100 
101 #include <sys/bus.h>
102 #include <machine/bus_private.h>
103 #if NIOAPIC > 0
104 #include <machine/i82093var.h>
105 #endif
106 #ifdef MPBIOS
107 #include <machine/mpbiosvar.h>
108 #endif
109 #include <machine/pmap_private.h>
110 
111 #if NISA > 0
112 #include <dev/isa/isareg.h>
113 #include <dev/isa/isavar.h>
114 #endif
115 
116 #include <uvm/uvm.h>
117 
118 extern	paddr_t avail_end;
119 
120 #define	IDTVEC(name)	__CONCAT(X,name)
121 typedef void (vector)(void);
122 extern vector *IDTVEC(intr)[];
123 
124 #define	BUSDMA_BOUNCESTATS
125 
126 #ifdef BUSDMA_BOUNCESTATS
127 #define	BUSDMA_EVCNT_DECL(name)						\
128 static struct evcnt bus_dma_ev_##name =					\
129     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "bus_dma", #name);		\
130 EVCNT_ATTACH_STATIC(bus_dma_ev_##name)
131 
132 #define	STAT_INCR(name)							\
133     bus_dma_ev_##name.ev_count++
134 #define	STAT_DECR(name)							\
135     bus_dma_ev_##name.ev_count--
136 
137 BUSDMA_EVCNT_DECL(nbouncebufs);
138 BUSDMA_EVCNT_DECL(loads);
139 BUSDMA_EVCNT_DECL(bounces);
140 #else
141 #define STAT_INCR(x)
142 #define STAT_DECR(x)
143 #endif
144 
145 static int	_bus_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
146 	    bus_size_t, int, bus_dmamap_t *);
147 static void	_bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
148 static int	_bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
149 	    bus_size_t, struct proc *, int);
150 static int	_bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
151 	    struct mbuf *, int);
152 static int	_bus_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
153 	    struct uio *, int);
154 static int	_bus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
155 	    bus_dma_segment_t *, int, bus_size_t, int);
156 static void	_bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
157 static void	_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
158 	    bus_size_t, int);
159 
160 static int	_bus_dmamem_alloc(bus_dma_tag_t tag, bus_size_t size,
161 	    bus_size_t alignment, bus_size_t boundary,
162 	    bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags);
163 static void	_bus_dmamem_free(bus_dma_tag_t tag, bus_dma_segment_t *segs,
164 	    int nsegs);
165 static int	_bus_dmamem_map(bus_dma_tag_t tag, bus_dma_segment_t *segs,
166 	    int nsegs, size_t size, void **kvap, int flags);
167 static void	_bus_dmamem_unmap(bus_dma_tag_t tag, void *kva, size_t size);
168 static paddr_t	_bus_dmamem_mmap(bus_dma_tag_t tag, bus_dma_segment_t *segs,
169 	    int nsegs, off_t off, int prot, int flags);
170 
171 static int	_bus_dmatag_subregion(bus_dma_tag_t tag, bus_addr_t min_addr,
172 	    bus_addr_t max_addr, bus_dma_tag_t *newtag, int flags);
173 static void	_bus_dmatag_destroy(bus_dma_tag_t tag);
174 
175 static int _bus_dma_uiomove(void *, struct uio *, size_t, int);
176 static int _bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
177 	    bus_size_t size, int flags);
178 static void _bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map);
179 static int _bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map,
180 	    void *buf, bus_size_t buflen, struct vmspace *vm, int flags);
181 static int _bus_dmamap_load_busaddr(bus_dma_tag_t, bus_dmamap_t,
182     bus_addr_t, bus_size_t);
183 
184 #ifndef _BUS_DMAMEM_ALLOC_RANGE
185 static int	_bus_dmamem_alloc_range(bus_dma_tag_t tag, bus_size_t size,
186 	    bus_size_t alignment, bus_size_t boundary,
187 	    bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags,
188 	    bus_addr_t low, bus_addr_t high);
189 
190 #define _BUS_DMAMEM_ALLOC_RANGE _bus_dmamem_alloc_range
191 
192 /*
193  * Allocate physical memory from the given physical address range.
194  * Called by DMA-safe memory allocation methods.
195  */
196 static int
197 _bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size,
198     bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
199     int nsegs, int *rsegs, int flags, bus_addr_t low, bus_addr_t high)
200 {
201 	paddr_t curaddr, lastaddr;
202 	struct vm_page *m;
203 	struct pglist mlist;
204 	int curseg, error;
205 	bus_size_t uboundary;
206 
207 	/* Always round the size. */
208 	size = round_page(size);
209 
210 	KASSERTMSG(boundary >= PAGE_SIZE || boundary == 0,
211 	    "boundary=0x%"PRIxBUSSIZE, boundary);
212 
213 	/*
214 	 * Allocate pages from the VM system.
215 	 * We accept boundaries < size, splitting into multiple segments
216 	 * if needed.  uvm_pglistalloc() does not, so compute an appropriate
217 	 * boundary: the next power of 2 >= size.
218 	 */
219 
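	/*
	 * Editorial worked example (not from the original source): with a
	 * requested boundary of 0x1000 and size of 0x10000, the loop below
	 * raises uboundary to 0x10000 so uvm_pglistalloc() accepts it; the
	 * per-page walk further down then starts a new segment whenever two
	 * consecutive pages would straddle the caller's original boundary.
	 */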
220 	if (boundary == 0)
221 		uboundary = 0;
222 	else {
223 		uboundary = boundary;
224 		while (uboundary < size)
225 			uboundary = uboundary << 1;
226 	}
227 	error = uvm_pglistalloc(size, low, high, alignment, uboundary,
228 	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
229 	if (error)
230 		return (error);
231 
232 	/*
233 	 * Compute the location, size, and number of segments actually
234 	 * returned by the VM code.
235 	 */
236 	m = TAILQ_FIRST(&mlist);
237 	curseg = 0;
238 	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
239 	segs[curseg].ds_len = PAGE_SIZE;
240 	m = m->pageq.queue.tqe_next;
241 
242 	for (; m != NULL; m = m->pageq.queue.tqe_next) {
243 		curaddr = VM_PAGE_TO_PHYS(m);
244 		KASSERTMSG(curaddr >= low, "curaddr=%#"PRIxPADDR
245 		    " low=%#"PRIxBUSADDR" high=%#"PRIxBUSADDR,
246 		    curaddr, low, high);
247 		KASSERTMSG(curaddr < high, "curaddr=%#"PRIxPADDR
248 		    " low=%#"PRIxBUSADDR" high=%#"PRIxBUSADDR,
249 		    curaddr, low, high);
250 		if (curaddr == (lastaddr + PAGE_SIZE) &&
251 		    (lastaddr & boundary) == (curaddr & boundary)) {
252 			segs[curseg].ds_len += PAGE_SIZE;
253 		} else {
254 			curseg++;
255 			KASSERTMSG(curseg < nsegs, "curseg %d size %llx",
256 			    curseg, (long long)size);
257 			segs[curseg].ds_addr = curaddr;
258 			segs[curseg].ds_len = PAGE_SIZE;
259 		}
260 		lastaddr = curaddr;
261 	}
262 
263 	*rsegs = curseg + 1;
264 
265 	return (0);
266 }
267 #endif /* _BUS_DMAMEM_ALLOC_RANGE */
268 
269 /*
270  * Create a DMA map.
271  */
272 static int
273 _bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
274     bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
275 {
276 	struct x86_bus_dma_cookie *cookie;
277 	bus_dmamap_t map;
278 	int error, cookieflags;
279 	void *cookiestore, *mapstore;
280 	size_t cookiesize, mapsize;
281 
282 	/*
283 	 * Allocate and initialize the DMA map.  The end of the map
284 	 * is a variable-sized array of segments, so we allocate enough
285 	 * room for them in one shot.
286 	 *
287 	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
288 	 * of ALLOCNOW notifies others that we've reserved these resources,
289 	 * and they are not to be freed.
290 	 *
291 	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
292 	 * the (nsegments - 1).
293 	 */
294 	error = 0;
295 	mapsize = sizeof(struct x86_bus_dmamap) +
296 	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
297 	if ((mapstore = malloc(mapsize, M_DMAMAP, M_ZERO |
298 	    ((flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK))) == NULL)
299 		return (ENOMEM);
300 
301 	map = (struct x86_bus_dmamap *)mapstore;
302 	map->_dm_size = size;
303 	map->_dm_segcnt = nsegments;
304 	map->_dm_maxmaxsegsz = maxsegsz;
305 	map->_dm_boundary = boundary;
306 	map->_dm_bounce_thresh = t->_bounce_thresh;
307 	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
308 	map->dm_maxsegsz = maxsegsz;
309 	map->dm_mapsize = 0;		/* no valid mappings */
310 	map->dm_nsegs = 0;
311 
312 	if (t->_bounce_thresh == 0 || _BUS_AVAIL_END <= t->_bounce_thresh - 1)
313 		map->_dm_bounce_thresh = 0;
314 	cookieflags = 0;
315 
316 	if (t->_may_bounce != NULL) {
317 		error = t->_may_bounce(t, map, flags, &cookieflags);
318 		if (error != 0)
319 			goto out;
320 	}
321 
322 	if (map->_dm_bounce_thresh != 0)
323 		cookieflags |= X86_DMA_MIGHT_NEED_BOUNCE;
324 
325 	if ((cookieflags & X86_DMA_MIGHT_NEED_BOUNCE) == 0) {
326 		*dmamp = map;
327 		return 0;
328 	}
329 
330 	cookiesize = sizeof(struct x86_bus_dma_cookie) +
331 	    (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
332 
333 	/*
334 	 * Allocate our cookie.
335 	 */
336 	if ((cookiestore = malloc(cookiesize, M_DMAMAP, M_ZERO |
337 	    ((flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK))) == NULL) {
338 		error = ENOMEM;
339 		goto out;
340 	}
341 	cookie = (struct x86_bus_dma_cookie *)cookiestore;
342 	cookie->id_flags = cookieflags;
343 	map->_dm_cookie = cookie;
344 
345 	error = _bus_dma_alloc_bouncebuf(t, map, size, flags);
346  out:
347 	if (error)
348 		_bus_dmamap_destroy(t, map);
349 	else
350 		*dmamp = map;
351 
352 	return (error);
353 }
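/*
 * Editorial usage sketch (not part of this file; sc->sc_dmat and
 * sc->sc_dmamap are illustrative names, not ones used here): a driver
 * normally creates a map once at attach time and destroys it at detach.
 *
 *	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
 *	    BUS_DMA_NOWAIT, &sc->sc_dmamap) != 0)
 *		return;		... attach fails ...
 *	...
 *	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
 */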
354 
355 /*
356  * Destroy a DMA map.
357  */
358 static void
359 _bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
360 {
361 	struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
362 
363 	/*
364 	 * Free any bounce pages this map might hold.
365 	 */
366 	if (cookie != NULL) {
367 		if (cookie->id_flags & X86_DMA_HAS_BOUNCE)
368 			_bus_dma_free_bouncebuf(t, map);
369 		free(cookie, M_DMAMAP);
370 	}
371 
372 	free(map, M_DMAMAP);
373 }
374 
375 /*
376  * Load a DMA map with a linear buffer.
377  */
378 static int
379 _bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
380     bus_size_t buflen, struct proc *p, int flags)
381 {
382 	struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
383 	int error;
384 	struct vmspace *vm;
385 
386 	STAT_INCR(loads);
387 
388 	/*
389 	 * Make sure that on error condition we return "no valid mappings."
390 	 */
391 	map->dm_mapsize = 0;
392 	map->dm_nsegs = 0;
393 	KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
394 	    "maxsegsz=0x%"PRIxBUSSIZE", maxmaxsegsz=0x%"PRIxBUSSIZE,
395 	    map->dm_maxsegsz, map->_dm_maxmaxsegsz);
396 
397 	if (buflen > map->_dm_size)
398 		return EINVAL;
399 
400 	if (p != NULL) {
401 		vm = p->p_vmspace;
402 	} else {
403 		vm = vmspace_kernel();
404 	}
405 	error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags);
406 	if (error == 0) {
407 		if (cookie != NULL)
408 			cookie->id_flags &= ~X86_DMA_IS_BOUNCING;
409 		map->dm_mapsize = buflen;
410 		return 0;
411 	}
412 
413 	if (cookie == NULL ||
414 	    (cookie->id_flags & X86_DMA_MIGHT_NEED_BOUNCE) == 0)
415 		return error;
416 
417 	/*
418 	 * First attempt failed; bounce it.
419 	 */
420 
421 	STAT_INCR(bounces);
422 
423 	/*
424 	 * Allocate bounce pages, if necessary.
425 	 */
426 	if ((cookie->id_flags & X86_DMA_HAS_BOUNCE) == 0) {
427 		error = _bus_dma_alloc_bouncebuf(t, map, buflen, flags);
428 		if (error)
429 			return (error);
430 	}
431 
432 	/*
433 	 * Cache a pointer to the caller's buffer and load the DMA map
434 	 * with the bounce buffer.
435 	 */
436 	cookie->id_origbuf = buf;
437 	cookie->id_origbuflen = buflen;
438 	cookie->id_buftype = X86_DMA_BUFTYPE_LINEAR;
439 	map->dm_nsegs = 0;
440 	error = bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
441 	    p, flags);
442 	if (error)
443 		return (error);
444 
445 	/* ...so _bus_dmamap_sync() knows we're bouncing */
446 	cookie->id_flags |= X86_DMA_IS_BOUNCING;
447 	return (0);
448 }
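/*
 * Editorial usage sketch (sc->sc_dmat, sc->sc_dmamap, buf and len are
 * illustrative assumptions): a typical transmit path loads the buffer,
 * syncs it for the device, programs the hardware from dm_segs[], and
 * unloads the map once the transfer has completed.
 *
 *	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, len,
 *	    NULL, BUS_DMA_NOWAIT);
 *	if (error)
 *		return error;
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
 *	    BUS_DMASYNC_PREWRITE);
 *	... hand sc->sc_dmamap->dm_segs[] to the device and start DMA ...
 */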
449 
450 static int
451 _bus_dmamap_load_busaddr(bus_dma_tag_t t, bus_dmamap_t map,
452     bus_addr_t addr, bus_size_t size)
453 {
454 	bus_dma_segment_t * const segs = map->dm_segs;
455 	int nseg = map->dm_nsegs;
456 	bus_addr_t bmask = ~(map->_dm_boundary - 1);
457 	bus_addr_t lastaddr = 0xdead; /* XXX gcc */
458 	bus_size_t sgsize;
459 
460 	if (nseg > 0)
461 		lastaddr = segs[nseg-1].ds_addr + segs[nseg-1].ds_len;
462 again:
463 	sgsize = size;
464 	/*
465 	 * Make sure we don't cross any boundaries.
466 	 */
467 	if (map->_dm_boundary > 0) {
468 		bus_addr_t baddr; /* next boundary address */
469 
470 		baddr = (addr + map->_dm_boundary) & bmask;
471 		if (sgsize > (baddr - addr))
472 			sgsize = (baddr - addr);
473 	}
474 
475 	/*
476 	 * Insert chunk into a segment, coalescing with
477 	 * previous segment if possible.
478 	 */
479 	if (nseg > 0 && addr == lastaddr &&
480 	    segs[nseg-1].ds_len + sgsize <= map->dm_maxsegsz &&
481 	    (map->_dm_boundary == 0 ||
482 	     (segs[nseg-1].ds_addr & bmask) == (addr & bmask))) {
483 		/* coalesce */
484 		segs[nseg-1].ds_len += sgsize;
485 	} else if (nseg >= map->_dm_segcnt) {
486 		return EFBIG;
487 	} else {
488 		/* new segment */
489 		segs[nseg].ds_addr = addr;
490 		segs[nseg].ds_len = sgsize;
491 		nseg++;
492 	}
493 
494 	lastaddr = addr + sgsize;
495 	if (map->_dm_bounce_thresh != 0 && lastaddr > map->_dm_bounce_thresh)
496 		return EINVAL;
497 
498 	addr += sgsize;
499 	size -= sgsize;
500 	if (size > 0)
501 		goto again;
502 
503 	map->dm_nsegs = nseg;
504 	return 0;
505 }
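/*
 * Editorial note on the coalescing test above: for example, two
 * physically contiguous 4 KB pages loaded back to back end up as a
 * single 8 KB dm_segs[] entry, provided the combined length stays
 * within dm_maxsegsz and does not straddle _dm_boundary.
 */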
506 
507 /*
508  * Like _bus_dmamap_load(), but for mbufs.
509  */
510 static int
511 _bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
512     int flags)
513 {
514 	struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
515 	int error;
516 	struct mbuf *m;
517 
518 	/*
519 	 * Make sure that on error condition we return "no valid mappings."
520 	 */
521 	map->dm_mapsize = 0;
522 	map->dm_nsegs = 0;
523 	KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
524 	    "maxsegsz=0x%"PRIxBUSSIZE", maxmaxsegsz=0x%"PRIxBUSSIZE,
525 	    map->dm_maxsegsz, map->_dm_maxmaxsegsz);
526 
527 	KASSERTMSG(m0->m_flags & M_PKTHDR, "m0=%p m_flags=0x%x", m0,
528 	    m0->m_flags);
529 	if (m0->m_pkthdr.len > map->_dm_size)
530 		return (EINVAL);
531 
532 	error = 0;
533 	for (m = m0; m != NULL && error == 0; m = m->m_next) {
534 		int offset;
535 		int remainbytes;
536 		const struct vm_page * const *pgs;
537 		paddr_t paddr;
538 		int size;
539 
540 		if (m->m_len == 0)
541 			continue;
542 		switch (m->m_flags & (M_EXT|M_EXT_CLUSTER|M_EXT_PAGES)) {
543 		case M_EXT|M_EXT_CLUSTER:
544 			/* XXX KDASSERT */
545 			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
546 			paddr = m->m_ext.ext_paddr +
547 			    (m->m_data - m->m_ext.ext_buf);
548 			size = m->m_len;
549 			error = _bus_dmamap_load_busaddr(t, map,
550 			    _BUS_PHYS_TO_BUS(paddr), size);
551 			break;
552 
553 		case M_EXT|M_EXT_PAGES:
554 			KASSERTMSG(m->m_ext.ext_buf <= m->m_data,
555 			    "m=%p m_ext.ext_buf=%p m_ext.ext_size=%zu"
556 			    " m_data=%p",
557 			    m, m->m_ext.ext_buf, m->m_ext.ext_size, m->m_data);
558 			KASSERTMSG((m->m_data <=
559 				m->m_ext.ext_buf + m->m_ext.ext_size),
560 			    "m=%p m_ext.ext_buf=%p m_ext.ext_size=%zu"
561 			    " m_data=%p",
562 			    m, m->m_ext.ext_buf, m->m_ext.ext_size, m->m_data);
563 
564 			offset = (vaddr_t)m->m_data -
565 			    trunc_page((vaddr_t)m->m_ext.ext_buf);
566 			remainbytes = m->m_len;
567 
568 			/* skip uninteresting pages */
569 			pgs = (const struct vm_page * const *)
570 			    m->m_ext.ext_pgs + (offset >> PAGE_SHIFT);
571 
572 			offset &= PAGE_MASK; /* offset in the first page */
573 
574 			/* load each page */
575 			while (remainbytes > 0) {
576 				const struct vm_page *pg;
577 				bus_addr_t busaddr;
578 
579 				size = MIN(remainbytes, PAGE_SIZE - offset);
580 
581 				pg = *pgs++;
582 				KASSERT(pg);
583 				busaddr = _BUS_VM_PAGE_TO_BUS(pg) + offset;
584 
585 				error = _bus_dmamap_load_busaddr(t, map,
586 				    busaddr, size);
587 				if (error)
588 					break;
589 				offset = 0;
590 				remainbytes -= size;
591 			}
592 			break;
593 
594 		case 0:
595 			paddr = m->m_paddr + M_BUFOFFSET(m) +
596 			    (m->m_data - M_BUFADDR(m));
597 			size = m->m_len;
598 			error = _bus_dmamap_load_busaddr(t, map,
599 			    _BUS_PHYS_TO_BUS(paddr), size);
600 			break;
601 
602 		default:
603 			error = _bus_dmamap_load_buffer(t, map, m->m_data,
604 			    m->m_len, vmspace_kernel(), flags);
605 		}
606 	}
607 	if (error == 0) {
608 		map->dm_mapsize = m0->m_pkthdr.len;
609 		return 0;
610 	}
611 
612 	map->dm_nsegs = 0;
613 
614 	if (cookie == NULL ||
615 	    (cookie->id_flags & X86_DMA_MIGHT_NEED_BOUNCE) == 0)
616 		return error;
617 
618 	/*
619 	 * First attempt failed; bounce it.
620 	 */
621 
622 	STAT_INCR(bounces);
623 
624 	/*
625 	 * Allocate bounce pages, if necessary.
626 	 */
627 	if ((cookie->id_flags & X86_DMA_HAS_BOUNCE) == 0) {
628 		error = _bus_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
629 		    flags);
630 		if (error)
631 			return (error);
632 	}
633 
634 	/*
635 	 * Cache a pointer to the caller's buffer and load the DMA map
636 	 * with the bounce buffer.
637 	 */
638 	cookie->id_origbuf = m0;
639 	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
640 	cookie->id_buftype = X86_DMA_BUFTYPE_MBUF;
641 	error = bus_dmamap_load(t, map, cookie->id_bouncebuf,
642 	    m0->m_pkthdr.len, NULL, flags);
643 	if (error)
644 		return (error);
645 
646 	/* ...so _bus_dmamap_sync() knows we're bouncing */
647 	cookie->id_flags |= X86_DMA_IS_BOUNCING;
648 	return (0);
649 }
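/*
 * Editorial usage sketch (sc->sc_dmat and txmap are illustrative
 * assumptions): network drivers hand whole packet chains to this
 * routine; a chain too fragmented for the map (EFBIG) is typically
 * defragmented (e.g. with m_defrag()) and the load retried.
 *
 *	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m0,
 *	    BUS_DMA_NOWAIT);
 */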
650 
651 /*
652  * Like _bus_dmamap_load(), but for uios.
653  */
654 static int
655 _bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
656     int flags)
657 {
658 	int i, error;
659 	bus_size_t minlen, resid;
660 	struct vmspace *vm;
661 	struct iovec *iov;
662 	void *addr;
663 	struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
664 
665 	/*
666 	 * Make sure that on error condition we return "no valid mappings."
667 	 */
668 	map->dm_mapsize = 0;
669 	map->dm_nsegs = 0;
670 	KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
671 	    "maxsegsz=0x%"PRIxBUSSIZE", maxmaxsegsz=0x%"PRIxBUSSIZE,
672 	    map->dm_maxsegsz, map->_dm_maxmaxsegsz);
673 
674 	resid = uio->uio_resid;
675 	iov = uio->uio_iov;
676 
677 	vm = uio->uio_vmspace;
678 
679 	error = 0;
680 	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
681 		/*
682 		 * Now at the first iovec to load.  Load each iovec
683 		 * until we have exhausted the residual count.
684 		 */
685 		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
686 		addr = (void *)iov[i].iov_base;
687 
688 		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
689 		    vm, flags);
690 
691 		resid -= minlen;
692 	}
693 	if (error == 0) {
694 		map->dm_mapsize = uio->uio_resid;
695 		return 0;
696 	}
697 
698 	map->dm_nsegs = 0;
699 
700 	if (cookie == NULL ||
701 	    (cookie->id_flags & X86_DMA_MIGHT_NEED_BOUNCE) == 0)
702 		return error;
703 
704 	STAT_INCR(bounces);
705 
706 	/*
707 	 * Allocate bounce pages, if necessary.
708 	 */
709 	if ((cookie->id_flags & X86_DMA_HAS_BOUNCE) == 0) {
710 		error = _bus_dma_alloc_bouncebuf(t, map, uio->uio_resid,
711 		    flags);
712 		if (error)
713 			return (error);
714 	}
715 
716 	/*
717 	 * Cache a pointer to the caller's buffer and load the DMA map
718 	 * with the bounce buffer.
719 	 */
720 	cookie->id_origbuf = uio;
721 	cookie->id_origbuflen = uio->uio_resid;
722 	cookie->id_buftype = X86_DMA_BUFTYPE_UIO;
723 	error = bus_dmamap_load(t, map, cookie->id_bouncebuf,
724 	    uio->uio_resid, NULL, flags);
725 	if (error)
726 		return (error);
727 
728 	/* ...so _bus_dmamap_sync() knows we're bouncing */
729 	cookie->id_flags |= X86_DMA_IS_BOUNCING;
730 	return (0);
731 }
732 
733 /*
734  * Like _bus_dmamap_load(), but for raw memory allocated with
735  * bus_dmamem_alloc().
736  */
737 static int
738 _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
739     bus_dma_segment_t *segs, int nsegs, bus_size_t size0, int flags)
740 {
741 	bus_size_t size;
742 	int i, error = 0;
743 
744 	/*
745 	 * Make sure that on error condition we return "no valid mappings."
746 	 */
747 	map->dm_mapsize = 0;
748 	map->dm_nsegs = 0;
749 	KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
750 	    "maxsegsz=0x%"PRIxBUSSIZE", maxmaxsegsz=0x%"PRIxBUSSIZE,
751 	    map->dm_maxsegsz, map->_dm_maxmaxsegsz);
752 
753 	if (size0 > map->_dm_size)
754 		return EINVAL;
755 
756 	for (i = 0, size = size0; i < nsegs && size > 0; i++) {
757 		bus_dma_segment_t *ds = &segs[i];
758 		bus_size_t sgsize;
759 
760 		sgsize = MIN(ds->ds_len, size);
761 		if (sgsize == 0)
762 			continue;
763 		error = _bus_dmamap_load_busaddr(t, map, ds->ds_addr, sgsize);
764 		if (error != 0)
765 			break;
766 		size -= sgsize;
767 	}
768 
769 	if (error != 0) {
770 		map->dm_mapsize = 0;
771 		map->dm_nsegs = 0;
772 		return error;
773 	}
774 
775 	/* XXX TBD bounce */
776 
777 	map->dm_mapsize = size0;
778 	return 0;
779 }
780 
781 /*
782  * Unload a DMA map.
783  */
784 static void
785 _bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
786 {
787 	struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
788 
789 	/*
790 	 * If we have bounce pages, free them, unless they're
791 	 * reserved for our exclusive use.
792 	 */
793 	if (cookie != NULL) {
794 		cookie->id_flags &= ~X86_DMA_IS_BOUNCING;
795 		cookie->id_buftype = X86_DMA_BUFTYPE_INVALID;
796 	}
797 	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
798 	map->dm_mapsize = 0;
799 	map->dm_nsegs = 0;
800 }
801 
802 /*
803  * Synchronize a DMA map.
804  *
805  * Reference:
806  *
807  *	AMD64 Architecture Programmer's Manual, Volume 2: System
808  *	Programming, 24593--Rev. 3.38--November 2021, Sec. 7.4.2 Memory
809  *	Barrier Interaction with Memory Types, Table 7-3, p. 196.
810  *	https://web.archive.org/web/20220625040004/https://www.amd.com/system/files/TechDocs/24593.pdf#page=256
811  */
812 static void
813 _bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
814     bus_size_t len, int ops)
815 {
816 	struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
817 
818 	/*
819 	 * Mixing PRE and POST operations is not allowed.
820 	 */
821 	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
822 	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
823 		panic("%s: mix PRE and POST", __func__);
824 
825 	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
826 		KASSERTMSG(offset < map->dm_mapsize,
827 		    "bad offset 0x%"PRIxBUSADDR" >= 0x%"PRIxBUSSIZE,
828 		    offset, map->dm_mapsize);
829 		KASSERTMSG(len <= map->dm_mapsize - offset,
830 		    "bad length 0x%"PRIxBUSADDR" + 0x%"PRIxBUSSIZE
831 		    " > 0x%"PRIxBUSSIZE,
832 		    offset, len, map->dm_mapsize);
833 	}
834 
835 	/*
836 	 * BUS_DMASYNC_POSTREAD: The caller has been alerted to DMA
837 	 * completion by reading a register or DMA descriptor, and the
838 	 * caller is about to read out of the DMA memory buffer that
839 	 * the device just filled.
840 	 *
841 	 * => LFENCE ensures that these happen in order so that the
842 	 *    caller, or the bounce buffer logic here, doesn't proceed
843 	 *    to read any stale data from cache or speculation.  x86
844 	 *    never reorders loads from wp/wt/wb or uc memory, but it
845 	 *    may execute loads from wc/wc+ memory early, e.g. with
846 	 *    BUS_SPACE_MAP_PREFETCHABLE.
847 	 */
848 	if (ops & BUS_DMASYNC_POSTREAD)
849 		x86_lfence();
850 
851 	/*
852 	 * If we're not bouncing, just return; nothing to do.
853 	 */
854 	if (len == 0 || cookie == NULL ||
855 	    (cookie->id_flags & X86_DMA_IS_BOUNCING) == 0)
856 		goto end;
857 
858 	switch (cookie->id_buftype) {
859 	case X86_DMA_BUFTYPE_LINEAR:
860 		/*
861 		 * Nothing to do for pre-read.
862 		 */
863 
864 		if (ops & BUS_DMASYNC_PREWRITE) {
865 			/*
866 			 * Copy the caller's buffer to the bounce buffer.
867 			 */
868 			memcpy((char *)cookie->id_bouncebuf + offset,
869 			    (char *)cookie->id_origbuf + offset, len);
870 		}
871 
872 		if (ops & BUS_DMASYNC_POSTREAD) {
873 			/*
874 			 * Copy the bounce buffer to the caller's buffer.
875 			 */
876 			memcpy((char *)cookie->id_origbuf + offset,
877 			    (char *)cookie->id_bouncebuf + offset, len);
878 		}
879 
880 		/*
881 		 * Nothing to do for post-write.
882 		 */
883 		break;
884 
885 	case X86_DMA_BUFTYPE_MBUF:
886 	    {
887 		struct mbuf *m, *m0 = cookie->id_origbuf;
888 		bus_size_t minlen, moff;
889 
890 		/*
891 		 * Nothing to do for pre-read.
892 		 */
893 
894 		if (ops & BUS_DMASYNC_PREWRITE) {
895 			/*
896 			 * Copy the caller's buffer to the bounce buffer.
897 			 */
898 			m_copydata(m0, offset, len,
899 			    (char *)cookie->id_bouncebuf + offset);
900 		}
901 
902 		if (ops & BUS_DMASYNC_POSTREAD) {
903 			/*
904 			 * Copy the bounce buffer to the caller's buffer.
905 			 */
906 			for (moff = offset, m = m0; m != NULL && len != 0;
907 			     m = m->m_next) {
908 				/* Find the beginning mbuf. */
909 				if (moff >= m->m_len) {
910 					moff -= m->m_len;
911 					continue;
912 				}
913 
914 				/*
915 				 * Now at the first mbuf to sync; nail
916 				 * each one until we have exhausted the
917 				 * length.
918 				 */
919 				minlen = len < m->m_len - moff ?
920 				    len : m->m_len - moff;
921 
922 				memcpy(mtod(m, char *) + moff,
923 				    (char *)cookie->id_bouncebuf + offset,
924 				    minlen);
925 
926 				moff = 0;
927 				len -= minlen;
928 				offset += minlen;
929 			}
930 		}
931 
932 		/*
933 		 * Nothing to do for post-write.
934 		 */
935 		break;
936 	    }
937 	case X86_DMA_BUFTYPE_UIO:
938 	    {
939 		struct uio *uio;
940 
941 		uio = (struct uio *)cookie->id_origbuf;
942 
943 		/*
944 		 * Nothing to do for pre-read.
945 		 */
946 
947 		if (ops & BUS_DMASYNC_PREWRITE) {
948 			/*
949 			 * Copy the caller's buffer to the bounce buffer.
950 			 */
951 			_bus_dma_uiomove((char *)cookie->id_bouncebuf + offset,
952 			    uio, len, UIO_WRITE);
953 		}
954 
955 		if (ops & BUS_DMASYNC_POSTREAD) {
956 			_bus_dma_uiomove((char *)cookie->id_bouncebuf + offset,
957 			    uio, len, UIO_READ);
958 		}
959 
960 		/*
961 		 * Nothing to do for post-write.
962 		 */
963 		break;
964 	    }
965 
966 	case X86_DMA_BUFTYPE_RAW:
967 		panic("%s: X86_DMA_BUFTYPE_RAW", __func__);
968 		break;
969 
970 	case X86_DMA_BUFTYPE_INVALID:
971 		panic("%s: X86_DMA_BUFTYPE_INVALID", __func__);
972 		break;
973 
974 	default:
975 		panic("%s: unknown buffer type %d", __func__,
976 		    cookie->id_buftype);
977 		break;
978 	}
979 end:
980 	/*
981 	 * BUS_DMASYNC_PREREAD: The caller may have previously been
982 	 * using a DMA memory buffer, with loads and stores, and is
983 	 * about to trigger DMA by writing to a register or DMA
984 	 * descriptor.
985 	 *
986 	 * => SFENCE ensures that the stores happen in order, in case
987 	 *    the latter one is non-temporal or to wc/wc+ memory and
988 	 *    thus may be executed early.  x86 never reorders
989 	 *    load;store to store;load for any memory type, so no
990 	 *    barrier is needed for prior loads.
991 	 *
992 	 * BUS_DMASYNC_PREWRITE: The caller has just written to a DMA
993 	 * memory buffer, or we just wrote to the bounce buffer,
994 	 * data that the device needs to use, and the caller is about
995 	 * to trigger DMA by writing to a register or DMA descriptor.
996 	 *
997 	 * => SFENCE ensures that these happen in order so that any
998 	 *    buffered stores are visible to the device before the DMA
999 	 *    is triggered.  x86 never reorders (non-temporal) stores
1000 	 *    to wp/wt/wb or uc memory, but it may reorder two stores
1001 	 *    if one is to wc/wc+ memory, e.g. if the DMA descriptor is
1002 	 *    mapped with BUS_SPACE_MAP_PREFETCHABLE.
1003 	 */
1004 	if (ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE))
1005 		x86_sfence();
1006 
1007 	/*
1008 	 * BUS_DMASYNC_POSTWRITE: The caller has been alerted to DMA
1009 	 * completion by reading a register or DMA descriptor, and the
1010 	 * caller may proceed to reuse the DMA memory buffer, with
1011 	 * loads and stores.
1012 	 *
1013 	 * => No barrier is needed.  Since the DMA memory buffer is not
1014 	 *    changing (we're sending data to the device, not receiving
1015 	 *    data from the device), prefetched loads are safe.  x86
1016 	 *    never reorders load;store to store;load for any memory
1017 	 *    type, so early execution of stores prior to witnessing
1018 	 *    the DMA completion is not possible.
1019 	 */
1020 }
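/*
 * Editorial usage sketch (sc->sc_dmat, sc->sc_dmamap and len are
 * illustrative assumptions): the PRE operations belong before the
 * register write that triggers DMA, the POST operations after DMA
 * completion has been observed, matching the barrier reasoning above.
 *
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
 *	    BUS_DMASYNC_PREREAD);
 *	... write the device register that starts the transfer ...
 *	... later, take the completion interrupt ...
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
 *	    BUS_DMASYNC_POSTREAD);
 *	... now it is safe to read the received data ...
 */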
1021 
1022 /*
1023  * Allocate memory safe for DMA.
1024  */
1025 static int
1026 _bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
1027     bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
1028     int flags)
1029 {
1030 	bus_addr_t high;
1031 
1032 	if (t->_bounce_alloc_hi != 0 && _BUS_AVAIL_END > t->_bounce_alloc_hi - 1)
1033 		high = t->_bounce_alloc_hi - 1;
1034 	else
1035 		high = _BUS_AVAIL_END;
1036 
1037 	return (_BUS_DMAMEM_ALLOC_RANGE(t, size, alignment, boundary,
1038 	    segs, nsegs, rsegs, flags, t->_bounce_alloc_lo, high));
1039 }
1040 
1041 static int
1042 _bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
1043     bus_size_t size, int flags)
1044 {
1045 	struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
1046 	int error = 0;
1047 
1048 	KASSERT(cookie != NULL);
1049 
1050 	cookie->id_bouncebuflen = round_page(size);
1051 	error = _bus_dmamem_alloc(t, cookie->id_bouncebuflen,
1052 	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
1053 	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
1054 	if (error) {
1055 		cookie->id_bouncebuflen = 0;
1056 		cookie->id_nbouncesegs = 0;
1057 		return error;
1058 	}
1059 
1060 	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
1061 	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
1062 	    (void **)&cookie->id_bouncebuf, flags);
1063 
1064 	if (error) {
1065 		_bus_dmamem_free(t, cookie->id_bouncesegs,
1066 		    cookie->id_nbouncesegs);
1067 		cookie->id_bouncebuflen = 0;
1068 		cookie->id_nbouncesegs = 0;
1069 	} else {
1070 		cookie->id_flags |= X86_DMA_HAS_BOUNCE;
1071 		STAT_INCR(nbouncebufs);
1072 	}
1073 
1074 	return (error);
1075 }
1076 
1077 static void
1078 _bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
1079 {
1080 	struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
1081 
1082 	KASSERT(cookie != NULL);
1083 
1084 	STAT_DECR(nbouncebufs);
1085 
1086 	_bus_dmamem_unmap(t, cookie->id_bouncebuf, cookie->id_bouncebuflen);
1087 	_bus_dmamem_free(t, cookie->id_bouncesegs,
1088 	    cookie->id_nbouncesegs);
1089 	cookie->id_bouncebuflen = 0;
1090 	cookie->id_nbouncesegs = 0;
1091 	cookie->id_flags &= ~X86_DMA_HAS_BOUNCE;
1092 }
1093 
1094 
1095 /*
1096  * This function does the same as uiomove, but takes an explicit
1097  * direction, and does not update the uio structure.
1098  */
1099 static int
1100 _bus_dma_uiomove(void *buf, struct uio *uio, size_t n, int direction)
1101 {
1102 	struct iovec *iov;
1103 	int error;
1104 	struct vmspace *vm;
1105 	char *cp;
1106 	size_t resid, cnt;
1107 	int i;
1108 
1109 	iov = uio->uio_iov;
1110 	vm = uio->uio_vmspace;
1111 	cp = buf;
1112 	resid = n;
1113 
1114 	for (i = 0; i < uio->uio_iovcnt && resid > 0; i++) {
1115 		iov = &uio->uio_iov[i];
1116 		if (iov->iov_len == 0)
1117 			continue;
1118 		cnt = MIN(resid, iov->iov_len);
1119 
1120 		if (!VMSPACE_IS_KERNEL_P(vm)) {
1121 			preempt_point();
1122 		}
1123 		if (direction == UIO_READ) {
1124 			error = copyout_vmspace(vm, cp, iov->iov_base, cnt);
1125 		} else {
1126 			error = copyin_vmspace(vm, iov->iov_base, cp, cnt);
1127 		}
1128 		if (error)
1129 			return (error);
1130 		cp += cnt;
1131 		resid -= cnt;
1132 	}
1133 	return (0);
1134 }
1135 
1136 /*
1137  * Common function for freeing DMA-safe memory.  May be called by
1138  * bus-specific DMA memory free functions.
1139  */
1140 static void
1141 _bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
1142 {
1143 	struct vm_page *m;
1144 	bus_addr_t addr;
1145 	struct pglist mlist;
1146 	int curseg;
1147 
1148 	/*
1149 	 * Build a list of pages to free back to the VM system.
1150 	 */
1151 	TAILQ_INIT(&mlist);
1152 	for (curseg = 0; curseg < nsegs; curseg++) {
1153 		for (addr = segs[curseg].ds_addr;
1154 		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
1155 		    addr += PAGE_SIZE) {
1156 			m = _BUS_BUS_TO_VM_PAGE(addr);
1157 			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
1158 		}
1159 	}
1160 
1161 	uvm_pglistfree(&mlist);
1162 }
1163 
1164 /*
1165  * Common function for mapping DMA-safe memory.  May be called by
1166  * bus-specific DMA memory map functions.
1167  * This supports BUS_DMA_NOCACHE and BUS_DMA_PREFETCHABLE.
1168  */
1169 static int
1170 _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
1171     size_t size, void **kvap, int flags)
1172 {
1173 	vaddr_t va;
1174 	bus_addr_t addr;
1175 	int curseg;
1176 	const uvm_flag_t kmflags =
1177 	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;
1178 	u_int pmapflags = PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE;
1179 
1180 	size = round_page(size);
1181 	KASSERTMSG(((flags & (BUS_DMA_NOCACHE|BUS_DMA_PREFETCHABLE)) !=
1182 		(BUS_DMA_NOCACHE|BUS_DMA_PREFETCHABLE)),
1183 	    "BUS_DMA_NOCACHE and BUS_DMA_PREFETCHABLE are mutually exclusive");
1184 	if (flags & BUS_DMA_NOCACHE)
1185 		pmapflags |= PMAP_NOCACHE;
1186 	if (flags & BUS_DMA_PREFETCHABLE)
1187 		pmapflags |= PMAP_WRITE_COMBINE;
1188 
1189 	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
1190 
1191 	if (va == 0)
1192 		return ENOMEM;
1193 
1194 	*kvap = (void *)va;
1195 
1196 	for (curseg = 0; curseg < nsegs; curseg++) {
1197 		for (addr = segs[curseg].ds_addr;
1198 		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
1199 		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
1200 			if (size == 0)
1201 				panic("_bus_dmamem_map: size botch");
1202 			_BUS_PMAP_ENTER(pmap_kernel(), va, addr,
1203 			    VM_PROT_READ | VM_PROT_WRITE,
1204 			    pmapflags);
1205 		}
1206 	}
1207 	pmap_update(pmap_kernel());
1208 
1209 	return 0;
1210 }
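/*
 * Editorial usage sketch (sc->sc_dmat, seg, rseg and kva are
 * illustrative assumptions): descriptor rings are usually obtained
 * with bus_dmamem_alloc() + bus_dmamem_map() and then made visible to
 * the device with bus_dmamap_load().
 *
 *	error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
 *	if (error == 0)
 *		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
 *		    &kva, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
 */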
1211 
1212 /*
1213  * Common function for unmapping DMA-safe memory.  May be called by
1214  * bus-specific DMA memory unmapping functions.
1215  */
1216 
1217 static void
1218 _bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
1219 {
1220 	pt_entry_t *pte, opte;
1221 	vaddr_t va, sva, eva;
1222 
1223 	KASSERTMSG(((uintptr_t)kva & PGOFSET) == 0, "kva=%p", kva);
1224 
1225 	size = round_page(size);
1226 	sva = (vaddr_t)kva;
1227 	eva = sva + size;
1228 
1229 	/*
1230 	 * Mark the pages cacheable again.
1231 	 */
1232 	for (va = sva; va < eva; va += PAGE_SIZE) {
1233 		pte = kvtopte(va);
1234 		opte = *pte;
1235 		if ((opte & PTE_PCD) != 0)
1236 			pmap_pte_clearbits(pte, PTE_PCD);
1237 	}
1238 	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
1239 	pmap_update(pmap_kernel());
1240 	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
1241 }
1242 
1243 /*
1244  * Common function for mmap(2)'ing DMA-safe memory.  May be called by
1245  * bus-specific DMA mmap(2)'ing functions.
1246  */
1247 static paddr_t
1248 _bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
1249     off_t off, int prot, int flags)
1250 {
1251 	int i;
1252 
1253 	for (i = 0; i < nsegs; i++) {
1254 		KASSERTMSG((off & PGOFSET) == 0, "off=0x%jx", (uintmax_t)off);
1255 		KASSERTMSG((segs[i].ds_addr & PGOFSET) == 0,
1256 		    "segs[%u].ds_addr=%"PRIxBUSADDR, i, segs[i].ds_addr);
1257 		KASSERTMSG((segs[i].ds_len & PGOFSET) == 0,
1258 		    "segs[%u].ds_len=%"PRIxBUSSIZE, i, segs[i].ds_len);
1259 		if (off >= segs[i].ds_len) {
1260 			off -= segs[i].ds_len;
1261 			continue;
1262 		}
1263 
1264 		return (x86_btop(_BUS_BUS_TO_PHYS(segs[i].ds_addr + off)));
1265 	}
1266 
1267 	/* Page not found. */
1268 	return (-1);
1269 }
1270 
1271 /**********************************************************************
1272  * DMA utility functions
1273  **********************************************************************/
1274 
1275 /*
1276  * Utility function to load a linear buffer.
1277  */
1278 static int
1279 _bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
1280     bus_size_t buflen, struct vmspace *vm, int flags)
1281 {
1282 	bus_size_t sgsize;
1283 	bus_addr_t curaddr;
1284 	vaddr_t vaddr = (vaddr_t)buf;
1285 	pmap_t pmap;
1286 
1287 	if (vm != NULL)
1288 		pmap = vm_map_pmap(&vm->vm_map);
1289 	else
1290 		pmap = pmap_kernel();
1291 
1292 	while (buflen > 0) {
1293 		int error;
1294 
1295 		/*
1296 		 * Get the bus address for this segment.
1297 		 */
1298 		curaddr = _BUS_VIRT_TO_BUS(pmap, vaddr);
1299 
1300 		/*
1301 		 * Compute the segment size, and adjust counts.
1302 		 */
1303 		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
1304 		if (buflen < sgsize)
1305 			sgsize = buflen;
1306 
1307 		/*
1308 		 * If we're beyond the bounce threshold, notify
1309 		 * the caller.
1310 		 */
1311 		if (map->_dm_bounce_thresh != 0 &&
1312 		    curaddr + sgsize >= map->_dm_bounce_thresh)
1313 			return (EINVAL);
1314 
1315 
1316 		error = _bus_dmamap_load_busaddr(t, map, curaddr, sgsize);
1317 		if (error)
1318 			return error;
1319 
1320 		vaddr += sgsize;
1321 		buflen -= sgsize;
1322 	}
1323 
1324 	return (0);
1325 }
1326 
1327 static int
1328 _bus_dmatag_subregion(bus_dma_tag_t tag, bus_addr_t min_addr,
1329 		      bus_addr_t max_addr, bus_dma_tag_t *newtag, int flags)
1330 {
1331 
1332 	if ((tag->_bounce_thresh != 0   && max_addr >= tag->_bounce_thresh - 1) &&
1333 	    (tag->_bounce_alloc_hi != 0 && max_addr >= tag->_bounce_alloc_hi - 1) &&
1334 	    (min_addr <= tag->_bounce_alloc_lo)) {
1335 		*newtag = tag;
1336 		/* if the tag must be freed, add a reference */
1337 		if (tag->_tag_needs_free)
1338 			(tag->_tag_needs_free)++;
1339 		return 0;
1340 	}
1341 
1342 	if ((*newtag = malloc(sizeof(struct x86_bus_dma_tag), M_DMAMAP,
1343 	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
1344 		return ENOMEM;
1345 
1346 	**newtag = *tag;
1347 	(*newtag)->_tag_needs_free = 1;
1348 
1349 	if (tag->_bounce_thresh == 0 || max_addr < tag->_bounce_thresh)
1350 		(*newtag)->_bounce_thresh = max_addr;
1351 	if (tag->_bounce_alloc_hi == 0 || max_addr < tag->_bounce_alloc_hi)
1352 		(*newtag)->_bounce_alloc_hi = max_addr;
1353 	if (min_addr > tag->_bounce_alloc_lo)
1354 		(*newtag)->_bounce_alloc_lo = min_addr;
1355 
1356 	return 0;
1357 }
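/*
 * Editorial usage sketch (pa->pa_dmat and sc->sc_dmat are illustrative
 * assumptions): a driver for a device limited to 32-bit addresses can
 * restrict its parent tag like this and release the result with
 * bus_dmatag_destroy() at detach time.
 *
 *	if (bus_dmatag_subregion(pa->pa_dmat, 0, 0xffffffffULL,
 *	    &sc->sc_dmat, BUS_DMA_NOWAIT) != 0)
 *		sc->sc_dmat = pa->pa_dmat;	... or fail the attach ...
 */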
1358 
1359 static void
1360 _bus_dmatag_destroy(bus_dma_tag_t tag)
1361 {
1362 
1363 	switch (tag->_tag_needs_free) {
1364 	case 0:
1365 		break;				/* not allocated with malloc */
1366 	case 1:
1367 		free(tag, M_DMAMAP);		/* last reference to tag */
1368 		break;
1369 	default:
1370 		(tag->_tag_needs_free)--;	/* one less reference */
1371 	}
1372 }
1373 
1374 
1375 void
1376 bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t p, bus_addr_t o, bus_size_t l,
1377 		int ops)
1378 {
1379 	bus_dma_tag_t it;
1380 
1381 	kasan_dma_sync(p, o, l, ops);
1382 	kmsan_dma_sync(p, o, l, ops);
1383 
1384 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_SYNC) == 0)
1385 		;	/* skip override */
1386 	else for (it = t; it != NULL; it = it->bdt_super) {
1387 		if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_SYNC) == 0)
1388 			continue;
1389 		(*it->bdt_ov->ov_dmamap_sync)(it->bdt_ctx, t, p, o,
1390 		    l, ops);
1391 		return;
1392 	}
1393 
1394 	_bus_dmamap_sync(t, p, o, l, ops);
1395 }
1396 
1397 int
1398 bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
1399 		  bus_size_t maxsegsz, bus_size_t boundary, int flags,
1400 		  bus_dmamap_t *dmamp)
1401 {
1402 	bus_dma_tag_t it;
1403 
1404 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_CREATE) == 0)
1405 		;	/* skip override */
1406 	else for (it = t; it != NULL; it = it->bdt_super) {
1407 		if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_CREATE) == 0)
1408 			continue;
1409 		return (*it->bdt_ov->ov_dmamap_create)(it->bdt_ctx, t, size,
1410 		    nsegments, maxsegsz, boundary, flags, dmamp);
1411 	}
1412 
1413 	return _bus_dmamap_create(t, size, nsegments, maxsegsz,
1414 	    boundary, flags, dmamp);
1415 }
1416 
1417 void
1418 bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t dmam)
1419 {
1420 	bus_dma_tag_t it;
1421 
1422 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_DESTROY) == 0)
1423 		;	/* skip override */
1424 	else for (it = t; it != NULL; it = it->bdt_super) {
1425 		if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_DESTROY) == 0)
1426 			continue;
1427 		(*it->bdt_ov->ov_dmamap_destroy)(it->bdt_ctx, t, dmam);
1428 		return;
1429 	}
1430 
1431 	_bus_dmamap_destroy(t, dmam);
1432 }
1433 
1434 int
1435 bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t dmam, void *buf,
1436 		bus_size_t buflen, struct proc *p, int flags)
1437 {
1438 	bus_dma_tag_t it;
1439 
1440 	kasan_dma_load(dmam, buf, buflen, KASAN_DMA_LINEAR);
1441 	kmsan_dma_load(dmam, buf, buflen, KMSAN_DMA_LINEAR);
1442 
1443 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_LOAD) == 0)
1444 		;	/* skip override */
1445 	else for (it = t; it != NULL; it = it->bdt_super) {
1446 		if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_LOAD) == 0)
1447 			continue;
1448 		return (*it->bdt_ov->ov_dmamap_load)(it->bdt_ctx, t, dmam,
1449 		    buf, buflen, p, flags);
1450 	}
1451 
1452 	return _bus_dmamap_load(t, dmam, buf, buflen, p, flags);
1453 }
1454 
1455 int
1456 bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t dmam,
1457 		     struct mbuf *chain, int flags)
1458 {
1459 	bus_dma_tag_t it;
1460 
1461 	kasan_dma_load(dmam, chain, 0, KASAN_DMA_MBUF);
1462 	kmsan_dma_load(dmam, chain, 0, KMSAN_DMA_MBUF);
1463 
1464 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_LOAD_MBUF) == 0)
1465 		;	/* skip override */
1466 	else for (it = t; it != NULL; it = it->bdt_super) {
1467 		if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_LOAD_MBUF) == 0)
1468 			continue;
1469 		return (*it->bdt_ov->ov_dmamap_load_mbuf)(it->bdt_ctx, t, dmam,
1470 		    chain, flags);
1471 	}
1472 
1473 	return _bus_dmamap_load_mbuf(t, dmam, chain, flags);
1474 }
1475 
1476 int
1477 bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t dmam,
1478 		    struct uio *uio, int flags)
1479 {
1480 	bus_dma_tag_t it;
1481 
1482 	kasan_dma_load(dmam, uio, 0, KASAN_DMA_UIO);
1483 	kmsan_dma_load(dmam, uio, 0, KMSAN_DMA_UIO);
1484 
1485 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_LOAD_UIO) == 0)
1486 		;	/* skip override */
1487 	else for (it = t; it != NULL; it = it->bdt_super) {
1488 		if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_LOAD_UIO) == 0)
1489 			continue;
1490 		return (*it->bdt_ov->ov_dmamap_load_uio)(it->bdt_ctx, t, dmam,
1491 		    uio, flags);
1492 	}
1493 
1494 	return _bus_dmamap_load_uio(t, dmam, uio, flags);
1495 }
1496 
1497 int
1498 bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t dmam,
1499 		    bus_dma_segment_t *segs, int nsegs,
1500 		    bus_size_t size, int flags)
1501 {
1502 	bus_dma_tag_t it;
1503 
1504 	kasan_dma_load(dmam, NULL, 0, KASAN_DMA_RAW);
1505 	kmsan_dma_load(dmam, NULL, 0, KMSAN_DMA_RAW);
1506 
1507 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_LOAD_RAW) == 0)
1508 		;	/* skip override */
1509 	else for (it = t; it != NULL; it = it->bdt_super) {
1510 		if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_LOAD_RAW) == 0)
1511 			continue;
1512 		return (*it->bdt_ov->ov_dmamap_load_raw)(it->bdt_ctx, t, dmam,
1513 		    segs, nsegs, size, flags);
1514 	}
1515 
1516 	return _bus_dmamap_load_raw(t, dmam, segs, nsegs, size, flags);
1517 }
1518 
1519 void
1520 bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t dmam)
1521 {
1522 	bus_dma_tag_t it;
1523 
1524 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_UNLOAD) == 0)
1525 		;	/* skip override */
1526 	else for (it = t; it != NULL; it = it->bdt_super) {
1527 		if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_UNLOAD) == 0)
1528 			continue;
1529 		(*it->bdt_ov->ov_dmamap_unload)(it->bdt_ctx, t, dmam);
1530 		return;
1531 	}
1532 
1533 	_bus_dmamap_unload(t, dmam);
1534 }
1535 
1536 int
1537 bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
1538 		 bus_size_t boundary, bus_dma_segment_t *segs, int nsegs,
1539 		 int *rsegs, int flags)
1540 {
1541 	bus_dma_tag_t it;
1542 
1543 	if ((t->bdt_exists & BUS_DMAMEM_OVERRIDE_ALLOC) == 0)
1544 		;	/* skip override */
1545 	else for (it = t; it != NULL; it = it->bdt_super) {
1546 		if ((it->bdt_present & BUS_DMAMEM_OVERRIDE_ALLOC) == 0)
1547 			continue;
1548 		return (*it->bdt_ov->ov_dmamem_alloc)(it->bdt_ctx, t, size,
1549 		    alignment, boundary, segs, nsegs, rsegs, flags);
1550 	}
1551 
1552 	return _bus_dmamem_alloc(t, size, alignment, boundary, segs,
1553 	    nsegs, rsegs, flags);
1554 }
1555 
1556 void
1557 bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
1558 {
1559 	bus_dma_tag_t it;
1560 
1561 	if ((t->bdt_exists & BUS_DMAMEM_OVERRIDE_FREE) == 0)
1562 		;	/* skip override */
1563 	else for (it = t; it != NULL; it = it->bdt_super) {
1564 		if ((it->bdt_present & BUS_DMAMEM_OVERRIDE_FREE) == 0)
1565 			continue;
1566 		(*it->bdt_ov->ov_dmamem_free)(it->bdt_ctx, t, segs, nsegs);
1567 		return;
1568 	}
1569 
1570 	_bus_dmamem_free(t, segs, nsegs);
1571 }
1572 
1573 int
1574 bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
1575 	       size_t size, void **kvap, int flags)
1576 {
1577 	bus_dma_tag_t it;
1578 
1579 	if ((t->bdt_exists & BUS_DMAMEM_OVERRIDE_MAP) == 0)
1580 		;	/* skip override */
1581 	else for (it = t; it != NULL; it = it->bdt_super) {
1582 		if ((it->bdt_present & BUS_DMAMEM_OVERRIDE_MAP) == 0)
1583 			continue;
1584 		return (*it->bdt_ov->ov_dmamem_map)(it->bdt_ctx, t,
1585 		    segs, nsegs, size, kvap, flags);
1586 	}
1587 
1588 	return _bus_dmamem_map(t, segs, nsegs, size, kvap, flags);
1589 }
1590 
1591 void
1592 bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
1593 {
1594 	bus_dma_tag_t it;
1595 
1596 	if ((t->bdt_exists & BUS_DMAMEM_OVERRIDE_UNMAP) == 0)
1597 		;	/* skip override */
1598 	else for (it = t; it != NULL; it = it->bdt_super) {
1599 		if ((it->bdt_present & BUS_DMAMEM_OVERRIDE_UNMAP) == 0)
1600 			continue;
1601 		(*it->bdt_ov->ov_dmamem_unmap)(it->bdt_ctx, t, kva, size);
1602 		return;
1603 	}
1604 
1605 	_bus_dmamem_unmap(t, kva, size);
1606 }
1607 
1608 paddr_t
1609 bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
1610 		off_t off, int prot, int flags)
1611 {
1612 	bus_dma_tag_t it;
1613 
1614 	if ((t->bdt_exists & BUS_DMAMEM_OVERRIDE_MMAP) == 0)
1615 		;	/* skip override */
1616 	else for (it = t; it != NULL; it = it->bdt_super) {
1617 		if ((it->bdt_present & BUS_DMAMEM_OVERRIDE_MMAP) == 0)
1618 			continue;
1619 		return (*it->bdt_ov->ov_dmamem_mmap)(it->bdt_ctx, t, segs,
1620 		    nsegs, off, prot, flags);
1621 	}
1622 
1623 	return _bus_dmamem_mmap(t, segs, nsegs, off, prot, flags);
1624 }
1625 
1626 int
1627 bus_dmatag_subregion(bus_dma_tag_t t, bus_addr_t min_addr,
1628 		     bus_addr_t max_addr, bus_dma_tag_t *newtag, int flags)
1629 {
1630 	bus_dma_tag_t it;
1631 
1632 	if ((t->bdt_exists & BUS_DMATAG_OVERRIDE_SUBREGION) == 0)
1633 		;	/* skip override */
1634 	else for (it = t; it != NULL; it = it->bdt_super) {
1635 		if ((it->bdt_present & BUS_DMATAG_OVERRIDE_SUBREGION) == 0)
1636 			continue;
1637 		return (*it->bdt_ov->ov_dmatag_subregion)(it->bdt_ctx, t,
1638 		    min_addr, max_addr, newtag, flags);
1639 	}
1640 
1641 	return _bus_dmatag_subregion(t, min_addr, max_addr, newtag, flags);
1642 }
1643 
1644 void
1645 bus_dmatag_destroy(bus_dma_tag_t t)
1646 {
1647 	bus_dma_tag_t it;
1648 
1649 	if ((t->bdt_exists & BUS_DMATAG_OVERRIDE_DESTROY) == 0)
1650 		;	/* skip override */
1651 	else for (it = t; it != NULL; it = it->bdt_super) {
1652 		if ((it->bdt_present & BUS_DMATAG_OVERRIDE_DESTROY) == 0)
1653 			continue;
1654 		(*it->bdt_ov->ov_dmatag_destroy)(it->bdt_ctx, t);
1655 		return;
1656 	}
1657 
1658 	_bus_dmatag_destroy(t);
1659 }
1660 
1661 static const void *
1662 bit_to_function_pointer(const struct bus_dma_overrides *ov, uint64_t bit)
1663 {
1664 	switch (bit) {
1665 	case BUS_DMAMAP_OVERRIDE_CREATE:
1666 		return ov->ov_dmamap_create;
1667 	case BUS_DMAMAP_OVERRIDE_DESTROY:
1668 		return ov->ov_dmamap_destroy;
1669 	case BUS_DMAMAP_OVERRIDE_LOAD:
1670 		return ov->ov_dmamap_load;
1671 	case BUS_DMAMAP_OVERRIDE_LOAD_MBUF:
1672 		return ov->ov_dmamap_load_mbuf;
1673 	case BUS_DMAMAP_OVERRIDE_LOAD_UIO:
1674 		return ov->ov_dmamap_load_uio;
1675 	case BUS_DMAMAP_OVERRIDE_LOAD_RAW:
1676 		return ov->ov_dmamap_load_raw;
1677 	case BUS_DMAMAP_OVERRIDE_UNLOAD:
1678 		return ov->ov_dmamap_unload;
1679 	case BUS_DMAMAP_OVERRIDE_SYNC:
1680 		return ov->ov_dmamap_sync;
1681 	case BUS_DMAMEM_OVERRIDE_ALLOC:
1682 		return ov->ov_dmamem_alloc;
1683 	case BUS_DMAMEM_OVERRIDE_FREE:
1684 		return ov->ov_dmamem_free;
1685 	case BUS_DMAMEM_OVERRIDE_MAP:
1686 		return ov->ov_dmamem_map;
1687 	case BUS_DMAMEM_OVERRIDE_UNMAP:
1688 		return ov->ov_dmamem_unmap;
1689 	case BUS_DMAMEM_OVERRIDE_MMAP:
1690 		return ov->ov_dmamem_mmap;
1691 	case BUS_DMATAG_OVERRIDE_SUBREGION:
1692 		return ov->ov_dmatag_subregion;
1693 	case BUS_DMATAG_OVERRIDE_DESTROY:
1694 		return ov->ov_dmatag_destroy;
1695 	default:
1696 		return NULL;
1697 	}
1698 }
1699 
1700 void
1701 bus_dma_tag_destroy(bus_dma_tag_t bdt)
1702 {
1703 	if (bdt->bdt_super != NULL)
1704 		bus_dmatag_destroy(bdt->bdt_super);
1705 	kmem_free(bdt, sizeof(struct x86_bus_dma_tag));
1706 }
1707 
1708 int
1709 bus_dma_tag_create(bus_dma_tag_t obdt, const uint64_t present,
1710     const struct bus_dma_overrides *ov, void *ctx, bus_dma_tag_t *bdtp)
1711 {
1712 	uint64_t bit, bits, nbits;
1713 	bus_dma_tag_t bdt;
1714 	const void *fp;
1715 
1716 	if (ov == NULL || present == 0)
1717 		return EINVAL;
1718 
1719 	bdt = kmem_alloc(sizeof(struct x86_bus_dma_tag), KM_SLEEP);
1720 	*bdt = *obdt;
1721 	/* don't let bus_dmatag_destroy free these */
1722 	bdt->_tag_needs_free = 0;
1723 
1724 	bdt->bdt_super = obdt;
1725 
1726 	for (bits = present; bits != 0; bits = nbits) {
1727 		nbits = bits & (bits - 1);
1728 		bit = nbits ^ bits;
1729 		if ((fp = bit_to_function_pointer(ov, bit)) == NULL) {
1730 #ifdef DEBUG
1731 			printf("%s: missing bit %" PRIx64 "\n", __func__, bit);
1732 #endif
1733 			goto einval;
1734 		}
1735 	}
1736 
1737 	bdt->bdt_ov = ov;
1738 	bdt->bdt_exists = obdt->bdt_exists | present;
1739 	bdt->bdt_present = present;
1740 	bdt->bdt_ctx = ctx;
1741 
1742 	*bdtp = bdt;
1743 	if (obdt->_tag_needs_free)
1744 		obdt->_tag_needs_free++;
1745 
1746 	return 0;
1747 einval:
1748 	kmem_free(bdt, sizeof(struct x86_bus_dma_tag));
1749 	return EINVAL;
1750 }
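/*
 * Editorial usage sketch (my_overrides, my_dmamap_load, my_dmamap_unload,
 * my_softc and my_tag are illustrative assumptions): an IOMMU-style
 * layer overrides only the operations it cares about; everything else
 * falls through to the parent tag via the bdt_super chain.
 *
 *	static const struct bus_dma_overrides my_overrides = {
 *		.ov_dmamap_load = my_dmamap_load,
 *		.ov_dmamap_unload = my_dmamap_unload,
 *	};
 *
 *	error = bus_dma_tag_create(parent_tag,
 *	    BUS_DMAMAP_OVERRIDE_LOAD | BUS_DMAMAP_OVERRIDE_UNLOAD,
 *	    &my_overrides, my_softc, &my_tag);
 */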
1751