xref: /netbsd-src/sys/arch/atari/isa/isa_dma.c (revision d710132b4b8ce7f7cccaaf660cb16aa16b4077a0)
1 /*	$NetBSD: isa_dma.c,v 1.2 2003/05/05 12:55:42 fvdl Exp $	*/
2 
3 #define ISA_DMA_STATS
4 
5 /*-
6  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to The NetBSD Foundation
10  * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
11  * Simulation Facility, NASA Ames Research Center.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. All advertising materials mentioning features or use of this software
22  *    must display the following acknowledgement:
23  *	This product includes software developed by the NetBSD
24  *	Foundation, Inc. and its contributors.
25  * 4. Neither the name of The NetBSD Foundation nor the names of its
26  *    contributors may be used to endorse or promote products derived
27  *    from this software without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
30  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
31  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
32  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
33  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
36  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
37  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39  * POSSIBILITY OF SUCH DAMAGE.
40  */
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/syslog.h>
45 #include <sys/device.h>
46 #include <sys/malloc.h>
47 #include <sys/proc.h>
48 #include <sys/mbuf.h>
49 
50 #define _ATARI_BUS_DMA_PRIVATE
51 #include <machine/bus.h>
52 
53 #include <dev/isa/isareg.h>
54 #include <dev/isa/isavar.h>
55 
56 #include <uvm/uvm_extern.h>
57 
58 extern	paddr_t avail_end;
59 
/*
 * Cookie used by ISA DMA.  A pointer to one of these is stashed in
 * the DMA map.
 */
struct atari_isa_dma_cookie {
	int	id_flags;		/* flags; see below */

	/*
	 * Information about the original buffer used during
	 * DMA map syncs.  Note that id_origbuflen is only used
	 * for ID_BUFTYPE_LINEAR.
	 */
	void	*id_origbuf;		/* pointer to orig buffer if
					   bouncing */
	bus_size_t id_origbuflen;	/* ...and size */
	int	id_buftype;		/* type of buffer */

	void	*id_bouncebuf;		/* pointer to the bounce buffer */
	bus_size_t id_bouncebuflen;	/* ...and size */
	int	id_nbouncesegs;		/* number of valid bounce segs */
	bus_dma_segment_t id_bouncesegs[0]; /* array of bounce buffer
					       physical memory segments;
					       storage is allocated past the
					       end of the cookie by
					       _isa_bus_dmamap_create() */
};

/* id_flags */
#define	ID_MIGHT_NEED_BOUNCE	0x01	/* map could need bounce buffers */
#define	ID_HAS_BOUNCE		0x02	/* map currently has bounce buffers */
#define	ID_IS_BOUNCING		0x04	/* map is bouncing current xfer */

/* id_buftype; records which load function last filled the map */
#define	ID_BUFTYPE_INVALID	0	/* map is not loaded */
#define	ID_BUFTYPE_LINEAR	1	/* loaded via dmamap_load() */
#define	ID_BUFTYPE_MBUF		2	/* loaded via dmamap_load_mbuf() */
#define	ID_BUFTYPE_UIO		3	/* loaded via dmamap_load_uio() */
#define	ID_BUFTYPE_RAW		4	/* loaded via dmamap_load_raw() */
95 
/*
 * Prototypes for the ISA flavors of the bus_dma entry points, plus
 * the internal bounce-buffer helpers they use.
 */
int	_isa_bus_dmamap_create __P((bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *));
void	_isa_bus_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t));
int	_isa_bus_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int));
int	_isa_bus_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int));
int	_isa_bus_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int));
int	_isa_bus_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int));
void	_isa_bus_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
void	_isa_bus_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int));

int	_isa_bus_dmamem_alloc __P((bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int));

int	_isa_dma_alloc_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int));
void	_isa_dma_free_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t));
117 
/*
 * Entry points for ISA DMA.  These are mostly wrappers around
 * the generic functions that understand how to deal with bounce
 * buffers, if necessary.
 */
struct atari_bus_dma_tag isa_bus_dma_tag = {
	ISA_DMA_BOUNCE_THRESHOLD,	/* _bounce_thresh */
	0,
	_isa_bus_dmamap_create,
	_isa_bus_dmamap_destroy,
	_isa_bus_dmamap_load,
	_isa_bus_dmamap_load_mbuf,
	_isa_bus_dmamap_load_uio,
	_isa_bus_dmamap_load_raw,
	_isa_bus_dmamap_unload,
	_isa_bus_dmamap_sync,
};
135 
136 /**********************************************************************
137  * bus.h dma interface entry points
138  **********************************************************************/
139 
#ifdef ISA_DMA_STATS
/*
 * Optional statistics, compiled in when ISA_DMA_STATS is defined
 * (see the top of this file).  STAT_DECR() warns on underflow.
 */
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
		} while (0)
u_long	isa_dma_stats_loads;		/* total dmamap load calls */
u_long	isa_dma_stats_bounces;		/* loads that fell back to bouncing */
u_long	isa_dma_stats_nbouncebufs;	/* bounce buffers currently allocated */
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif
155 
/*
 * Create an ISA DMA map.
 *
 * Creates the basic map via the machine-independent code, then
 * attaches an atari_isa_dma_cookie to it recording whether the map
 * may ever need to bounce.  Returns 0 on success or an errno value;
 * on failure the partially-created map is destroyed.
 */
int
_isa_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct atari_isa_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	/* NULL cookie so the error path below knows nothing to free yet. */
	map->_dm_cookie = NULL;

	cookiesize = sizeof(struct atari_isa_dma_cookie);

	/*
	 * ISA only has 24-bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * There are exceptions, however.  VLB devices can do
	 * 32-bit DMA, and indicate that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / PAGE_SIZE) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 */
	if (avail_end <= t->_bounce_thresh ||
	    (flags & ISABUS_DMA_32BIT) != 0) {
		/* Bouncing not necessary due to memory size. */
		map->_dm_bounce_thresh = 0;
	}
	cookieflags = 0;
	if (map->_dm_bounce_thresh != 0 ||
	    ((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt) {
		cookieflags |= ID_MIGHT_NEED_BOUNCE;
		/* Room for the trailing id_bouncesegs[] array. */
		cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
	}

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct atari_isa_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		/* Undo everything: the cookie (if any) and the basic map. */
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DMAMAP);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}
252 
253 /*
254  * Destroy an ISA DMA map.
255  */
256 void
257 _isa_bus_dmamap_destroy(t, map)
258 	bus_dma_tag_t t;
259 	bus_dmamap_t map;
260 {
261 	struct atari_isa_dma_cookie *cookie = map->_dm_cookie;
262 
263 	/*
264 	 * Free any bounce pages this map might hold.
265 	 */
266 	if (cookie->id_flags & ID_HAS_BOUNCE)
267 		_isa_dma_free_bouncebuf(t, map);
268 
269 	free(cookie, M_DMAMAP);
270 	_bus_dmamap_destroy(t, map);
271 }
272 
/*
 * Load an ISA DMA map with a linear buffer.
 *
 * First tries the normal machine-independent load.  If that fails
 * and the map was flagged ID_MIGHT_NEED_BOUNCE at create time, the
 * map is loaded with the bounce buffer instead, and the caller's
 * buffer is remembered in the cookie so _isa_bus_dmamap_sync() can
 * copy data between the two.
 */
int
_isa_bus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	struct atari_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	STAT_INCR(isa_dma_stats_loads);

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 * (They may already exist if BUS_DMA_ALLOCNOW was used, or if
	 * a previous bounced load kept them around.)
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    p, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}
343 
/*
 * Like _isa_bus_dmamap_load(), but for mbufs.
 *
 * The mbuf chain head (which must have a packet header) is cached
 * in the cookie so _isa_bus_dmamap_sync() can walk the chain when
 * copying to/from the bounce buffer.
 */
int
_isa_bus_dmamap_load_mbuf(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	struct atari_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_isa_bus_dmamap_load_mbuf: no packet header");
#endif

	/* Reject chains longer than the map can ever hold. */
	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.  Note the bounce buffer is linear,
	 * so the plain load function is used here.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}
419 
420 /*
421  * Like _isa_bus_dmamap_load(), but for uios.
422  */
423 int
424 _isa_bus_dmamap_load_uio(t, map, uio, flags)
425 	bus_dma_tag_t t;
426 	bus_dmamap_t map;
427 	struct uio *uio;
428 	int flags;
429 {
430 
431 	panic("_isa_bus_dmamap_load_uio: not implemented");
432 }
433 
434 /*
435  * Like _isa_bus_dmamap_load(), but for raw memory allocated with
436  * bus_dmamem_alloc().
437  */
438 int
439 _isa_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
440 	bus_dma_tag_t t;
441 	bus_dmamap_t map;
442 	bus_dma_segment_t *segs;
443 	int nsegs;
444 	bus_size_t size;
445 	int flags;
446 {
447 
448 	panic("_isa_bus_dmamap_load_raw: not implemented");
449 }
450 
451 /*
452  * Unload an ISA DMA map.
453  */
454 void
455 _isa_bus_dmamap_unload(t, map)
456 	bus_dma_tag_t t;
457 	bus_dmamap_t map;
458 {
459 	struct atari_isa_dma_cookie *cookie = map->_dm_cookie;
460 
461 	/*
462 	 * If we have bounce pages, free them, unless they're
463 	 * reserved for our exclusive use.
464 	 */
465 	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
466 	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
467 		_isa_dma_free_bouncebuf(t, map);
468 
469 	cookie->id_flags &= ~ID_IS_BOUNCING;
470 	cookie->id_buftype = ID_BUFTYPE_INVALID;
471 
472 	/*
473 	 * Do the generic bits of the unload.
474 	 */
475 	_bus_dmamap_unload(t, map);
476 }
477 
/*
 * Synchronize an ISA DMA map.
 *
 * When the map is bouncing the current transfer, this stages data
 * between the caller's original buffer and the bounce buffer:
 * PREWRITE copies original -> bounce (before the device reads it),
 * POSTREAD copies bounce -> original (after the device wrote it).
 * Otherwise this is a no-op.
 */
void
_isa_bus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	struct atari_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_isa_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	/*
	 * Sanity-check the sync window, but only for the two
	 * operations that actually copy data below.
	 */
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just return; nothing to do.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0)
		return;

	/* The copy strategy depends on how the map was loaded. */
	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 * m_copydata() handles walking the chain.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer,
			 * walking the mbuf chain by hand.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, caddr_t) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				/* Subsequent mbufs start at their head. */
				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		/* load_uio is not implemented, so this can't be loaded. */
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		/* load_raw is not implemented, so this can't be loaded. */
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		/* Sync on an unloaded (or unloaded-and-reused) map. */
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("_isa_bus_dmamap_sync");
	}
}
610 
611 /*
612  * Allocate memory safe for ISA DMA.
613  */
614 int
615 _isa_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
616 	bus_dma_tag_t t;
617 	bus_size_t size, alignment, boundary;
618 	bus_dma_segment_t *segs;
619 	int nsegs;
620 	int *rsegs;
621 	int flags;
622 {
623 	paddr_t high;
624 
625 	if (avail_end > ISA_DMA_BOUNCE_THRESHOLD)
626 		high = trunc_page(ISA_DMA_BOUNCE_THRESHOLD);
627 	else
628 		high = trunc_page(avail_end);
629 
630 	return (bus_dmamem_alloc_range(t, size, alignment, boundary,
631 	    segs, nsegs, rsegs, flags, 0, high));
632 }
633 
634 /**********************************************************************
635  * ISA DMA utility functions
636  **********************************************************************/
637 
638 int
639 _isa_dma_alloc_bouncebuf(t, map, size, flags)
640 	bus_dma_tag_t t;
641 	bus_dmamap_t map;
642 	bus_size_t size;
643 	int flags;
644 {
645 	struct atari_isa_dma_cookie *cookie = map->_dm_cookie;
646 	int error = 0;
647 
648 	cookie->id_bouncebuflen = round_page(size);
649 	error = _isa_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
650 	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
651 	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
652 	if (error)
653 		goto out;
654 	error = bus_dmamem_map(t, cookie->id_bouncesegs,
655 	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
656 	    (caddr_t *)&cookie->id_bouncebuf, flags);
657 
658  out:
659 	if (error) {
660 		bus_dmamem_free(t, cookie->id_bouncesegs,
661 		    cookie->id_nbouncesegs);
662 		cookie->id_bouncebuflen = 0;
663 		cookie->id_nbouncesegs = 0;
664 	} else {
665 		cookie->id_flags |= ID_HAS_BOUNCE;
666 		STAT_INCR(isa_dma_stats_nbouncebufs);
667 	}
668 
669 	return (error);
670 }
671 
/*
 * Release the bounce buffer attached to the given map: the kernel
 * virtual mapping is torn down before the pages backing it are
 * freed, then the cookie's bounce-buffer state is cleared.
 */
void
_isa_dma_free_bouncebuf(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct atari_isa_dma_cookie *cookie = map->_dm_cookie;

	STAT_DECR(isa_dma_stats_nbouncebufs);

	/* Unmap first; the mapping refers to the segments freed below. */
	bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	/* Reset book-keeping so a future load can re-allocate. */
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}
689