xref: /netbsd-src/sys/arch/atari/isa/isa_dma.c (revision e6a4e4eb043b127d350a480e09484638c4b2f764)
1 /*	$NetBSD: isa_dma.c,v 1.16 2022/01/22 15:10:30 skrll Exp $	*/
2 
3 #define ISA_DMA_STATS
4 
5 /*-
6  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to The NetBSD Foundation
10  * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
11  * Simulation Facility, NASA Ames Research Center.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: isa_dma.c,v 1.16 2022/01/22 15:10:30 skrll Exp $");
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/syslog.h>
42 #include <sys/device.h>
43 #include <sys/kmem.h>
44 #include <sys/proc.h>
45 #include <sys/mbuf.h>
46 
47 #define _ATARI_BUS_DMA_PRIVATE
48 #include <sys/bus.h>
49 
50 #include <dev/isa/isareg.h>
51 #include <dev/isa/isavar.h>
52 
53 #include <uvm/uvm_extern.h>
54 
55 extern	paddr_t avail_end;
56 
57 /*
58  * Cookie used by ISA dma.  A pointer to one of these it stashed in
59  * the DMA map.
60  */
61 struct atari_isa_dma_cookie {
62 	int	id_flags;		/* flags; see below */
63 
64 	/*
65 	 * Information about the original buffer used during
66 	 * DMA map syncs.  Note that origibuflen is only used
67 	 * for ID_BUFTYPE_LINEAR.
68 	 */
69 	void	*id_origbuf;		/* pointer to orig buffer if
70 					   bouncing */
71 	bus_size_t id_origbuflen;	/* ...and size */
72 	int	id_buftype;		/* type of buffer */
73 
74 	void	*id_bouncebuf;		/* pointer to the bounce buffer */
75 	bus_size_t id_bouncebuflen;	/* ...and size */
76 	int	id_nbouncesegs;		/* number of valid bounce segs */
77 	bus_dma_segment_t id_bouncesegs[0]; /* array of bounce buffer
78 					       physical memory segments */
79 };
80 
/* id_flags */
#define	ID_MIGHT_NEED_BOUNCE	0x01	/* map could need bounce buffers */
#define	ID_HAS_BOUNCE		0x02	/* map currently has bounce buffers */
#define	ID_IS_BOUNCING		0x04	/* map is bouncing current xfer */

/* id_buftype: what kind of buffer the map was last loaded with */
#define	ID_BUFTYPE_INVALID	0	/* nothing loaded */
#define	ID_BUFTYPE_LINEAR	1	/* plain linear buffer (load) */
#define	ID_BUFTYPE_MBUF		2	/* mbuf chain (load_mbuf) */
#define	ID_BUFTYPE_UIO		3	/* uio (load_uio; unimplemented here) */
#define	ID_BUFTYPE_RAW		4	/* raw memory (load_raw; unimplemented) */

/* Prototypes for the ISA DMA entry points and helpers defined below. */
int	_isa_bus_dmamap_create(bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *);
void	_isa_bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int	_isa_bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int);
int	_isa_bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int);
int	_isa_bus_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int);
int	_isa_bus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int);
void	_isa_bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
void	_isa_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int);

int	_isa_bus_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int);

int	_isa_dma_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int);
void	_isa_dma_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);
114 
/*
 * Entry points for ISA DMA.  These are mostly wrappers around
 * the generic functions that understand how to deal with bounce
 * buffers, if necessary.
 */
struct atari_bus_dma_tag isa_bus_dma_tag = {
	ISA_DMA_BOUNCE_THRESHOLD,	/* read as t->_bounce_thresh below */
	0,
	_isa_bus_dmamap_create,
	_isa_bus_dmamap_destroy,
	_isa_bus_dmamap_load,
	_isa_bus_dmamap_load_mbuf,
	_isa_bus_dmamap_load_uio,
	_isa_bus_dmamap_load_raw,
	_isa_bus_dmamap_unload,
	_isa_bus_dmamap_sync,
};
132 
/**********************************************************************
 * bus.h dma interface entry points
 **********************************************************************/

#ifdef ISA_DMA_STATS
/*
 * Simple event counters for debugging/inspection; STAT_DECR warns
 * (rather than wrapping) if a counter would go below zero.
 */
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
		} while (0)
u_long	isa_dma_stats_loads;		/* total map loads */
u_long	isa_dma_stats_bounces;		/* loads that had to bounce */
u_long	isa_dma_stats_nbouncebufs;	/* bounce buffers currently held */
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif
152 
153 static int
isadma_bounce_cookieflags(bus_dma_tag_t const t,bus_dmamap_t const map,int const flags)154 isadma_bounce_cookieflags(bus_dma_tag_t const t, bus_dmamap_t const map,
155     int const flags)
156 {
157 	int cookieflags = 0;
158 
159 	/*
160 	 * ISA only has 24-bits of address space.  This means
161 	 * we can't DMA to pages over 16M.  In order to DMA to
162 	 * arbitrary buffers, we use "bounce buffers" - pages
163 	 * in memory below the 16M boundary.  On DMA reads,
164 	 * DMA happens to the bounce buffers, and is copied into
165 	 * the caller's buffer.  On writes, data is copied into
166 	 * the bounce buffer, and the DMA happens from those
167 	 * pages.  To software using the DMA mapping interface,
168 	 * this looks simply like a data cache.
169 	 *
170 	 * If we have more than 16M of RAM in the system, we may
171 	 * need bounce buffers.  We check and remember that here.
172 	 *
173 	 * There are exceptions, however.  VLB devices can do
174 	 * 32-bit DMA, and indicate that here.
175 	 *
176 	 * ...or, there is an opposite case.  The most segments
177 	 * a transfer will require is (maxxfer / PAGE_SIZE) + 1.  If
178 	 * the caller can't handle that many segments (e.g. the
179 	 * ISA DMA controller), we may have to bounce it as well.
180 	 */
181 	if (avail_end <= t->_bounce_thresh ||
182 	    (flags & ISABUS_DMA_32BIT) != 0) {
183 		/* Bouncing not necessary due to memory size. */
184 		map->_dm_bounce_thresh = 0;
185 	}
186 	if (map->_dm_bounce_thresh != 0 ||
187 	    ((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt) {
188 		cookieflags |= ID_MIGHT_NEED_BOUNCE;
189 	}
190 	return cookieflags;
191 }
192 
193 static size_t
isadma_bounce_cookiesize(bus_dmamap_t const map,int cookieflags)194 isadma_bounce_cookiesize(bus_dmamap_t const map, int cookieflags)
195 {
196 	size_t cookiesize = sizeof(struct atari_isa_dma_cookie);
197 
198 	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
199 		cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
200 	}
201 	return cookiesize;
202 }
203 
204 static int
isadma_bounce_cookie_alloc(bus_dma_tag_t const t,bus_dmamap_t const map,int const flags)205 isadma_bounce_cookie_alloc(bus_dma_tag_t const t, bus_dmamap_t const map,
206     int const flags)
207 {
208 	struct atari_isa_dma_cookie *cookie;
209 	int cookieflags = isadma_bounce_cookieflags(t, map, flags);
210 
211 	if ((cookie = kmem_zalloc(isadma_bounce_cookiesize(map, cookieflags),
212 	     (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL) {
213 		return ENOMEM;
214 	}
215 
216 	cookie->id_flags = cookieflags;
217 	map->_dm_cookie = cookie;
218 
219 	return 0;
220 }
221 
222 static void
isadma_bounce_cookie_free(bus_dmamap_t const map)223 isadma_bounce_cookie_free(bus_dmamap_t const map)
224 {
225 	struct atari_isa_dma_cookie *cookie = map->_dm_cookie;
226 
227 	if (cookie != NULL) {
228 		kmem_free(map->_dm_cookie,
229 		    isadma_bounce_cookiesize(map, cookie->id_flags));
230 		map->_dm_cookie = NULL;
231 	}
232 }
233 
234 /*
235  * Create an ISA DMA map.
236  */
237 int
_isa_bus_dmamap_create(bus_dma_tag_t t,bus_size_t size,int nsegments,bus_size_t maxsegsz,bus_size_t boundary,int flags,bus_dmamap_t * dmamp)238 _isa_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments, bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
239 {
240 	struct atari_isa_dma_cookie *cookie;
241 	bus_dmamap_t map;
242 	int error;
243 
244 	/* Call common function to create the basic map. */
245 	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
246 	    flags, dmamp);
247 	if (error)
248 		return (error);
249 
250 	map = *dmamp;
251 	map->_dm_cookie = NULL;
252 
253 	/*
254 	 * Allocate our cookie.
255 	 */
256 	if ((error = isadma_bounce_cookie_alloc(t, map, flags)) != 0) {
257 		goto out;
258 	}
259 	cookie = map->_dm_cookie;
260 
261 	if (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) {
262 		/*
263 		 * Allocate the bounce pages now if the caller
264 		 * wishes us to do so.
265 		 */
266 		if (flags & BUS_DMA_ALLOCNOW) {
267 			error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
268 		}
269 	}
270 
271  out:
272 	if (error) {
273 		isadma_bounce_cookie_free(map);
274 		_bus_dmamap_destroy(t, map);
275 	}
276 	return (error);
277 }
278 
279 /*
280  * Destroy an ISA DMA map.
281  */
282 void
_isa_bus_dmamap_destroy(bus_dma_tag_t t,bus_dmamap_t map)283 _isa_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
284 {
285 	struct atari_isa_dma_cookie *cookie = map->_dm_cookie;
286 
287 	/*
288 	 * Free any bounce pages this map might hold.
289 	 */
290 	if (cookie->id_flags & ID_HAS_BOUNCE)
291 		_isa_dma_free_bouncebuf(t, map);
292 
293 	isadma_bounce_cookie_free(map);
294 	_bus_dmamap_destroy(t, map);
295 }
296 
/*
 * Load an ISA DMA map with a linear buffer.
 *
 * First tries the normal load; if that fails and the map was flagged
 * ID_MIGHT_NEED_BOUNCE at create time, allocates bounce pages (unless
 * already held) and loads the map with the bounce buffer instead.
 * The caller's buffer is recorded in the cookie so that
 * _isa_bus_dmamap_sync() can copy data between it and the bounce
 * pages.  Returns 0 on success or an errno value.
 */
int
_isa_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct atari_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	STAT_INCR(isa_dma_stats_loads);

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0 || (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0)
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    p, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}
361 
/*
 * Like _isa_bus_dmamap_load(), but for mbufs.
 *
 * m0 must be a packet header mbuf (M_PKTHDR); the whole chain of
 * m0->m_pkthdr.len bytes is loaded.  On the bounce path the chain
 * pointer is stashed in the cookie so _isa_bus_dmamap_sync() can
 * walk it when copying data to/from the bounce buffer.
 */
int
_isa_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	struct atari_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_isa_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error == 0 || (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0)
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.  The bounce buffer is linear, so the
	 * plain load is used here (with a NULL proc: mbufs are kernel
	 * memory).
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}
433 
/*
 * Like _isa_bus_dmamap_load(), but for uios.
 *
 * Not implemented on this platform; any caller reaching this is a
 * kernel bug, so panic rather than fail silently.
 */
int
_isa_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{

	panic("_isa_bus_dmamap_load_uio: not implemented");
}
443 
/*
 * Like _isa_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 *
 * Not implemented on this platform; any caller reaching this is a
 * kernel bug, so panic rather than fail silently.
 */
int
_isa_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_isa_bus_dmamap_load_raw: not implemented");
}
454 
455 /*
456  * Unload an ISA DMA map.
457  */
458 void
_isa_bus_dmamap_unload(bus_dma_tag_t t,bus_dmamap_t map)459 _isa_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
460 {
461 	struct atari_isa_dma_cookie *cookie = map->_dm_cookie;
462 
463 	/*
464 	 * If we have bounce pages, free them, unless they're
465 	 * reserved for our exclusive use.
466 	 */
467 	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
468 	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
469 		_isa_dma_free_bouncebuf(t, map);
470 
471 	cookie->id_flags &= ~ID_IS_BOUNCING;
472 	cookie->id_buftype = ID_BUFTYPE_INVALID;
473 
474 	/*
475 	 * Do the generic bits of the unload.
476 	 */
477 	_bus_dmamap_unload(t, map);
478 }
479 
/*
 * Synchronize an ISA DMA map.
 *
 * For non-bouncing maps this is a no-op.  For a bouncing transfer,
 * PREWRITE copies the caller's data into the bounce buffer (before
 * the device reads it) and POSTREAD copies data from the bounce
 * buffer back to the caller (after the device wrote it).  PREREAD
 * and POSTWRITE need no copying.  The copy method depends on how the
 * map was loaded (cookie->id_buftype).
 */
void
_isa_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct atari_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_isa_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	/* Range-check only the ops that actually copy data. */
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just return; nothing to do.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0)
		return;

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 * m_copydata() handles walking the mbuf chain;
			 * the bounce buffer is linear, so offset indexes
			 * it directly.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 * Walk the chain by hand: skip mbufs wholly
			 * before `offset', then copy piecewise into
			 * each mbuf until `len' is exhausted.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, char *) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				/* moff only offsets the first mbuf. */
				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("_isa_bus_dmamap_sync");
	}
}
607 
608 /*
609  * Allocate memory safe for ISA DMA.
610  */
611 int
_isa_bus_dmamem_alloc(bus_dma_tag_t t,bus_size_t size,bus_size_t alignment,bus_size_t boundary,bus_dma_segment_t * segs,int nsegs,int * rsegs,int flags)612 _isa_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags)
613 {
614 	paddr_t high;
615 
616 	if (avail_end > ISA_DMA_BOUNCE_THRESHOLD)
617 		high = ISA_DMA_BOUNCE_THRESHOLD - 1;
618 	else
619 		high = avail_end - 1;
620 
621 	return (bus_dmamem_alloc_range(t, size, alignment, boundary,
622 	    segs, nsegs, rsegs, flags, 0, high));
623 }
624 
625 /**********************************************************************
626  * ISA DMA utility functions
627  **********************************************************************/
628 
629 int
_isa_dma_alloc_bouncebuf(bus_dma_tag_t t,bus_dmamap_t map,bus_size_t size,int flags)630 _isa_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t size, int flags)
631 {
632 	struct atari_isa_dma_cookie *cookie = map->_dm_cookie;
633 	int error = 0;
634 
635 	cookie->id_bouncebuflen = round_page(size);
636 	error = _isa_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
637 	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
638 	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
639 	if (error)
640 		goto out;
641 	error = bus_dmamem_map(t, cookie->id_bouncesegs,
642 	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
643 	    (void **)&cookie->id_bouncebuf, flags);
644 
645  out:
646 	if (error) {
647 		bus_dmamem_free(t, cookie->id_bouncesegs,
648 		    cookie->id_nbouncesegs);
649 		cookie->id_bouncebuflen = 0;
650 		cookie->id_nbouncesegs = 0;
651 	} else {
652 		cookie->id_flags |= ID_HAS_BOUNCE;
653 		STAT_INCR(isa_dma_stats_nbouncebufs);
654 	}
655 
656 	return (error);
657 }
658 
659 void
_isa_dma_free_bouncebuf(bus_dma_tag_t t,bus_dmamap_t map)660 _isa_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
661 {
662 	struct atari_isa_dma_cookie *cookie = map->_dm_cookie;
663 
664 	STAT_DECR(isa_dma_stats_nbouncebufs);
665 
666 	bus_dmamem_unmap(t, cookie->id_bouncebuf,
667 	    cookie->id_bouncebuflen);
668 	bus_dmamem_free(t, cookie->id_bouncesegs,
669 	    cookie->id_nbouncesegs);
670 	cookie->id_bouncebuflen = 0;
671 	cookie->id_nbouncesegs = 0;
672 	cookie->id_flags &= ~ID_HAS_BOUNCE;
673 }
674