/* $OpenBSD: isa_machdep.c,v 1.31 2020/09/29 03:06:34 guenther Exp $ */
/* $NetBSD: isa_machdep.c,v 1.22 1997/06/12 23:57:32 thorpej Exp $ */

#define ISA_DMA_STATS

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1993, 1994, 1996, 1997
 *	Charles M. Hannum. All rights reserved.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)isa.c	7.2 (Berkeley) 5/13/91
 */
71
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/syslog.h>
75 #include <sys/malloc.h>
76 #include <sys/proc.h>
77
78 #include <uvm/uvm_extern.h>
79
80 #include "ioapic.h"
81
82 #if NIOAPIC > 0
83 #include <machine/i82093var.h>
84 #include <machine/mpbiosvar.h>
85 #endif
86
87 #include <machine/intr.h>
88 #include <machine/i8259.h>
89
90 #include <dev/isa/isavar.h>
91
92 #include "isadma.h"
93
94 extern paddr_t avail_end;
95

#if NISADMA > 0
int _isa_bus_dmamap_create(bus_dma_tag_t, bus_size_t, int,
    bus_size_t, bus_size_t, int, bus_dmamap_t *);
void _isa_bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int _isa_bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
    bus_size_t, struct proc *, int);
int _isa_bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
    struct mbuf *, int);
int _isa_bus_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
    struct uio *, int);
int _isa_bus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
    bus_dma_segment_t *, int, bus_size_t, int);
void _isa_bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
void _isa_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
    bus_addr_t, bus_size_t, int);

int _isa_bus_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t,
    bus_size_t, bus_dma_segment_t *, int, int *, int);

int _isa_dma_check_buffer(void *, bus_size_t, int, bus_size_t,
    struct proc *);
int _isa_dma_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
    bus_size_t, int);
void _isa_dma_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);

/*
 * Entry points for ISA DMA. These are mostly wrappers around
 * the generic functions that understand how to deal with bounce
 * buffers, if necessary.
 */
struct bus_dma_tag isa_bus_dma_tag = {
        NULL,                   /* _cookie */
        _isa_bus_dmamap_create,
        _isa_bus_dmamap_destroy,
        _isa_bus_dmamap_load,
        _isa_bus_dmamap_load_mbuf,
        _isa_bus_dmamap_load_uio,
        _isa_bus_dmamap_load_raw,
        _isa_bus_dmamap_unload,
        _isa_bus_dmamap_sync,
        _isa_bus_dmamem_alloc,
        _bus_dmamem_alloc_range,
        _bus_dmamem_free,
        _bus_dmamem_map,
        _bus_dmamem_unmap,
        _bus_dmamem_mmap,
};
#endif /* NISADMA > 0 */

int intrtype[ICU_LEN], intrlevel[ICU_LEN];
struct intrhand *intrhand[ICU_LEN];

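/*
 * IRQ 2 is the cascade input from the slave 8259A on the master PIC
 * and can never be assigned to a device, so LEGAL_IRQ() excludes it.
 */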
#define LEGAL_IRQ(x)    ((x) >= 0 && (x) < ICU_LEN && (x) != 2)

int
isa_intr_alloc(isa_chipset_tag_t ic, int mask, int type, int *irq)
{
        int i, bestirq, count;
        int tmp;
        struct intrhand **p, *q;

        if (type == IST_NONE)
                panic("intr_alloc: bogus type");

        bestirq = -1;
        count = -1;

        /*
         * Some interrupts should never be dynamically allocated:
         * 0xdef8 excludes IRQ 0 (clock), 1 (keyboard), 2 (slave
         * 8259A cascade), 8 (RTC) and 13 (FPU).
         */
        mask &= 0xdef8;

        /*
         * XXX some interrupts will be used later (6 for fdc, 12 for pms).
         * the right answer is to do "breadth-first" searching of devices.
         */
        mask &= 0xefbf;

        for (i = 0; i < ICU_LEN; i++) {
                if (LEGAL_IRQ(i) == 0 || (mask & (1 << i)) == 0)
                        continue;

                switch (intrtype[i]) {
                case IST_NONE:
                        /*
                         * if nothing's using the irq, just return it
                         */
                        *irq = i;
                        return (0);

                case IST_EDGE:
                case IST_LEVEL:
                        if (type != intrtype[i])
                                continue;
                        /*
                         * if the irq is shareable, count the number of other
                         * handlers, and if it's smaller than the last irq like
                         * this, remember it
                         *
                         * XXX We should probably also consider the
                         * interrupt level and stick IPL_TTY with other
                         * IPL_TTY, etc.
                         */
                        for (p = &intrhand[i], tmp = 0; (q = *p) != NULL;
                             p = &q->ih_next, tmp++)
                                ;
                        if ((bestirq == -1) || (count > tmp)) {
                                bestirq = i;
                                count = tmp;
                        }
                        break;

                case IST_PULSE:
                        /* this just isn't shareable */
                        continue;
                }
        }

        if (bestirq == -1)
                return (1);

        *irq = bestirq;

        return (0);
}
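
/*
 * Example (hypothetical driver attach path, not code from this file):
 * pick the least-loaded shareable IRQ from a candidate mask, then hook
 * a handler on it:
 *
 *	int irq;
 *
 *	if (isa_intr_alloc(ic, 0x0680, IST_EDGE, &irq) == 0)
 *		sc->sc_ih = isa_intr_establish(ic, irq, IST_EDGE,
 *		    IPL_NET, foo_intr, sc, sc->sc_dev.dv_xname);
 *
 * 0x0680 allows IRQs 7, 9 and 10; "foo_intr" and "sc" are placeholders.
 */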

/*
 * Just check to see if an IRQ is available/can be shared.
 * 0 = interrupt not available
 * 1 = interrupt shareable
 * 2 = interrupt all to ourself
 */
int
isa_intr_check(isa_chipset_tag_t ic, int irq, int type)
{
        if (!LEGAL_IRQ(irq) || type == IST_NONE)
                return (0);

        switch (intrtype[irq]) {
        case IST_NONE:
                return (2);
        case IST_LEVEL:
                if (type != intrtype[irq])
                        return (0);
                return (1);
        case IST_EDGE:
        case IST_PULSE:
                if (type != IST_NONE)
                        return (0);
        }
        return (1);
}

/*
 * Set up an interrupt handler to start being called.
 * XXX PRONE TO RACE CONDITIONS, UGLY, 'INTERESTING' INSERTION ALGORITHM.
 */
void *
isa_intr_establish(isa_chipset_tag_t ic, int irq, int type, int level,
    int (*ih_fun)(void *), void *ih_arg, char *ih_what)
{
        struct pic *pic = &i8259_pic;
        int pin = irq;

#if NIOAPIC > 0
        struct mp_intr_map *mip;

        if (mp_busses != NULL) {
                if (mp_isa_bus == NULL)
                        panic("no isa bus");

                for (mip = mp_isa_bus->mb_intrs; mip != NULL;
                     mip = mip->next) {
                        if (mip->bus_pin == pin) {
                                pin = APIC_IRQ_PIN(mip->ioapic_ih);
                                pic = &mip->ioapic->sc_pic;
                                break;
                        }
                }
        }
#endif

        KASSERT(pic);

        return intr_establish(irq, pic, pin, type, level, NULL, ih_fun,
            ih_arg, ih_what);
}

/*
 * Deregister an interrupt handler.
 */
void
isa_intr_disestablish(isa_chipset_tag_t ic, void *arg)
{
        intr_disestablish(arg);
}

void
isa_attach_hook(struct device *parent, struct device *self,
    struct isabus_attach_args *iba)
{
        extern int isa_has_been_seen;

        /*
         * Notify others that might need to know that the ISA bus
         * has now been attached.
         */
        if (isa_has_been_seen)
                panic("isaattach: ISA bus already seen!");
        isa_has_been_seen = 1;
}

#if NISADMA > 0
/**********************************************************************
 * bus.h dma interface entry points
 **********************************************************************/

#ifdef ISA_DMA_STATS
#define STAT_INCR(v)    (v)++
#define STAT_DECR(v)    do { \
                if ((v) == 0) \
                        printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
                else \
                        (v)--; \
                } while (0)
u_long isa_dma_stats_loads;
u_long isa_dma_stats_bounces;
u_long isa_dma_stats_nbouncebufs;
#else
#define STAT_INCR(v)
#define STAT_DECR(v)
#endif

/*
 * Create an ISA DMA map.
 */
int
_isa_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
        struct isa_dma_cookie *cookie;
        bus_dmamap_t map;
        int error, cookieflags;
        void *cookiestore;
        size_t cookiesize;

        /* Call common function to create the basic map. */
        error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
            flags, dmamp);
        if (error)
                return (error);

        map = *dmamp;
        map->_dm_cookie = NULL;

        cookiesize = sizeof(struct isa_dma_cookie);

        /*
         * ISA only has 24-bits of address space. This means
         * we can't DMA to pages over 16M. In order to DMA to
         * arbitrary buffers, we use "bounce buffers" - pages
         * in memory below the 16M boundary. On DMA reads,
         * DMA happens to the bounce buffers, and is copied into
         * the caller's buffer. On writes, data is copied into
         * the bounce buffer, and the DMA happens from those
         * pages. To software using the DMA mapping interface,
         * this looks simply like a data cache.
         *
         * If we have more than 16M of RAM in the system, we may
         * need bounce buffers. We check and remember that here.
         *
         * There are exceptions, however. VLB devices can do
         * 32-bit DMA, and indicate that here.
         *
         * ...or, there is an opposite case. The most segments
         * a transfer will require is (maxxfer / NBPG) + 1. If
         * the caller can't handle that many segments (e.g. the
         * ISA DMA controller), we may have to bounce it as well.
         */
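        /*
         * Bounce when RAM extends past the 16M ISA DMA limit and the
         * device has not asserted 32-bit capability, or when a
         * maximum-sized transfer would need more segments than the
         * map can hold.
         */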
        cookieflags = 0;
        if ((avail_end > ISA_DMA_BOUNCE_THRESHOLD &&
            (flags & ISABUS_DMA_32BIT) == 0) ||
            ((map->_dm_size / NBPG) + 1) > map->_dm_segcnt) {
                cookieflags |= ID_MIGHT_NEED_BOUNCE;
                cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
        }

        /*
         * Allocate our cookie.
         */
        if ((cookiestore = malloc(cookiesize, M_DEVBUF,
            (flags & BUS_DMA_NOWAIT) ?
            (M_NOWAIT|M_ZERO) : (M_WAITOK|M_ZERO))) == NULL) {
                error = ENOMEM;
                goto out;
        }
        cookie = (struct isa_dma_cookie *)cookiestore;
        cookie->id_flags = cookieflags;
        map->_dm_cookie = cookie;

        if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
                /*
                 * Allocate the bounce pages now if the caller
                 * wishes us to do so.
                 */
                if ((flags & BUS_DMA_ALLOCNOW) == 0)
                        goto out;

                error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
        }

 out:
        if (error) {
                free(map->_dm_cookie, M_DEVBUF, cookiesize);
                _bus_dmamap_destroy(t, map);
        }
        return (error);
}

/*
 * Destroy an ISA DMA map.
 */
void
_isa_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
        struct isa_dma_cookie *cookie = map->_dm_cookie;

        /*
         * Free any bounce pages this map might hold.
         */
        if (cookie->id_flags & ID_HAS_BOUNCE)
                _isa_dma_free_bouncebuf(t, map);

        free(cookie, M_DEVBUF, 0);
        _bus_dmamap_destroy(t, map);
}

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
_isa_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
        struct isa_dma_cookie *cookie = map->_dm_cookie;
        int error;

        STAT_INCR(isa_dma_stats_loads);

        /*
         * Check to see if we might need to bounce the transfer.
         */
        if (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) {
                /*
                 * Check if all pages are below the bounce
                 * threshold. If they are, don't bother bouncing.
                 */
                if (_isa_dma_check_buffer(buf, buflen,
                    map->_dm_segcnt, map->_dm_boundary, p) == 0)
                        return (_bus_dmamap_load(t, map, buf, buflen,
                            p, flags));

                STAT_INCR(isa_dma_stats_bounces);

                /*
                 * Allocate bounce pages, if necessary.
                 */
                if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
                        error = _isa_dma_alloc_bouncebuf(t, map, buflen,
                            flags);
                        if (error)
                                return (error);
                }

                /*
                 * Cache a pointer to the caller's buffer and
                 * load the DMA map with the bounce buffer.
                 */
                cookie->id_origbuf = buf;
                cookie->id_origbuflen = buflen;
                error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
                    buflen, p, flags);

                if (error) {
                        /*
                         * Free the bounce pages, unless our resources
                         * are reserved for our exclusive use.
                         */
                        if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
                                _isa_dma_free_bouncebuf(t, map);
                }

                /* ...so _isa_bus_dmamap_sync() knows we're bouncing */
                cookie->id_flags |= ID_IS_BOUNCING;
        } else {
                /*
                 * Just use the generic load function.
                 */
                error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
        }

        return (error);
}

/*
 * Like _isa_bus_dmamap_load(), but for mbufs.
 */
int
_isa_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m,
    int flags)
{
        panic("_isa_bus_dmamap_load_mbuf: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for uios.
 */
int
_isa_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
        panic("_isa_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_isa_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
        panic("_isa_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
_isa_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
        struct isa_dma_cookie *cookie = map->_dm_cookie;

        /*
         * If we have bounce pages, free them, unless they're
         * reserved for our exclusive use.
         */
        if ((cookie->id_flags & ID_HAS_BOUNCE) &&
            (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
                _isa_dma_free_bouncebuf(t, map);

        cookie->id_flags &= ~ID_IS_BOUNCING;

        /*
         * Do the generic bits of the unload.
         */
        _bus_dmamap_unload(t, map);
}

/*
 * Synchronize an ISA DMA map.
 */
void
_isa_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int op)
{
        struct isa_dma_cookie *cookie = map->_dm_cookie;

#ifdef DEBUG
        if ((op & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
                if (offset >= map->dm_mapsize)
                        panic("_isa_bus_dmamap_sync: bad offset");
                if (len == 0 || (offset + len) > map->dm_mapsize)
                        panic("_isa_bus_dmamap_sync: bad length");
        }
#endif
#ifdef DIAGNOSTIC
        if ((op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) != 0 &&
            (op & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)) != 0)
                panic("_isa_bus_dmamap_sync: mix PRE and POST");
#endif /* DIAGNOSTIC */
        /*
         * PREREAD and POSTWRITE are no-ops here: before a device-to-
         * memory transfer there is nothing to copy into the bounce
         * buffer yet, and after a memory-to-device transfer the
         * PREWRITE copy has already pushed the caller's data out.
         */
        if (op & BUS_DMASYNC_PREWRITE) {
                /*
                 * If we're bouncing this transfer, copy the
                 * caller's buffer to the bounce buffer.
                 */
                if (cookie->id_flags & ID_IS_BOUNCING)
                        memcpy(cookie->id_bouncebuf + offset,
                            cookie->id_origbuf + offset, len);
        }

        _bus_dmamap_sync(t, map, offset, len, op);

        if (op & BUS_DMASYNC_POSTREAD) {
                /*
                 * If we're bouncing this transfer, copy the
                 * bounce buffer to the caller's buffer.
                 */
                if (cookie->id_flags & ID_IS_BOUNCING)
                        memcpy(cookie->id_origbuf + offset,
                            cookie->id_bouncebuf + offset, len);
        }
}
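
/*
 * Typical driver usage of a bouncing map (hypothetical sketch, not code
 * from this file): load, sync before the transfer, run the DMA, then
 * sync after it:
 *
 *	bus_dmamap_load(t, map, buf, len, NULL, BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREWRITE);
 *	... start memory-to-device DMA and wait for completion ...
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(t, map);
 *
 * The bounce copies happen transparently inside the PREWRITE and
 * POSTREAD cases above.
 */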

/*
 * Allocate memory safe for ISA DMA.
 */
int
_isa_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
        int error;

        /* Try in ISA addressable region first */
        error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
            segs, nsegs, rsegs, flags, 0, ISA_DMA_BOUNCE_THRESHOLD);
        if (!error)
                return (error);

        /* Otherwise try anywhere (we'll bounce later) */
        error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
            segs, nsegs, rsegs, flags, (bus_addr_t)0, (bus_addr_t)-1);
        return (error);
}

/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

/*
 * Return 0 if all pages in the passed buffer lie within the DMA'able
 * range of RAM.
 */
int
_isa_dma_check_buffer(void *buf, bus_size_t buflen, int segcnt,
    bus_size_t boundary, struct proc *p)
{
        vaddr_t vaddr = (vaddr_t)buf;
        vaddr_t endva;
        paddr_t pa, lastpa;
        u_long pagemask = ~(boundary - 1);
        pmap_t pmap;
        int nsegs;

        endva = round_page(vaddr + buflen);

        nsegs = 1;
        lastpa = 0;

        if (p != NULL)
                pmap = p->p_vmspace->vm_map.pmap;
        else
                pmap = pmap_kernel();

        for (; vaddr < endva; vaddr += NBPG) {
                /*
                 * Get physical address for this segment.
                 */
                pmap_extract(pmap, vaddr, &pa);
                pa = trunc_page(pa);

                /*
                 * Is it below the DMA'able threshold?
                 */
                if (pa > ISA_DMA_BOUNCE_THRESHOLD)
                        return (EINVAL);

                if (lastpa) {
                        /*
                         * Check excessive segment count.
                         */
                        if (lastpa + NBPG != pa) {
                                if (++nsegs > segcnt)
                                        return (EFBIG);
                        }

                        /*
                         * Check the boundary restriction: two pages
                         * fall in the same boundary-sized window iff
                         * their addresses agree in all the bits kept
                         * by pagemask, i.e. ~(boundary - 1).
                         */
                        if (boundary) {
                                if ((lastpa ^ pa) & pagemask)
                                        return (EINVAL);
                        }
                }
                lastpa = pa;
        }

        return (0);
}

int
_isa_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t size,
    int flags)
{
        struct isa_dma_cookie *cookie = map->_dm_cookie;
        int error = 0;

        cookie->id_bouncebuflen = round_page(size);
        error = _bus_dmamem_alloc_range(t, cookie->id_bouncebuflen,
            NBPG, map->_dm_boundary, cookie->id_bouncesegs,
            map->_dm_segcnt, &cookie->id_nbouncesegs, flags,
            0, ISA_DMA_BOUNCE_THRESHOLD);
        if (error)
                goto out;
        error = _bus_dmamem_map(t, cookie->id_bouncesegs,
            cookie->id_nbouncesegs, cookie->id_bouncebuflen,
            (caddr_t *)&cookie->id_bouncebuf, flags);

 out:
        if (error) {
                _bus_dmamem_free(t, cookie->id_bouncesegs,
                    cookie->id_nbouncesegs);
                cookie->id_bouncebuflen = 0;
                cookie->id_nbouncesegs = 0;
        } else {
                cookie->id_flags |= ID_HAS_BOUNCE;
                STAT_INCR(isa_dma_stats_nbouncebufs);
        }

        return (error);
}

void
_isa_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
        struct isa_dma_cookie *cookie = map->_dm_cookie;

        STAT_DECR(isa_dma_stats_nbouncebufs);

        _bus_dmamem_unmap(t, cookie->id_bouncebuf,
            cookie->id_bouncebuflen);
        _bus_dmamem_free(t, cookie->id_bouncesegs,
            cookie->id_nbouncesegs);
        cookie->id_bouncebuflen = 0;
        cookie->id_nbouncesegs = 0;
        cookie->id_flags &= ~ID_HAS_BOUNCE;
}
#endif /* NISADMA > 0 */