/*	$OpenBSD: isa_machdep.c,v 1.28 2015/09/27 10:12:09 semarie Exp $	*/
/*	$NetBSD: isa_machdep.c,v 1.22 1997/06/12 23:57:32 thorpej Exp $	*/

#define ISA_DMA_STATS

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1993, 1994, 1996, 1997
 *	Charles M. Hannum.  All rights reserved.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)isa.c	7.2 (Berkeley) 5/13/91
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include "ioapic.h"

#if NIOAPIC > 0
#include <machine/i82093var.h>
#include <machine/mpbiosvar.h>
#endif

#include <machine/bus.h>

#include <machine/intr.h>
#include <machine/pio.h>
#include <machine/cpufunc.h>
#include <machine/i8259.h>

#include <dev/isa/isavar.h>
#if 0
#include <dev/isa/isadmavar.h>
#endif
#include <i386/isa/isa_machdep.h>

#include "isadma.h"

extern	paddr_t avail_end;

#define	IDTVEC(name)	__CONCAT(X,name)
/* default interrupt vector table entries */
typedef int (*vector)(void);
extern vector IDTVEC(intr)[];
void isa_strayintr(int);
int fakeintr(void *);

#if NISADMA > 0
int	_isa_bus_dmamap_create(bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *);
void	_isa_bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int	_isa_bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int);
int	_isa_bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int);
int	_isa_bus_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int);
int	_isa_bus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int);
void	_isa_bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
void	_isa_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int);

int	_isa_bus_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int);

int	_isa_dma_check_buffer(void *, bus_size_t, int, bus_size_t,
	    struct proc *);
int	_isa_dma_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int);
void	_isa_dma_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);

/*
 * Entry points for ISA DMA.  These are mostly wrappers around
 * the generic functions that understand how to deal with bounce
 * buffers, if necessary.
 */
struct bus_dma_tag isa_bus_dma_tag = {
	NULL,			/* _cookie */
	_isa_bus_dmamap_create,
	_isa_bus_dmamap_destroy,
	_isa_bus_dmamap_load,
	_isa_bus_dmamap_load_mbuf,
	_isa_bus_dmamap_load_uio,
	_isa_bus_dmamap_load_raw,
	_isa_bus_dmamap_unload,
	_isa_bus_dmamap_sync,
	_isa_bus_dmamem_alloc,
	_bus_dmamem_alloc_range,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};
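
/*
 * Illustrative (hypothetical) use of this tag through the standard
 * bus_dma(9) entry points, which simply dispatch via the function
 * pointers above:
 *
 *	bus_dmamap_t map;
 *
 *	bus_dmamap_create(&isa_bus_dma_tag, size, 1, size, 0,
 *	    BUS_DMA_NOWAIT, &map);
 *	bus_dmamap_load(&isa_bus_dma_tag, map, buf, size, NULL,
 *	    BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(&isa_bus_dma_tag, map, 0, size,
 *	    BUS_DMASYNC_PREWRITE);
 *	... start the transfer ...
 */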
#endif /* NISADMA > 0 */

#define	GICODE_SEL	10

u_long	intrstray[ICU_LEN];

/*
 * Caught a stray interrupt, notify
 */
void
isa_strayintr(int irq)
{
	/*
	 * Stray interrupts on irq 7 occur when an interrupt line is raised
	 * and then lowered before the CPU acknowledges it.  This generally
	 * means either the device is screwed or something is cli'ing too
	 * long and it's timing out.
	 */
	if (++intrstray[irq] <= 5)
		log(LOG_ERR, "stray interrupt %d%s\n", irq,
		    intrstray[irq] >= 5 ? "; stopped logging" : "");
}

int intrtype[ICU_LEN], intrmask[ICU_LEN], intrlevel[ICU_LEN];
int iminlevel[ICU_LEN], imaxlevel[ICU_LEN];
struct intrhand *intrhand[ICU_LEN];
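
/*
 * Do-nothing interrupt handler: returning 0 reports the interrupt as
 * "not handled", so this can stand in wherever a non-NULL handler
 * pointer is required.
 */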
int
fakeintr(void *arg)
{
	return 0;
}

#define	LEGAL_IRQ(x)	((x) >= 0 && (x) < ICU_LEN && (x) != 2)

int
isa_intr_alloc(isa_chipset_tag_t ic, int mask, int type, int *irq)
{
	int i, bestirq, count;
	int tmp;
	struct intrhand **p, *q;

	if (type == IST_NONE)
		panic("intr_alloc: bogus type");

	bestirq = -1;
	count = -1;

	/* some interrupts should never be dynamically allocated */
	mask &= 0xdef8;

	/*
	 * XXX some interrupts will be used later (6 for fdc, 12 for pms).
	 * the right answer is to do "breadth-first" searching of devices.
	 */
	mask &= 0xefbf;

	for (i = 0; i < ICU_LEN; i++) {
		if (LEGAL_IRQ(i) == 0 || (mask & (1<<i)) == 0)
			continue;

		switch(intrtype[i]) {
		case IST_NONE:
			/*
			 * if nothing's using the irq, just return it
			 */
			*irq = i;
			return (0);

		case IST_EDGE:
		case IST_LEVEL:
			if (type != intrtype[i])
				continue;
			/*
			 * if the irq is shareable, count the number of other
			 * handlers, and if it's smaller than the last irq like
			 * this, remember it
			 *
			 * XXX We should probably also consider the
			 * interrupt level and stick IPL_TTY with other
			 * IPL_TTY, etc.
			 */
			for (p = &intrhand[i], tmp = 0; (q = *p) != NULL;
			     p = &q->ih_next, tmp++)
				;
			if ((bestirq == -1) || (count > tmp)) {
				bestirq = i;
				count = tmp;
			}
			break;

		case IST_PULSE:
			/* this just isn't shareable */
			continue;
		}
	}

	if (bestirq == -1)
		return (1);

	*irq = bestirq;

	return (0);
}
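
/*
 * Illustrative (hypothetical) caller of isa_intr_alloc(): bit i of
 * "mask" offers IRQ i, so a device that can be strapped to IRQ 3, 4
 * or 5 would pass 0x0038:
 *
 *	int irq;
 *
 *	if (isa_intr_alloc(ic, 0x0038, IST_EDGE, &irq) != 0)
 *		return;		(no free or shareable IRQ in the mask)
 */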

/*
 * Just check to see if an IRQ is available/can be shared.
 * 0 = interrupt not available
 * 1 = interrupt shareable
 * 2 = interrupt all to ourself
 */
int
isa_intr_check(isa_chipset_tag_t ic, int irq, int type)
{
	if (!LEGAL_IRQ(irq) || type == IST_NONE)
		return (0);

	switch (intrtype[irq]) {
	case IST_NONE:
		return (2);
		break;
	case IST_LEVEL:
		if (type != intrtype[irq])
			return (0);
		return (1);
		break;
	case IST_EDGE:
	case IST_PULSE:
		if (type != IST_NONE)
			return (0);
	}
	return (1);
}

/*
 * Set up an interrupt handler to start being called.
 * XXX PRONE TO RACE CONDITIONS, UGLY, 'INTERESTING' INSERTION ALGORITHM.
 */
void *
isa_intr_establish(isa_chipset_tag_t ic, int irq, int type, int level,
    int (*ih_fun)(void *), void *ih_arg, char *ih_what)
{
	struct pic *pic = &i8259_pic;
	int pin = irq;

#if NIOAPIC > 0
	struct mp_intr_map *mip;

	if (mp_busses != NULL) {
		if (mp_isa_bus == NULL)
			panic("no isa bus");

		for (mip = mp_isa_bus->mb_intrs; mip != NULL;
		    mip = mip->next) {
			if (mip->bus_pin == pin) {
				pin = APIC_IRQ_PIN(mip->ioapic_ih);
				pic = &mip->ioapic->sc_pic;
				break;
			}
		}
	}
#endif

	KASSERT(pic);

	return intr_establish(irq, pic, pin, type, level, ih_fun,
	    ih_arg, ih_what);
}
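
/*
 * Illustrative (hypothetical) attach-time use; the names are made up,
 * but this is the usual pattern in ISA drivers:
 *
 *	sc->sc_ih = isa_intr_establish(ia->ia_ic, ia->ia_irq, IST_EDGE,
 *	    IPL_NET, xxintr, sc, sc->sc_dev.dv_xname);
 *	if (sc->sc_ih == NULL)
 *		printf(": couldn't establish interrupt\n");
 */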

/*
 * Deregister an interrupt handler.
 */
void
isa_intr_disestablish(isa_chipset_tag_t ic, void *arg)
{
	intr_disestablish(arg);
	return;
}

void
isa_attach_hook(struct device *parent, struct device *self,
    struct isabus_attach_args *iba)
{
	extern int isa_has_been_seen;

	/*
	 * Notify others that might need to know that the ISA bus
	 * has now been attached.
	 */
	if (isa_has_been_seen)
		panic("isaattach: ISA bus already seen!");
	isa_has_been_seen = 1;
}

#if NISADMA > 0
/**********************************************************************
 * bus.h dma interface entry points
 **********************************************************************/

#ifdef ISA_DMA_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
	} while (0)
u_long	isa_dma_stats_loads;
u_long	isa_dma_stats_bounces;
u_long	isa_dma_stats_nbouncebufs;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif

/*
 * Create an ISA DMA map.
 */
int
_isa_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct isa_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(struct isa_dma_cookie);

	/*
	 * ISA only has 24-bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * There are exceptions, however.  VLB devices can do
	 * 32-bit DMA, and indicate that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / NBPG) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 */
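	/*
	 * (Worked example for the segment-count case, assuming
	 * NBPG = 4096: a 64KB transfer spans at most 64KB/4KB + 1 = 17
	 * pages when the buffer is not page aligned, so a device that
	 * can handle fewer segments than that must be bounced through
	 * one physically contiguous buffer.)
	 */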
	cookieflags = 0;
	if ((avail_end > ISA_DMA_BOUNCE_THRESHOLD &&
	    (flags & ISABUS_DMA_32BIT) == 0) ||
	    ((map->_dm_size / NBPG) + 1) > map->_dm_segcnt) {
		cookieflags |= ID_MIGHT_NEED_BOUNCE;
		cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
	}

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DEVBUF,
	    (flags & BUS_DMA_NOWAIT) ?
	    (M_NOWAIT|M_ZERO) : (M_WAITOK|M_ZERO))) == NULL) {
		error = ENOMEM;
		goto out;
	}
	cookie = (struct isa_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		free(map->_dm_cookie, M_DEVBUF, cookiesize);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}

/*
 * Destroy an ISA DMA map.
 */
void
_isa_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		_isa_dma_free_bouncebuf(t, map);

	free(cookie, M_DEVBUF, 0);
	_bus_dmamap_destroy(t, map);
}

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
_isa_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	STAT_INCR(isa_dma_stats_loads);

	/*
	 * Check to see if we might need to bounce the transfer.
	 */
	if (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Check if all pages are below the bounce
		 * threshold.  If they are, don't bother bouncing.
		 */
		if (_isa_dma_check_buffer(buf, buflen,
		    map->_dm_segcnt, map->_dm_boundary, p) == 0)
			return (_bus_dmamap_load(t, map, buf, buflen,
			    p, flags));

		STAT_INCR(isa_dma_stats_bounces);

		/*
		 * Allocate bounce pages, if necessary.
		 */
		if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
			error = _isa_dma_alloc_bouncebuf(t, map, buflen,
			    flags);
			if (error)
				return (error);
		}

		/*
		 * Cache a pointer to the caller's buffer and
		 * load the DMA map with the bounce buffer.
		 */
		cookie->id_origbuf = buf;
		cookie->id_origbuflen = buflen;
		error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
		    buflen, p, flags);

		if (error) {
			/*
			 * Free the bounce pages, unless our resources
			 * are reserved for our exclusive use.
			 */
			if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
				_isa_dma_free_bouncebuf(t, map);
		}

		/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
		cookie->id_flags |= ID_IS_BOUNCING;
	} else {
		/*
		 * Just use the generic load function.
		 */
		error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	}

	return (error);
}

/*
 * Like _isa_bus_dmamap_load(), but for mbufs.
 */
int
_isa_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m,
    int flags)
{

	panic("_isa_bus_dmamap_load_mbuf: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for uios.
 */
int
_isa_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{

	panic("_isa_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_isa_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_isa_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
_isa_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		_isa_dma_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}

/*
 * Synchronize an ISA DMA map.
 */
void
_isa_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int op)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;

#ifdef DEBUG
	if ((op & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad length");
	}
#endif
#ifdef DIAGNOSTIC
	if ((op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) != 0 &&
	    (op & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_isa_bus_dmamap_sync: mix PRE and POST");
#endif /* DIAGNOSTIC */

	/* PREREAD and POSTWRITE are no-ops */
	if (op & BUS_DMASYNC_PREWRITE) {
		/*
		 * If we're bouncing this transfer, copy the
		 * caller's buffer to the bounce buffer.
		 */
		if (cookie->id_flags & ID_IS_BOUNCING)
			memcpy(cookie->id_bouncebuf + offset,
			    cookie->id_origbuf + offset, len);
	}

	_bus_dmamap_sync(t, map, offset, len, op);

	if (op & BUS_DMASYNC_POSTREAD) {
		/*
		 * If we're bouncing this transfer, copy the
		 * bounce buffer to the caller's buffer.
		 */
		if (cookie->id_flags & ID_IS_BOUNCING)
			memcpy(cookie->id_origbuf + offset,
			    cookie->id_bouncebuf + offset, len);
	}
}
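
/*
 * Illustrative (hypothetical) ordering of the sync calls around a
 * transfer, showing where the bounce copies above take effect:
 *
 *	memory-to-device (output):
 *		bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREWRITE);
 *		... run the DMA ...
 *		bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *
 *	device-to-memory (input):
 *		bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREREAD);
 *		... run the DMA ...
 *		bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTREAD);
 */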

/*
 * Allocate memory safe for ISA DMA.
 */
int
_isa_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	int error;

	/* Try in ISA addressable region first */
	error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, ISA_DMA_BOUNCE_THRESHOLD);
	if (!error)
		return (error);

	/* Otherwise try anywhere (we'll bounce later) */
	error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, (bus_addr_t)0, (bus_addr_t)-1);
	return (error);
}

/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

/*
 * Return 0 if all pages in the passed buffer lie within the DMA'able
 * range of RAM.
 */
int
_isa_dma_check_buffer(void *buf, bus_size_t buflen, int segcnt,
    bus_size_t boundary, struct proc *p)
{
	vaddr_t vaddr = (vaddr_t)buf;
	vaddr_t endva;
	paddr_t pa, lastpa;
	u_long pagemask = ~(boundary - 1);
	pmap_t pmap;
	int nsegs;

	endva = round_page(vaddr + buflen);

	nsegs = 1;
	lastpa = 0;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (; vaddr < endva; vaddr += NBPG) {
		/*
		 * Get physical address for this segment.
		 */
		pmap_extract(pmap, (vaddr_t)vaddr, &pa);
		pa = trunc_page(pa);

		/*
		 * Is it below the DMA'able threshold?
		 */
		if (pa > ISA_DMA_BOUNCE_THRESHOLD)
			return (EINVAL);

		if (lastpa) {
			/*
			 * Check excessive segment count.
			 */
			if (lastpa + NBPG != pa) {
				if (++nsegs > segcnt)
					return (EFBIG);
			}

			/*
			 * Check boundary restriction.
			 */
			if (boundary) {
				if ((lastpa ^ pa) & pagemask)
					return (EINVAL);
			}
		}
		lastpa = pa;
	}

	return (0);
}

int
_isa_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t size,
    int flags)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = _bus_dmamem_alloc_range(t, cookie->id_bouncebuflen,
	    NBPG, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags,
	    0, ISA_DMA_BOUNCE_THRESHOLD);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (caddr_t *)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else {
		cookie->id_flags |= ID_HAS_BOUNCE;
		STAT_INCR(isa_dma_stats_nbouncebufs);
	}

	return (error);
}

void
_isa_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;

	STAT_DECR(isa_dma_stats_nbouncebufs);

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}
#endif /* NISADMA > 0 */