/* $NetBSD: isadma_bounce.c,v 1.13 2016/02/29 15:28:35 christos Exp $ */

/*-
 * Copyright (c) 1996, 1997, 1998, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: isadma_bounce.c,v 1.13 2016/02/29 15:28:35 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#define _ALPHA_BUS_DMA_PRIVATE
#include <sys/bus.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>

extern paddr_t avail_end;

/*
 * Cookie used by bouncing ISA DMA.  A pointer to one of these is stashed
 * in the DMA map.
 */
struct isadma_bounce_cookie {
	int	id_flags;		/* flags; see below */

	/*
	 * Information about the original buffer used during
	 * DMA map syncs.  Note that origbuflen is only used
	 * for ID_BUFTYPE_LINEAR.
	 */
	void	*id_origbuf;		/* pointer to orig buffer if
					   bouncing */
	bus_size_t id_origbuflen;	/* ...and size */
	int	id_buftype;		/* type of buffer */

	void	*id_bouncebuf;		/* pointer to the bounce buffer */
	bus_size_t id_bouncebuflen;	/* ...and size */
	int	id_nbouncesegs;		/* number of valid bounce segs */
	bus_dma_segment_t id_bouncesegs[1]; /* array of bounce buffer
					       physical memory segments */
};

/* id_flags */
#define	ID_MIGHT_NEED_BOUNCE	0x01	/* map could need bounce buffers */
#define	ID_HAS_BOUNCE		0x02	/* map currently has bounce buffers */
#define	ID_IS_BOUNCING		0x04	/* map is bouncing current xfer */

/* id_buftype */
#define	ID_BUFTYPE_INVALID	0
#define	ID_BUFTYPE_LINEAR	1
#define	ID_BUFTYPE_MBUF		2
#define	ID_BUFTYPE_UIO		3
#define	ID_BUFTYPE_RAW		4
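/*
 * Illustrative note (not from the original source): when a map might
 * need to bounce, isadma_bounce_dmamap_create() below sizes the cookie
 * with room for the map's full complement of bounce segments, i.e.
 * roughly:
 *
 *	cookiesize = sizeof(struct isadma_bounce_cookie) +
 *	    sizeof(bus_dma_segment_t) * (map->_dm_segcnt - 1);
 *	cookie = malloc(cookiesize, M_DMAMAP, M_WAITOK);
 *
 * The "- 1" accounts for the single id_bouncesegs[] element already
 * declared in the structure above.
 */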
int	isadma_bounce_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int);
void	isadma_bounce_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);

/*
 * Create an ISA DMA map.
 */
int
isadma_bounce_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct isadma_bounce_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(*cookie);

	/*
	 * ISA only has 24 bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / PAGE_SIZE) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 */
	cookieflags = 0;
	if (avail_end > (t->_wbase + t->_wsize) ||
	    ((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt) {
		cookieflags |= ID_MIGHT_NEED_BOUNCE;
		cookiesize += (sizeof(bus_dma_segment_t) *
		    (map->_dm_segcnt - 1));
	}

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct isadma_bounce_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = isadma_bounce_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DMAMAP);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}
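/*
 * Illustrative sketch (not part of the original file): a driver on an
 * ISA bus normally reaches this code through the generic bus_dma(9)
 * interface rather than by calling it directly.  Assuming a hypothetical
 * softc "sc" holding the ISA DMA tag and a map pointer, creating a map
 * whose bounce pages are reserved up front might look like:
 *
 *	error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_dmamap);
 *	if (error)
 *		return error;
 *
 * With BUS_DMA_ALLOCNOW, isadma_bounce_alloc_bouncebuf() runs at create
 * time, so later loads do not have to allocate bounce pages on the fly.
 */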
/*
 * Destroy an ISA DMA map.
 */
void
isadma_bounce_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		isadma_bounce_free_bouncebuf(t, map);

	free(cookie, M_DMAMAP);
	_bus_dmamap_destroy(t, map);
}

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
isadma_bounce_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    size_t buflen, struct proc *p, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_direct(t, map, buf, buflen, p, flags);
	if (error == 0 || (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0)
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = isadma_bounce_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load_direct(t, map, cookie->id_bouncebuf, buflen,
	    p, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			isadma_bounce_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	map->_dm_window = t;
	return (0);
}
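/*
 * Illustrative sketch (not part of the original file): from a driver's
 * point of view bouncing is invisible; a load either succeeds or fails
 * as usual.  Assuming the hypothetical softc "sc" from the earlier
 * sketch and a kernel buffer "buf" of "buflen" bytes:
 *
 *	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, buflen,
 *	    NULL, BUS_DMA_NOWAIT);
 *	if (error)
 *		return error;
 *
 * If buf lies above the ISA 16M limit, the map is silently loaded with
 * the bounce buffer instead and ID_IS_BOUNCING is set in the cookie.
 */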
/*
 * Like isadma_bounce_dmamap_load(), but for mbufs.
 */
int
isadma_bounce_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("isadma_bounce_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf_direct(t, map, m0, flags);
	if (error == 0 || (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0)
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = isadma_bounce_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load_direct(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			isadma_bounce_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	map->_dm_window = t;
	return (0);
}

/*
 * Like isadma_bounce_dmamap_load(), but for uios.
 */
int
isadma_bounce_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{

	panic("isadma_bounce_dmamap_load_uio: not implemented");
}

/*
 * Like isadma_bounce_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
isadma_bounce_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("isadma_bounce_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
isadma_bounce_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		isadma_bounce_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;
	cookie->id_buftype = ID_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}
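/*
 * Illustrative sketch (not part of the original file): a driver that is
 * finished with a mapping unloads it, and destroys the map only when the
 * map itself is no longer needed.  Assuming the hypothetical softc "sc"
 * from the earlier sketches:
 *
 *	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
 *	...
 *	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
 *
 * If the map was created with BUS_DMA_ALLOCNOW, the bounce pages are
 * kept across unloads and are only released by the destroy.
 */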
/*
 * Synchronize an ISA DMA map.
 */
void
isadma_bounce_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("isadma_bounce_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("isadma_bounce_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("isadma_bounce_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just drain the write buffer
	 * and return.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0) {
		alpha_mb();
		return;
	}

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, char *) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("isadma_bounce_dmamap_sync");
	}

	/* Drain the write buffer. */
	alpha_mb();
}
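/*
 * Illustrative sketch (not part of the original file): the PRE/POST
 * sync operations bracket a single transfer.  For a device write the
 * driver syncs with PREWRITE before starting the DMA, which copies the
 * caller's data into the bounce buffer; for a device read it syncs with
 * POSTREAD after the DMA completes, copying the bounce buffer back out.
 * Assuming the hypothetical softc "sc" from the earlier sketches:
 *
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, buflen,
 *	    BUS_DMASYNC_PREWRITE);
 *	... start the transfer ...
 *
 *	... later, in the completion path of a read ...
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, buflen,
 *	    BUS_DMASYNC_POSTREAD);
 */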
/*
 * Allocate memory safe for ISA DMA.
 */
int
isadma_bounce_dmamem_alloc(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags)
{
	paddr_t high;

	if (avail_end > ISA_DMA_BOUNCE_THRESHOLD)
		high = trunc_page(ISA_DMA_BOUNCE_THRESHOLD);
	else
		high = trunc_page(avail_end);

	return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, high));
}

/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

int
isadma_bounce_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
    bus_size_t size, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = isadma_bounce_dmamem_alloc(t, cookie->id_bouncebuflen,
	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (void **)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else
		cookie->id_flags |= ID_HAS_BOUNCE;

	return (error);
}

void
isadma_bounce_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}
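/*
 * Illustrative sketch (not part of the original file): a driver that
 * wants memory it can always DMA to without bouncing can allocate it
 * through the tag, which routes to isadma_bounce_dmamem_alloc() above
 * and constrains the pages to lie below ISA_DMA_BOUNCE_THRESHOLD.
 * Assuming hypothetical locals "seg", "rseg" and "kva":
 *
 *	error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
 *	if (error == 0)
 *		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
 *		    &kva, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
 */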