/* $NetBSD: isadma_bounce.c,v 1.11 2011/07/10 00:03:53 matt Exp $ */

/*-
 * Copyright (c) 1996, 1997, 1998, 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: isadma_bounce.c,v 1.11 2011/07/10 00:03:53 matt Exp $");

#define _MIPS_BUS_DMA_PRIVATE

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <mips/cache.h>
#include <mips/locore.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>

#include <uvm/uvm_extern.h>

/*
 * Internal helpers that manage the bounce buffer hung off a DMA map's
 * cookie; definitions are at the bottom of this file.
 */
int	isadma_bounce_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int);
void	isadma_bounce_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);

/*
 * Create an ISA DMA map.
 *
 * Builds the basic map with _bus_dmamap_create() and attaches a bounce
 * cookie to it.  If transfers through this map might need bouncing (see
 * the test below), the cookie is sized to also hold a shadow segment
 * array, and the bounce pages themselves are allocated up front when the
 * caller passes BUS_DMA_ALLOCNOW.
 */
int
isadma_bounce_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct mips_bus_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(*cookie);

	/*
	 * ISA only has 24-bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / PAGE_SIZE) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 */
	cookieflags = 0;
	if (_BUS_AVAIL_END > (t->_wbase + t->_bounce_alloc_hi - t->_bounce_alloc_lo)
	    || ((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt) {
		/*
		 * Reserve room in the cookie for the bounce segment
		 * array.  "_dm_segcnt - 1" extra entries: presumably
		 * the cookie structure embeds the first segment —
		 * TODO(review): confirm against struct
		 * mips_bus_dma_cookie.
		 */
		cookieflags |= _BUS_DMA_MIGHT_NEED_BOUNCE;
		cookiesize += (sizeof(bus_dma_segment_t) *
		    (map->_dm_segcnt - 1));
	}

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct mips_bus_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & _BUS_DMA_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = isadma_bounce_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		/* Undo everything: release the cookie and the basic map. */
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DMAMAP);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}

/*
 * Destroy an ISA DMA map.
 *
 * Releases the bounce pages (if this map holds any), then the cookie,
 * then the underlying map.
 */
void
isadma_bounce_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & _BUS_DMA_HAS_BOUNCE)
		isadma_bounce_free_bouncebuf(t, map);

	free(cookie, M_DMAMAP);
	_bus_dmamap_destroy(t, map);
}

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
isadma_bounce_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & _BUS_DMA_HAS_BOUNCE) == 0) {
		error = isadma_bounce_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = _BUS_DMA_BUFTYPE_LINEAR;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    p, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			isadma_bounce_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= _BUS_DMA_IS_BOUNCING;
	return (0);
}

/*
 * Like isadma_bounce_dmamap_load(), but for mbufs.
 *
 * The original mbuf chain pointer is stashed in the cookie so the sync
 * routine can copy between the chain and the bounce buffer.
 */
int
isadma_bounce_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("isadma_bounce_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & _BUS_DMA_HAS_BOUNCE) == 0) {
		error = isadma_bounce_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.  The bounce buffer itself is linear,
	 * so the plain load routine is used even though the original
	 * buffer is an mbuf chain.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = _BUS_DMA_BUFTYPE_MBUF;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			isadma_bounce_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= _BUS_DMA_IS_BOUNCING;
	return (0);
}

/*
 * Like isadma_bounce_dmamap_load(), but for uios.
 * Unimplemented on this platform; always panics.
 */
int
isadma_bounce_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{

	panic("isadma_bounce_dmamap_load_uio: not implemented");
}

/*
 * Like isadma_bounce_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
isadma_bounce_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	/* Unimplemented on this platform; always panics. */
	panic("isadma_bounce_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 *
 * Drops the bounce pages (unless reserved via BUS_DMA_ALLOCNOW), clears
 * the bouncing state, and hands off to the generic unload.
 */
void
isadma_bounce_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & _BUS_DMA_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		isadma_bounce_free_bouncebuf(t, map);

	cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
	cookie->id_buftype = _BUS_DMA_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}

/*
 * Synchronize an ISA DMA map.
 *
 * When the map is not bouncing this is just the generic sync.  When it
 * is, data is staged through the bounce buffer: PREWRITE copies the
 * caller's buffer into the bounce pages before the device reads them,
 * and POSTREAD copies the device's data back out to the caller's buffer.
 */
void
isadma_bounce_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("isadma_bounce_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("isadma_bounce_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("isadma_bounce_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just do the normal sync operation
	 * and return.
	 */
	if ((cookie->id_flags & _BUS_DMA_IS_BOUNCING) == 0) {
		_bus_dmamap_sync(t, map, offset, len, ops);
		return;
	}

	/*
	 * Flush data cache for PREREAD.  This has the side-effect
	 * of invalidating the cache.  Done at PREREAD since it
	 * causes the cache line(s) to be written back to memory.
	 *
	 * Copy the original buffer to the bounce buffer and flush
	 * the data cache for PREWRITE, so that the contents
	 * of the data buffer in memory reflect reality.
	 *
	 * Copy the bounce buffer to the original buffer in POSTREAD.
	 */

	switch (cookie->id_buftype) {
	case _BUS_DMA_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
			wbflush();
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case _BUS_DMA_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer,
			 * walking the chain one mbuf at a time.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, char *) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case _BUS_DMA_BUFTYPE_UIO:
		panic("isadma_bounce_dmamap_sync: _BUS_DMA_BUFTYPE_UIO");
		break;

	case _BUS_DMA_BUFTYPE_RAW:
		panic("isadma_bounce_dmamap_sync: _BUS_DMA_BUFTYPE_RAW");
		break;

	case _BUS_DMA_BUFTYPE_INVALID:
		panic("isadma_bounce_dmamap_sync: _BUS_DMA_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("isadma_bounce_dmamap_sync");
	}

	/* Drain the write buffer. */
	wbflush();

	/*
	 * XXXJRT: for PRE ops, write back and invalidate the bounce
	 * buffer's range in the data cache before the device accesses it.
	 * NOTE(review): in the mbuf POSTREAD path above, offset/len were
	 * consumed by the copy loop — harmless here since this only runs
	 * for PRE ops, which cannot be mixed with POST.
	 */
	if (ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE))
		mips_dcache_wbinv_range((vaddr_t)cookie->id_bouncebuf + offset,
		    len);
}

/*
 * Allocate memory safe for ISA DMA.
 *
 * Restricts the allocation to physical pages below the ISA bounce
 * threshold (or below the end of available memory, whichever is lower).
 */
int
isadma_bounce_dmamem_alloc(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags)
{
	paddr_t high;

	if (_BUS_AVAIL_END > ISA_DMA_BOUNCE_THRESHOLD)
		high = trunc_page(ISA_DMA_BOUNCE_THRESHOLD);
	else
		high = trunc_page(_BUS_AVAIL_END);

	return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, high));
}

/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

/*
 * Allocate and map a bounce buffer of at least `size' bytes (rounded up
 * to whole pages) for `map', recording it in the map's cookie and setting
 * _BUS_DMA_HAS_BOUNCE on success.
 */
int
isadma_bounce_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
    bus_size_t size, int flags)
{
	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = isadma_bounce_dmamem_alloc(t, cookie->id_bouncebuflen,
	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (void **)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		/*
		 * NOTE(review): this path also runs when the allocation
		 * itself failed; it is harmless only if a failed
		 * ..._dmamem_alloc() leaves id_nbouncesegs untouched
		 * (zero from the cookie memset / previous cleanup) —
		 * verify against _bus_dmamem_alloc_range().
		 */
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else
		cookie->id_flags |= _BUS_DMA_HAS_BOUNCE;

	return (error);
}

/*
 * Unmap and free a map's bounce buffer, resetting the cookie's bounce
 * state and clearing _BUS_DMA_HAS_BOUNCE.
 */
void
isadma_bounce_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~_BUS_DMA_HAS_BOUNCE;
}