/*	$NetBSD: bus_dma.c,v 1.47 2005/12/24 20:06:47 perry Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _ARM32_BUS_DMA_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.47 2005/12/24 20:06:47 perry Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/cpu.h>

#include <arm/cpufunc.h>

int	_bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int);
struct arm32_dma_range *_bus_dma_inrange(struct arm32_dma_range *,
	    int, bus_addr_t);

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}
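
/*
 * Illustrative sketch: how a hypothetical platform with a 64MB
 * DMA-capable window at CPU physical 0xc0000000, visible to devices at
 * bus address 0x00000000, might describe that window and test a page
 * against it.  The table and the example_* names are assumptions for
 * illustration only, not part of any real port.
 */
#if 0
static struct arm32_dma_range example_dma_ranges[] = {
	{
		.dr_sysbase = 0xc0000000UL,	/* CPU physical base */
		.dr_busbase = 0x00000000UL,	/* base as seen by devices */
		.dr_len     = 64 * 1024 * 1024,	/* 64MB window */
	},
};

static int
example_page_is_dmaable(paddr_t pa)
{

	/* Non-NULL means the page lies inside an allowed DMA range. */
	return (_bus_dma_inrange(example_dma_ranges, 1, pa) != NULL);
}
#endif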

/*
 * Common function to load the specified physical address into the
 * DMA map, coalescing segments and boundary checking as necessary.
 */
static int
_bus_dmamap_load_paddr(bus_dma_tag_t t, bus_dmamap_t map,
    bus_addr_t paddr, bus_size_t size)
{
	bus_dma_segment_t * const segs = map->dm_segs;
	int nseg = map->dm_nsegs;
	bus_addr_t lastaddr = 0xdead;	/* XXX gcc */
	bus_addr_t bmask = ~(map->_dm_boundary - 1);
	bus_addr_t curaddr;
	bus_size_t sgsize;

	if (nseg > 0)
		lastaddr = segs[nseg-1].ds_addr + segs[nseg-1].ds_len;
 again:
	sgsize = size;

	/* Make sure we're in an allowed DMA range. */
	if (t->_ranges != NULL) {
		/* XXX cache last result? */
		const struct arm32_dma_range * const dr =
		    _bus_dma_inrange(t->_ranges, t->_nranges, paddr);
		if (dr == NULL)
			return (EINVAL);

		/*
		 * In a valid DMA range.  Translate the physical
		 * memory address to an address in the DMA window.
		 */
		curaddr = (paddr - dr->dr_sysbase) + dr->dr_busbase;
	} else
		curaddr = paddr;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	if (map->_dm_boundary > 0) {
		bus_addr_t baddr;	/* next boundary address */

		baddr = (curaddr + map->_dm_boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}

	/*
	 * Insert chunk into a segment, coalescing with the
	 * previous segment if possible.
	 */
	if (nseg > 0 && curaddr == lastaddr &&
	    segs[nseg-1].ds_len + sgsize <= map->dm_maxsegsz &&
	    (map->_dm_boundary == 0 ||
	     (segs[nseg-1].ds_addr & bmask) == (curaddr & bmask))) {
		/* coalesce */
		segs[nseg-1].ds_len += sgsize;
	} else if (nseg >= map->_dm_segcnt) {
		return (EFBIG);
	} else {
		/* new segment */
		segs[nseg].ds_addr = curaddr;
		segs[nseg].ds_len = sgsize;
		nseg++;
	}

	lastaddr = curaddr + sgsize;

	paddr += sgsize;
	size -= sgsize;
	if (size > 0)
		goto again;

	map->dm_nsegs = nseg;
	return (0);
}
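
/*
 * Worked example of the boundary clipping above, with concrete numbers
 * (illustration only).  With a 64KB boundary and a chunk starting at
 * bus address 0x2f000, the first segment may extend only to the next
 * 64KB line at 0x30000; the remainder goes around the loop again.
 */
#if 0
static void
example_boundary_clip(void)
{
	bus_addr_t boundary = 0x10000;		/* 64KB */
	bus_addr_t bmask = ~(boundary - 1);	/* 0xffff0000 */
	bus_addr_t curaddr = 0x2f000;
	bus_size_t sgsize = 0x3000;		/* caller asked for 12KB */
	bus_addr_t baddr;

	baddr = (curaddr + boundary) & bmask;	/* next boundary: 0x30000 */
	if (sgsize > (baddr - curaddr))
		sgsize = baddr - curaddr;	/* clipped to 0x1000 */
	/* The remaining 0x2000 bytes are loaded on the next pass. */
}
#endif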

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct arm32_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

#ifdef DEBUG_DMA
	printf("dmamap_create: t=%p size=%lx nseg=%x msegsz=%lx boundary=%lx flags=%x\n",
	    t, size, nsegments, maxsegsz, boundary, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct arm32_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	memset(mapstore, 0, mapsize);
	map = (struct arm32_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->_dm_origbuf = NULL;
	map->_dm_buftype = ARM32_BUFTYPE_INVALID;
	map->_dm_proc = NULL;
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
#ifdef DEBUG_DMA
	printf("dmamap_create:map=%p\n", map);
#endif	/* DEBUG_DMA */
	return (0);
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_destroy: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */

	/*
	 * Explicit unload.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_origbuf = NULL;
	map->_dm_buftype = ARM32_BUFTYPE_INVALID;
	map->_dm_proc = NULL;

	free(map, M_DMAMAP);
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	int error;

#ifdef DEBUG_DMA
	printf("dmamap_load: t=%p map=%p buf=%p len=%lx p=%p f=%d\n",
	    t, map, buf, buflen, p, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return (EINVAL);

	/* _bus_dmamap_load_buffer() clears this if we're not... */
	map->_dm_flags |= ARM32_DMAMAP_COHERENT;

	error = _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->_dm_origbuf = buf;
		map->_dm_buftype = ARM32_BUFTYPE_LINEAR;
		map->_dm_proc = p;
	}
#ifdef DEBUG_DMA
	printf("dmamap_load: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}
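
/*
 * Illustrative sketch of the usual map lifetime in a driver pushing a
 * linear kernel buffer to a device: create, load, PREWRITE sync,
 * program the device from dm_segs[], then POSTWRITE sync, unload and
 * destroy.  The example_* names are hypothetical and error handling is
 * abbreviated (illustration only).
 */
#if 0
static int
example_dma_write(bus_dma_tag_t dmat, void *buf, bus_size_t len)
{
	bus_dmamap_t dmam;
	int error, i;

	/* One segment per page, plus one, is the worst case. */
	error = bus_dmamap_create(dmat, len, (int)howmany(len, PAGE_SIZE) + 1,
	    len, 0, BUS_DMA_WAITOK, &dmam);
	if (error)
		return (error);

	error = bus_dmamap_load(dmat, dmam, buf, len, NULL, BUS_DMA_WAITOK);
	if (error) {
		bus_dmamap_destroy(dmat, dmam);
		return (error);
	}

	/* Write back dirty cache lines before the device reads memory. */
	bus_dmamap_sync(dmat, dmam, 0, len, BUS_DMASYNC_PREWRITE);

	/* Hand dm_segs[0 .. dm_nsegs-1] to the (hypothetical) device. */
	for (i = 0; i < dmam->dm_nsegs; i++)
		example_program_device(dmam->dm_segs[i].ds_addr,
		    dmam->dm_segs[i].ds_len);

	/* ... wait for completion, then tear down ... */
	bus_dmamap_sync(dmat, dmam, 0, len, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dmat, dmam);
	bus_dmamap_destroy(dmat, dmam);
	return (0);
}
#endif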

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	int error;
	struct mbuf *m;

#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%d\n",
	    t, map, m0, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif	/* DIAGNOSTIC */

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Mbuf chains should almost never have coherent (i.e.
	 * un-cached) mappings, so clear that flag now.
	 */
	map->_dm_flags &= ~ARM32_DMAMAP_COHERENT;

	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		int offset;
		int remainbytes;
		const struct vm_page * const *pgs;
		paddr_t paddr;
		int size;

		if (m->m_len == 0)
			continue;
		switch (m->m_flags & (M_EXT|M_CLUSTER|M_EXT_PAGES)) {
		case M_EXT|M_CLUSTER:
			/* XXX KDASSERT */
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			paddr = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
			size = m->m_len;
			error = _bus_dmamap_load_paddr(t, map, paddr, size);
			break;

		case M_EXT|M_EXT_PAGES:
			KASSERT(m->m_ext.ext_buf <= m->m_data);
			KASSERT(m->m_data <=
			    m->m_ext.ext_buf + m->m_ext.ext_size);

			offset = (vaddr_t)m->m_data -
			    trunc_page((vaddr_t)m->m_ext.ext_buf);
			remainbytes = m->m_len;

			/* skip uninteresting pages */
			pgs = (const struct vm_page * const *)
			    m->m_ext.ext_pgs + (offset >> PAGE_SHIFT);

			offset &= PAGE_MASK;	/* offset in the first page */

			/* load each page */
			while (remainbytes > 0) {
				const struct vm_page *pg;

				size = MIN(remainbytes, PAGE_SIZE - offset);

				pg = *pgs++;
				KASSERT(pg);
				paddr = VM_PAGE_TO_PHYS(pg) + offset;

				error = _bus_dmamap_load_paddr(t, map,
				    paddr, size);
				if (error)
					break;
				offset = 0;
				remainbytes -= size;
			}
			break;

		case 0:
			paddr = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			size = m->m_len;
			error = _bus_dmamap_load_paddr(t, map, paddr, size);
			break;

		default:
			error = _bus_dmamap_load_buffer(t, map, m->m_data,
			    m->m_len, NULL, flags);
		}
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->_dm_origbuf = m0;
		map->_dm_buftype = ARM32_BUFTYPE_MBUF;
		map->_dm_proc = NULL;	/* always kernel */
	}
#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	int i, error;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_lwp ? uio->uio_lwp->l_proc : NULL;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	/* _bus_dmamap_load_buffer() clears this if we're not... */
	map->_dm_flags |= ARM32_DMAMAP_COHERENT;

	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    p, flags);

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->_dm_origbuf = uio;
		map->_dm_buftype = ARM32_BUFTYPE_UIO;
		map->_dm_proc = p;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_unload: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_origbuf = NULL;
	map->_dm_buftype = ARM32_BUFTYPE_INVALID;
	map->_dm_proc = NULL;
}

static inline void
_bus_dmamap_sync_linear(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	vaddr_t addr = (vaddr_t) map->_dm_origbuf;

	addr += offset;

	switch (ops) {
	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
		cpu_dcache_wbinv_range(addr, len);
		break;

	case BUS_DMASYNC_PREREAD:
		if (((addr | len) & arm_dcache_align_mask) == 0)
			cpu_dcache_inv_range(addr, len);
		else
			cpu_dcache_wbinv_range(addr, len);
		break;

	case BUS_DMASYNC_PREWRITE:
		cpu_dcache_wb_range(addr, len);
		break;
	}
}

static inline void
_bus_dmamap_sync_mbuf(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct mbuf *m, *m0 = map->_dm_origbuf;
	bus_size_t minlen, moff;
	vaddr_t maddr;

	for (moff = offset, m = m0; m != NULL && len != 0;
	     m = m->m_next) {
		/* Find the beginning mbuf. */
		if (moff >= m->m_len) {
			moff -= m->m_len;
			continue;
		}

		/*
		 * Now at the first mbuf to sync; nail each one until
		 * we have exhausted the length.
		 */
		minlen = m->m_len - moff;
		if (len < minlen)
			minlen = len;

		maddr = mtod(m, vaddr_t);
		maddr += moff;

		/*
		 * We can save a lot of work here if we know the mapping
		 * is read-only at the MMU:
		 *
		 * If a mapping is read-only, no dirty cache blocks will
		 * exist for it.  If a writable mapping was made read-only,
		 * we know any dirty cache lines for the range will have
		 * been cleaned for us already.  Therefore, if the upper
		 * layer can tell us we have a read-only mapping, we can
		 * skip all cache cleaning.
		 *
		 * NOTE: This only works if we know the pmap cleans pages
		 * before making a read-write -> read-only transition.  If
		 * this ever becomes non-true (e.g. Physically Indexed
		 * cache), this will have to be revisited.
		 */
		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			if (! M_ROMAP(m)) {
				cpu_dcache_wbinv_range(maddr, minlen);
				break;
			}
			/* else FALLTHROUGH */

		case BUS_DMASYNC_PREREAD:
			if (((maddr | minlen) & arm_dcache_align_mask) == 0)
				cpu_dcache_inv_range(maddr, minlen);
			else
				cpu_dcache_wbinv_range(maddr, minlen);
			break;

		case BUS_DMASYNC_PREWRITE:
			if (! M_ROMAP(m))
				cpu_dcache_wb_range(maddr, minlen);
			break;
		}
		moff = 0;
		len -= minlen;
	}
}

static inline void
_bus_dmamap_sync_uio(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct uio *uio = map->_dm_origbuf;
	struct iovec *iov;
	bus_size_t minlen, ioff;
	vaddr_t addr;

	for (iov = uio->uio_iov, ioff = offset; len != 0; iov++) {
		/* Find the beginning iovec. */
		if (ioff >= iov->iov_len) {
			ioff -= iov->iov_len;
			continue;
		}

		/*
		 * Now at the first iovec to sync; nail each one until
		 * we have exhausted the length.
		 */
		minlen = iov->iov_len - ioff;
		if (len < minlen)
			minlen = len;

		addr = (vaddr_t) iov->iov_base;
		addr += ioff;

		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			cpu_dcache_wbinv_range(addr, minlen);
			break;

		case BUS_DMASYNC_PREREAD:
			if (((addr | minlen) & arm_dcache_align_mask) == 0)
				cpu_dcache_inv_range(addr, minlen);
			else
				cpu_dcache_wbinv_range(addr, minlen);
			break;

		case BUS_DMASYNC_PREWRITE:
			cpu_dcache_wb_range(addr, minlen);
			break;
		}
		ioff = 0;
		len -= minlen;
	}
}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 *
 * This version works for the Virtually Indexed Virtually Tagged
 * cache found on 32-bit ARM processors.
 *
 * XXX Should have separate versions for write-through vs.
 * XXX write-back caches.  We currently assume write-back
 * XXX here, which is not as efficient as it could be for
 * XXX the write-through case.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{

#ifdef DEBUG_DMA
	printf("dmamap_sync: t=%p map=%p offset=%lx len=%lx ops=%x\n",
	    t, map, offset, len, ops);
#endif	/* DEBUG_DMA */

	/*
	 * Mixing of PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync: bad offset %lu (map size is %lu)",
		    offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync: bad length");
#endif

	/*
	 * For a virtually-indexed write-back cache, we need
	 * to do the following things:
	 *
	 *	PREREAD -- Invalidate the D-cache.  We do this
	 *	here in case a write-back is required by the back-end.
	 *
	 *	PREWRITE -- Write-back the D-cache.  Note that if
	 *	we are doing a PREREAD|PREWRITE, we can collapse
	 *	the whole thing into a single Wb-Inv.
	 *
	 *	POSTREAD -- Nothing.
	 *
	 *	POSTWRITE -- Nothing.
	 */

	ops &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	if (ops == 0)
		return;

	/* Skip cache frobbing if mapping was COHERENT. */
	if (map->_dm_flags & ARM32_DMAMAP_COHERENT) {
		/* Drain the write buffer. */
		cpu_drain_writebuf();
		return;
	}

	/*
	 * If the mapping belongs to a non-kernel vmspace, and the
	 * vmspace has not been active since the last time a full
	 * cache flush was performed, we don't need to do anything.
	 */
	if (__predict_false(map->_dm_proc != NULL &&
	    map->_dm_proc->p_vmspace->vm_map.pmap->pm_cstate.cs_cache_d == 0))
		return;

	switch (map->_dm_buftype) {
	case ARM32_BUFTYPE_LINEAR:
		_bus_dmamap_sync_linear(t, map, offset, len, ops);
		break;

	case ARM32_BUFTYPE_MBUF:
		_bus_dmamap_sync_mbuf(t, map, offset, len, ops);
		break;

	case ARM32_BUFTYPE_UIO:
		_bus_dmamap_sync_uio(t, map, offset, len, ops);
		break;

	case ARM32_BUFTYPE_RAW:
		panic("_bus_dmamap_sync: ARM32_BUFTYPE_RAW");
		break;

	case ARM32_BUFTYPE_INVALID:
		panic("_bus_dmamap_sync: ARM32_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", map->_dm_buftype);
		panic("_bus_dmamap_sync");
	}

	/* Drain the write buffer. */
	cpu_drain_writebuf();
}
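
/*
 * Illustrative sketch of the PRE/POST pairing for a device-to-memory
 * transfer on an already-loaded map (illustration only;
 * example_start_rx_dma is a hypothetical stand-in for whatever
 * programs the device).
 */
#if 0
static void
example_rx_transfer(bus_dma_tag_t dmat, bus_dmamap_t dmam, bus_size_t len)
{

	/* Before the device writes memory: shoot down stale cache lines. */
	bus_dmamap_sync(dmat, dmam, 0, len, BUS_DMASYNC_PREREAD);

	example_start_rx_dma(dmam->dm_segs, dmam->dm_nsegs);
	/* ... wait for the completion interrupt ... */

	/* Pair every PRE operation with the matching POST operation. */
	bus_dmamap_sync(dmat, dmam, 0, len, BUS_DMASYNC_POSTREAD);
	/* CPU reads of the buffer are now safe. */
}
#endif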

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */

extern paddr_t physical_start;
extern paddr_t physical_end;

int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	struct arm32_dma_range *dr;
	int error, i;

#ifdef DEBUG_DMA
	printf("dmamem_alloc t=%p size=%lx align=%lx boundary=%lx "
	    "segs=%p nsegs=%x rsegs=%p flags=%x\n", t, size, alignment,
	    boundary, segs, nsegs, rsegs, flags);
#endif

	if ((dr = t->_ranges) != NULL) {
		error = ENOMEM;
		for (i = 0; i < t->_nranges; i++, dr++) {
			if (dr->dr_len == 0)
				continue;
			error = _bus_dmamem_alloc_range(t, size, alignment,
			    boundary, segs, nsegs, rsegs, flags,
			    trunc_page(dr->dr_sysbase),
			    trunc_page(dr->dr_sysbase + dr->dr_len));
			if (error == 0)
				break;
		}
	} else {
		error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
		    segs, nsegs, rsegs, flags, trunc_page(physical_start),
		    trunc_page(physical_end));
	}

#ifdef DEBUG_DMA
	printf("dmamem_alloc: =%d\n", error);
#endif

	return(error);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

#ifdef DEBUG_DMA
	printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
#endif	/* DEBUG_DMA */

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}
	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	pt_entry_t *ptep/*, pte*/;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

#ifdef DEBUG_DMA
	printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n", t,
	    segs, nsegs, (unsigned long)size, flags);
#endif	/* DEBUG_DMA */

	size = round_page(size);
	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
#ifdef DEBUG_DMA
			printf("wiring p%lx to v%lx", addr, va);
#endif	/* DEBUG_DMA */
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
			/*
			 * If the memory must remain coherent with the
			 * cache then we must make the memory uncacheable
			 * in order to maintain virtual cache coherency.
			 * We must also guarantee the cache does not already
			 * contain the virtual addresses we are making
			 * uncacheable.
			 */
			if (flags & BUS_DMA_COHERENT) {
				cpu_dcache_wbinv_range(va, PAGE_SIZE);
				cpu_drain_writebuf();
				ptep = vtopte(va);
				*ptep &= ~L2_S_CACHE_MASK;
				PTE_SYNC(ptep);
				tlb_flush();
			}
#ifdef DEBUG_DMA
			ptep = vtopte(va);
			printf(" pte=v%p *pte=%x\n", ptep, *ptep);
#endif	/* DEBUG_DMA */
		}
	}
	pmap_update(pmap_kernel());
#ifdef DEBUG_DMA
	printf("dmamem_map: =%p\n", *kvap);
#endif	/* DEBUG_DMA */
	return (0);
}
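
/*
 * Illustrative sketch of allocating and mapping a physically contiguous,
 * cache-coherent region, e.g. for a hypothetical descriptor ring
 * (illustration only; error unwinding is abbreviated).
 */
#if 0
static int
example_alloc_ring(bus_dma_tag_t dmat, bus_size_t size,
    bus_dma_segment_t *seg, caddr_t *kvap)
{
	int rseg, error;

	/* One physically contiguous, page-aligned segment. */
	error = bus_dmamem_alloc(dmat, size, PAGE_SIZE, 0, seg, 1, &rseg,
	    BUS_DMA_WAITOK);
	if (error)
		return (error);

	/* Map it uncached so CPU and device always agree on contents. */
	error = bus_dmamem_map(dmat, seg, rseg, size, kvap,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT);
	if (error)
		bus_dmamem_free(dmat, seg, rseg);
	return (error);
}
#endif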

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{

#ifdef DEBUG_DMA
	printf("dmamem_unmap: t=%p kva=%p size=%lx\n", t, kva,
	    (unsigned long)size);
#endif	/* DEBUG_DMA */
#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif	/* DIAGNOSTIC */

	size = round_page(size);
	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif	/* DIAGNOSTIC */
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (arm_btop((u_long)segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  Walks the buffer a page
 * at a time, resolving each page to a physical address and adding it
 * to the map with _bus_dmamap_load_paddr().
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	vaddr_t vaddr = (vaddr_t)buf;
	pd_entry_t *pde;
	pt_entry_t pte;
	int error;
	pmap_t pmap;
	pt_entry_t *ptep;

#ifdef DEBUG_DMA
	printf("_bus_dmamap_load_buffer(buf=%p, len=%lx, flags=%d)\n",
	    buf, buflen, flags);
#endif	/* DEBUG_DMA */

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		if (__predict_true(pmap == pmap_kernel())) {
			(void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
			if (__predict_false(pmap_pde_section(pde))) {
				curaddr = (*pde & L1_S_FRAME) |
				    (vaddr & L1_S_OFFSET);
				if (*pde & L1_S_CACHE_MASK) {
					map->_dm_flags &=
					    ~ARM32_DMAMAP_COHERENT;
				}
			} else {
				pte = *ptep;
				KDASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV);
				if (__predict_false((pte & L2_TYPE_MASK)
				    == L2_TYPE_L)) {
					curaddr = (pte & L2_L_FRAME) |
					    (vaddr & L2_L_OFFSET);
					if (pte & L2_L_CACHE_MASK) {
						map->_dm_flags &=
						    ~ARM32_DMAMAP_COHERENT;
					}
				} else {
					curaddr = (pte & L2_S_FRAME) |
					    (vaddr & L2_S_OFFSET);
					if (pte & L2_S_CACHE_MASK) {
						map->_dm_flags &=
						    ~ARM32_DMAMAP_COHERENT;
					}
				}
			}
		} else {
			(void) pmap_extract(pmap, vaddr, &curaddr);
			map->_dm_flags &= ~ARM32_DMAMAP_COHERENT;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		error = _bus_dmamap_load_paddr(t, map, curaddr, sgsize);
		if (error)
			return (error);

		vaddr += sgsize;
		buflen -= sgsize;
	}

	return (0);
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

#ifdef DEBUG_DMA
	printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
	    t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
#endif	/* DEBUG_DMA */

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
#ifdef DEBUG_DMA
	printf("alloc: page %lx\n", lastaddr);
#endif	/* DEBUG_DMA */
	m = TAILQ_NEXT(m, pageq);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc_range");
		}
#endif	/* DIAGNOSTIC */
#ifdef DEBUG_DMA
		printf("alloc: page %lx\n", curaddr);
#endif	/* DEBUG_DMA */
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}

/*
 * Check if a memory region intersects with a DMA range, and return the
 * page-rounded intersection if it does.
 */
int
arm32_dma_range_intersect(struct arm32_dma_range *ranges, int nranges,
    paddr_t pa, psize_t size, paddr_t *pap, psize_t *sizep)
{
	struct arm32_dma_range *dr;
	int i;

	if (ranges == NULL)
		return (0);

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (dr->dr_sysbase <= pa &&
		    pa < (dr->dr_sysbase + dr->dr_len)) {
			/*
			 * Beginning of region intersects with this range.
			 */
			*pap = trunc_page(pa);
			*sizep = round_page(min(pa + size,
			    dr->dr_sysbase + dr->dr_len) - pa);
			return (1);
		}
		if (pa < dr->dr_sysbase && dr->dr_sysbase < (pa + size)) {
			/*
			 * End of region intersects with this range.
			 */
			*pap = trunc_page(dr->dr_sysbase);
			*sizep = round_page(min((pa + size) - dr->dr_sysbase,
			    dr->dr_len));
			return (1);
		}
	}

	/* No intersection found. */
	return (0);
}