/*	$NetBSD: bus_dma.c,v 1.111 2018/07/17 12:31:16 christos Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _ARM32_BUS_DMA_PRIVATE

#include "opt_arm_bus_space.h"
#include "opt_cputypes.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.111 2018/07/17 12:31:16 christos Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>

#include <uvm/uvm.h>

#include <arm/cpuconf.h>
#include <arm/cpufunc.h>

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
#include <dev/mm.h>
#endif

#ifdef BUSDMA_COUNTERS
static struct evcnt bus_dma_creates =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "creates");
static struct evcnt bus_dma_bounced_creates =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced creates");
static struct evcnt bus_dma_loads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "loads");
static struct evcnt bus_dma_bounced_loads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced loads");
static struct evcnt bus_dma_coherent_loads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "coherent loads");
static struct evcnt bus_dma_read_bounces =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "read bounces");
static struct evcnt bus_dma_write_bounces =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "write bounces");
static struct evcnt bus_dma_bounced_unloads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced unloads");
static struct evcnt bus_dma_unloads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "unloads");
static struct evcnt bus_dma_bounced_destroys =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced destroys");
static struct evcnt bus_dma_destroys =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "destroys");
static struct evcnt bus_dma_sync_prereadwrite =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync prereadwrite");
static struct evcnt bus_dma_sync_preread_begin =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread begin");
static struct evcnt bus_dma_sync_preread =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread");
static struct evcnt bus_dma_sync_preread_tail =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread tail");
static struct evcnt bus_dma_sync_prewrite =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync prewrite");
static struct evcnt bus_dma_sync_postread =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postread");
static struct evcnt bus_dma_sync_postreadwrite =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postreadwrite");
static struct evcnt bus_dma_sync_postwrite =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postwrite");

EVCNT_ATTACH_STATIC(bus_dma_creates);
EVCNT_ATTACH_STATIC(bus_dma_bounced_creates);
EVCNT_ATTACH_STATIC(bus_dma_loads);
EVCNT_ATTACH_STATIC(bus_dma_bounced_loads);
EVCNT_ATTACH_STATIC(bus_dma_coherent_loads);
EVCNT_ATTACH_STATIC(bus_dma_read_bounces);
EVCNT_ATTACH_STATIC(bus_dma_write_bounces);
EVCNT_ATTACH_STATIC(bus_dma_unloads);
EVCNT_ATTACH_STATIC(bus_dma_bounced_unloads);
EVCNT_ATTACH_STATIC(bus_dma_destroys);
EVCNT_ATTACH_STATIC(bus_dma_bounced_destroys);
EVCNT_ATTACH_STATIC(bus_dma_sync_prereadwrite);
EVCNT_ATTACH_STATIC(bus_dma_sync_preread_begin);
EVCNT_ATTACH_STATIC(bus_dma_sync_preread);
EVCNT_ATTACH_STATIC(bus_dma_sync_preread_tail);
EVCNT_ATTACH_STATIC(bus_dma_sync_prewrite);
EVCNT_ATTACH_STATIC(bus_dma_sync_postread);
EVCNT_ATTACH_STATIC(bus_dma_sync_postreadwrite);
EVCNT_ATTACH_STATIC(bus_dma_sync_postwrite);

#define	STAT_INCR(x)	(bus_dma_ ## x.ev_count++)
#else
#define	STAT_INCR(x)	__nothing
#endif

int	_bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct vmspace *, int);

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
static inline struct arm32_dma_range *
_bus_dma_paddr_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    curaddr < (dr->dr_sysbase + dr->dr_len))
			return dr;
	}

	return NULL;
}

/*
 * Check to see if the specified busaddr is in an allowed DMA range.
 */
static inline paddr_t
_bus_dma_busaddr_to_paddr(bus_dma_tag_t t, bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	u_int i;

	if (t->_nranges == 0)
		return curaddr;

	for (i = 0, dr = t->_ranges; i < t->_nranges; i++, dr++) {
		if (dr->dr_busbase <= curaddr
		    && curaddr < dr->dr_busbase + dr->dr_len)
			return curaddr - dr->dr_busbase + dr->dr_sysbase;
	}
	panic("%s: curaddr %#lx not in range", __func__, curaddr);
}

/*
 * Common function to load the specified physical address into the
 * DMA map, coalescing segments and boundary checking as necessary.
 */
static int
_bus_dmamap_load_paddr(bus_dma_tag_t t, bus_dmamap_t map,
    bus_addr_t paddr, bus_size_t size, bool coherent)
{
	bus_dma_segment_t * const segs = map->dm_segs;
	int nseg = map->dm_nsegs;
	bus_addr_t lastaddr;
	bus_addr_t bmask = ~(map->_dm_boundary - 1);
	bus_addr_t curaddr;
	bus_size_t sgsize;
	uint32_t _ds_flags = coherent ? _BUS_DMAMAP_COHERENT : 0;

	if (nseg > 0)
		lastaddr = segs[nseg - 1].ds_addr + segs[nseg - 1].ds_len;
	else
		lastaddr = 0xdead;

 again:
	sgsize = size;

	/* Make sure we're in an allowed DMA range. */
	if (t->_ranges != NULL) {
		/* XXX cache last result? */
		const struct arm32_dma_range * const dr =
		    _bus_dma_paddr_inrange(t->_ranges, t->_nranges, paddr);
		if (dr == NULL)
			return EINVAL;

		/*
		 * If this region is coherent, mark the segment as coherent.
		 */
		_ds_flags |= dr->dr_flags & _BUS_DMAMAP_COHERENT;

		/*
		 * In a valid DMA range.  Translate the physical
		 * memory address to an address in the DMA window.
		 */
		curaddr = (paddr - dr->dr_sysbase) + dr->dr_busbase;
#if 0
		printf("%p: %#lx: range %#lx/%#lx/%#lx/%#x: %#x <-- %#lx\n",
		    t, paddr, dr->dr_sysbase, dr->dr_busbase,
		    dr->dr_len, dr->dr_flags, _ds_flags, curaddr);
#endif
	} else
		curaddr = paddr;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	if (map->_dm_boundary > 0) {
		bus_addr_t baddr;	/* next boundary address */

		baddr = (curaddr + map->_dm_boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}

	/*
	 * Insert chunk into a segment, coalescing with the
	 * previous segment if possible.
	 */
	if (nseg > 0 && curaddr == lastaddr &&
	    segs[nseg - 1].ds_len + sgsize <= map->dm_maxsegsz &&
	    ((segs[nseg - 1]._ds_flags ^ _ds_flags) & _BUS_DMAMAP_COHERENT) == 0 &&
	    (map->_dm_boundary == 0 ||
	     (segs[nseg - 1].ds_addr & bmask) == (curaddr & bmask))) {
		/* coalesce */
		segs[nseg - 1].ds_len += sgsize;
	} else if (nseg >= map->_dm_segcnt) {
		return EFBIG;
	} else {
		/* new segment */
		segs[nseg].ds_addr = curaddr;
		segs[nseg].ds_len = sgsize;
		segs[nseg]._ds_flags = _ds_flags;
		nseg++;
	}

	lastaddr = curaddr + sgsize;

	paddr += sgsize;
	size -= sgsize;
	if (size > 0)
		goto again;

	map->_dm_flags &= (_ds_flags & _BUS_DMAMAP_COHERENT);
	map->dm_nsegs = nseg;
	return 0;
}
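/*
 * Illustrative sketch (not used by the code above): how the boundary
 * clipping in _bus_dmamap_load_paddr() behaves for a hypothetical map
 * with _dm_boundary = 0x1000.  The arithmetic assumes the boundary is a
 * power of two, as bus_dma(9) requires; the addresses are made up.
 *
 *	bmask   = ~(0x1000 - 1)              = 0xfffff000
 *	curaddr = 0x20000f80, size = 0x200
 *	baddr   = (curaddr + 0x1000) & bmask = 0x20001000
 *	sgsize  = MIN(size, baddr - curaddr) = 0x80
 *
 * so the 0x200-byte request is clipped to an 0x80-byte segment ending
 * exactly at the boundary, and the remainder is handled by the
 * "goto again" loop as a new segment.
 */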

#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
static int _bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
	    bus_size_t size, int flags);
static void _bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map);
static int _bus_dma_uiomove(void *buf, struct uio *uio, size_t n,
	    int direction);

static int
_bus_dma_load_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    size_t buflen, int buftype, int flags)
{
	struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
	struct vmspace * const vm = vmspace_kernel();
	int error;

	KASSERT(cookie != NULL);
	KASSERT(cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & _BUS_DMA_HAS_BOUNCE) == 0) {
		error = _bus_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return error;
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	error = _bus_dmamap_load_buffer(t, map, cookie->id_bouncebuf,
	    buflen, vm, flags);
	if (error)
		return error;

	STAT_INCR(bounced_loads);
	map->dm_mapsize = buflen;
	map->_dm_vmspace = vm;
	map->_dm_buftype = buftype;

	/* ...so _bus_dmamap_sync() knows we're bouncing */
	map->_dm_flags |= _BUS_DMAMAP_IS_BOUNCING;
	cookie->id_flags |= _BUS_DMA_IS_BOUNCING;
	return 0;
}
#endif /* _ARM32_NEED_BUS_DMA_BOUNCE */

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct arm32_bus_dmamap *map;
	void *mapstore;

#ifdef DEBUG_DMA
	printf("dmamap_create: t=%p size=%lx nseg=%x msegsz=%lx boundary=%lx"
	    " flags=%x\n", t, size, nsegments, maxsegsz, boundary, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	const size_t mapsize = sizeof(struct arm32_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	const int zallocflags = (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
	if ((mapstore = kmem_intr_zalloc(mapsize, zallocflags)) == NULL)
		return ENOMEM;

	map = (struct arm32_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->_dm_origbuf = NULL;
	map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
	map->_dm_vmspace = vmspace_kernel();
	map->_dm_cookie = NULL;
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	struct arm32_bus_dma_cookie *cookie;
	int cookieflags;
	void *cookiestore;
	int error;

	cookieflags = 0;

	if (t->_may_bounce != NULL) {
		error = (*t->_may_bounce)(t, map, flags, &cookieflags);
		if (error != 0)
			goto out;
	}

	if (t->_ranges != NULL)
		cookieflags |= _BUS_DMA_MIGHT_NEED_BOUNCE;

	if ((cookieflags & _BUS_DMA_MIGHT_NEED_BOUNCE) == 0) {
		STAT_INCR(creates);
		*dmamp = map;
		return 0;
	}

	const size_t cookiesize = sizeof(struct arm32_bus_dma_cookie) +
	    (sizeof(bus_dma_segment_t) * map->_dm_segcnt);

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = kmem_intr_zalloc(cookiesize, zallocflags)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	cookie = (struct arm32_bus_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;
	STAT_INCR(bounced_creates);

	error = _bus_dma_alloc_bouncebuf(t, map, size, flags);
 out:
	if (error)
		_bus_dmamap_destroy(t, map);
	else
		*dmamp = map;
#else
	*dmamp = map;
	STAT_INCR(creates);
#endif /* _ARM32_NEED_BUS_DMA_BOUNCE */
#ifdef DEBUG_DMA
	printf("dmamap_create:map=%p\n", map);
#endif	/* DEBUG_DMA */
	return 0;
}
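/*
 * Illustrative sketch of the bus_dma(9) lifecycle implemented by the
 * functions in this file, as a hypothetical driver might use it.  The
 * names "sc"/"sc_dmat" and the 64 KB transfer size are invented for the
 * example; error unwinding is abbreviated.
 *
 *	bus_dmamap_t map;
 *	int error;
 *
 *	error = bus_dmamap_create(sc->sc_dmat, 65536, 1, 65536, 0,
 *	    BUS_DMA_WAITOK, &map);
 *	if (error)
 *		return error;
 *	error = bus_dmamap_load(sc->sc_dmat, map, buf, 65536, NULL,
 *	    BUS_DMA_WAITOK | BUS_DMA_READ);
 *	if (error == 0) {
 *		bus_dmamap_sync(sc->sc_dmat, map, 0, 65536,
 *		    BUS_DMASYNC_PREREAD);
 *		... program the device with map->dm_segs[], wait for
 *		    the transfer to complete ...
 *		bus_dmamap_sync(sc->sc_dmat, map, 0, 65536,
 *		    BUS_DMASYNC_POSTREAD);
 *		bus_dmamap_unload(sc->sc_dmat, map);
 *	}
 *	bus_dmamap_destroy(sc->sc_dmat, map);
 */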

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_destroy: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie != NULL) {
		const size_t cookiesize = sizeof(struct arm32_bus_dma_cookie) +
		    (sizeof(bus_dma_segment_t) * map->_dm_segcnt);

		if (cookie->id_flags & _BUS_DMA_IS_BOUNCING)
			STAT_INCR(bounced_unloads);
		map->dm_nsegs = 0;
		if (cookie->id_flags & _BUS_DMA_HAS_BOUNCE)
			_bus_dma_free_bouncebuf(t, map);
		STAT_INCR(bounced_destroys);
		kmem_intr_free(cookie, cookiesize);
	} else
#endif
	STAT_INCR(destroys);

	if (map->dm_nsegs > 0)
		STAT_INCR(unloads);

	const size_t mapsize = sizeof(struct arm32_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (map->_dm_segcnt - 1));
	kmem_intr_free(map, mapsize);
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct vmspace *vm;
	int error;

#ifdef DEBUG_DMA
	printf("dmamap_load: t=%p map=%p buf=%p len=%lx p=%p f=%d\n",
	    t, map, buf, buflen, p, flags);
#endif	/* DEBUG_DMA */

	if (map->dm_nsegs > 0) {
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
		struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
		if (cookie != NULL) {
			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
				STAT_INCR(bounced_unloads);
				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
				map->_dm_flags &= ~_BUS_DMAMAP_IS_BOUNCING;
			}
		} else
#endif
		STAT_INCR(unloads);
	}

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
	KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
	    "dm_maxsegsz %lu _dm_maxmaxsegsz %lu",
	    map->dm_maxsegsz, map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return EINVAL;

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	/* _bus_dmamap_load_buffer() clears this if we're not... */
	map->_dm_flags |= _BUS_DMAMAP_COHERENT;

	error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->_dm_vmspace = vm;
		map->_dm_origbuf = buf;
		map->_dm_buftype = _BUS_DMA_BUFTYPE_LINEAR;
		if (map->_dm_flags & _BUS_DMAMAP_COHERENT) {
			STAT_INCR(coherent_loads);
		} else {
			STAT_INCR(loads);
		}
		return 0;
	}
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
	if (cookie != NULL && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
		error = _bus_dma_load_bouncebuf(t, map, buf, buflen,
		    _BUS_DMA_BUFTYPE_LINEAR, flags);
	}
#endif
	return error;
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	struct mbuf *m;
	int error;

#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%d\n",
	    t, map, m0, flags);
#endif	/* DEBUG_DMA */

	if (map->dm_nsegs > 0) {
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
		struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
		if (cookie != NULL) {
			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
				STAT_INCR(bounced_unloads);
				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
				map->_dm_flags &= ~_BUS_DMAMAP_IS_BOUNCING;
			}
		} else
#endif
		STAT_INCR(unloads);
	}

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
	KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
	    "dm_maxsegsz %lu _dm_maxmaxsegsz %lu",
	    map->dm_maxsegsz, map->_dm_maxmaxsegsz);

	KASSERT(m0->m_flags & M_PKTHDR);

	if (m0->m_pkthdr.len > map->_dm_size)
		return EINVAL;

	/* _bus_dmamap_load_paddr() clears this if we're not... */
	map->_dm_flags |= _BUS_DMAMAP_COHERENT;

	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		int offset;
		int remainbytes;
		const struct vm_page * const *pgs;
		paddr_t paddr;
		int size;

		if (m->m_len == 0)
			continue;
		/*
		 * Don't allow reads in read-only mbufs.
		 */
		if (M_ROMAP(m) && (flags & BUS_DMA_READ)) {
			error = EFAULT;
			break;
		}
		switch (m->m_flags & (M_EXT|M_EXT_CLUSTER|M_EXT_PAGES)) {
		case M_EXT|M_EXT_CLUSTER:
			/* XXX KDASSERT */
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			paddr = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
			size = m->m_len;
			error = _bus_dmamap_load_paddr(t, map, paddr, size,
			    false);
			break;

		case M_EXT|M_EXT_PAGES:
			KASSERT(m->m_ext.ext_buf <= m->m_data);
			KASSERT(m->m_data <=
			    m->m_ext.ext_buf + m->m_ext.ext_size);

			offset = (vaddr_t)m->m_data -
			    trunc_page((vaddr_t)m->m_ext.ext_buf);
			remainbytes = m->m_len;

			/* skip uninteresting pages */
			pgs = (const struct vm_page * const *)
			    m->m_ext.ext_pgs + (offset >> PAGE_SHIFT);

			offset &= PAGE_MASK;	/* offset in the first page */

			/* load each page */
			while (remainbytes > 0) {
				const struct vm_page *pg;

				size = MIN(remainbytes, PAGE_SIZE - offset);

				pg = *pgs++;
				KASSERT(pg);
				paddr = VM_PAGE_TO_PHYS(pg) + offset;

				error = _bus_dmamap_load_paddr(t, map,
				    paddr, size, false);
				if (error)
					break;
				offset = 0;
				remainbytes -= size;
			}
			break;

		case 0:
			paddr = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			size = m->m_len;
			error = _bus_dmamap_load_paddr(t, map, paddr, size,
			    false);
			break;

		default:
			error = _bus_dmamap_load_buffer(t, map, m->m_data,
			    m->m_len, vmspace_kernel(), flags);
		}
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->_dm_origbuf = m0;
		map->_dm_buftype = _BUS_DMA_BUFTYPE_MBUF;
		map->_dm_vmspace = vmspace_kernel();	/* always kernel */
		if (map->_dm_flags & _BUS_DMAMAP_COHERENT) {
			STAT_INCR(coherent_loads);
		} else {
			STAT_INCR(loads);
		}
		return 0;
	}
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
	if (cookie != NULL && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
		error = _bus_dma_load_bouncebuf(t, map, m0, m0->m_pkthdr.len,
		    _BUS_DMA_BUFTYPE_MBUF, flags);
	}
#endif
	return error;
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;
	int i, error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
	    "dm_maxsegsz %lu _dm_maxmaxsegsz %lu",
	    map->dm_maxsegsz, map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	/* _bus_dmamap_load_buffer() clears this if we're not... */
	map->_dm_flags |= _BUS_DMAMAP_COHERENT;

	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    uio->uio_vmspace, flags);

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->_dm_origbuf = uio;
		map->_dm_buftype = _BUS_DMA_BUFTYPE_UIO;
		map->_dm_vmspace = uio->uio_vmspace;
		if (map->_dm_flags & _BUS_DMAMAP_COHERENT) {
			STAT_INCR(coherent_loads);
		} else {
			STAT_INCR(loads);
		}
	}
	return error;
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size0, int flags)
{

	bus_size_t size;
	int i, error = 0;

	/*
	 * Make sure that on error conditions we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (size0 > map->_dm_size)
		return EINVAL;

	for (i = 0, size = size0; i < nsegs && size > 0; i++) {
		bus_dma_segment_t *ds = &segs[i];
		bus_size_t sgsize;

		sgsize = MIN(ds->ds_len, size);
		if (sgsize == 0)
			continue;
		error = _bus_dmamap_load_paddr(t, map, ds->ds_addr,
		    sgsize, false);
		if (error != 0)
			break;
		size -= sgsize;
	}

	if (error != 0) {
		map->dm_mapsize = 0;
		map->dm_nsegs = 0;
		return error;
	}

	/* XXX TBD bounce */

	map->dm_mapsize = size0;
	return 0;
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_unload: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_origbuf = NULL;
	map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
	map->_dm_vmspace = NULL;
}

static void
_bus_dmamap_sync_segment(vaddr_t va, paddr_t pa, vsize_t len, int ops,
    bool readonly_p)
{

#ifdef ARM_MMU_EXTENDED
	/*
	 * No optimisations are available for readonly mbufs on armv6+, so
	 * assume it's not readonly from here on.
	 *
	 * See the comment in _bus_dmamap_sync_mbuf
	 */
	readonly_p = false;
#endif

	KASSERTMSG((va & PAGE_MASK) == (pa & PAGE_MASK),
	    "va %#lx pa %#lx", va, pa);
#if 0
	printf("sync_segment: va=%#lx pa=%#lx len=%#lx ops=%#x ro=%d\n",
	    va, pa, len, ops, readonly_p);
#endif

	switch (ops) {
	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
		if (!readonly_p) {
			STAT_INCR(sync_prereadwrite);
			cpu_dcache_wbinv_range(va, len);
			cpu_sdcache_wbinv_range(va, pa, len);
			break;
		}
		/* FALLTHROUGH */

	case BUS_DMASYNC_PREREAD: {
		const size_t line_size = arm_dcache_align;
		const size_t line_mask = arm_dcache_align_mask;
		vsize_t misalignment = va & line_mask;
		if (misalignment) {
			va -= misalignment;
			pa -= misalignment;
			len += misalignment;
			STAT_INCR(sync_preread_begin);
			cpu_dcache_wbinv_range(va, line_size);
			cpu_sdcache_wbinv_range(va, pa, line_size);
			if (len <= line_size)
				break;
			va += line_size;
			pa += line_size;
			len -= line_size;
		}
		misalignment = len & line_mask;
		len -= misalignment;
		if (len > 0) {
			STAT_INCR(sync_preread);
			cpu_dcache_inv_range(va, len);
			cpu_sdcache_inv_range(va, pa, len);
		}
		if (misalignment) {
			va += len;
			pa += len;
			STAT_INCR(sync_preread_tail);
			cpu_dcache_wbinv_range(va, line_size);
			cpu_sdcache_wbinv_range(va, pa, line_size);
		}
		break;
	}

	case BUS_DMASYNC_PREWRITE:
		STAT_INCR(sync_prewrite);
		cpu_dcache_wb_range(va, len);
		cpu_sdcache_wb_range(va, pa, len);
		break;

#ifdef CPU_CORTEX
	/*
	 * Cortex CPUs can do speculative loads so we need to clean the cache
	 * after a DMA read to deal with any speculatively loaded cache lines.
	 * Since these can't be dirty, we can just invalidate them and don't
	 * have to worry about having to write back their contents.
	 */
	case BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE:
		STAT_INCR(sync_postreadwrite);
		cpu_dcache_inv_range(va, len);
		cpu_sdcache_inv_range(va, pa, len);
		break;
	case BUS_DMASYNC_POSTREAD:
		STAT_INCR(sync_postread);
		cpu_dcache_inv_range(va, len);
		cpu_sdcache_inv_range(va, pa, len);
		break;
#endif
	}
}

static inline void
_bus_dmamap_sync_linear(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_dma_segment_t *ds = map->dm_segs;
	vaddr_t va = (vaddr_t) map->_dm_origbuf;
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	if (map->_dm_flags & _BUS_DMAMAP_IS_BOUNCING) {
		struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
		va = (vaddr_t) cookie->id_bouncebuf;
	}
#endif

	while (len > 0) {
		while (offset >= ds->ds_len) {
			offset -= ds->ds_len;
			va += ds->ds_len;
			ds++;
		}

		paddr_t pa = _bus_dma_busaddr_to_paddr(t, ds->ds_addr + offset);
		size_t seglen = min(len, ds->ds_len - offset);

		if ((ds->_ds_flags & _BUS_DMAMAP_COHERENT) == 0)
			_bus_dmamap_sync_segment(va + offset, pa, seglen, ops,
			    false);

		offset += seglen;
		len -= seglen;
	}
}

static inline void
_bus_dmamap_sync_mbuf(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t offset,
    bus_size_t len, int ops)
{
	bus_dma_segment_t *ds = map->dm_segs;
	struct mbuf *m = map->_dm_origbuf;
	bus_size_t voff = offset;
	bus_size_t ds_off = offset;

	while (len > 0) {
		/* Find the current dma segment */
		while (ds_off >= ds->ds_len) {
			ds_off -= ds->ds_len;
			ds++;
		}
		/* Find the current mbuf. */
		while (voff >= m->m_len) {
			voff -= m->m_len;
			m = m->m_next;
		}

		/*
		 * Now at the first mbuf to sync; nail each one until
		 * we have exhausted the length.
		 */
		vsize_t seglen = min(len, min(m->m_len - voff, ds->ds_len - ds_off));
		vaddr_t va = mtod(m, vaddr_t) + voff;
		paddr_t pa = _bus_dma_busaddr_to_paddr(t, ds->ds_addr + ds_off);

		/*
		 * We can save a lot of work here if we know the mapping
		 * is read-only at the MMU and we aren't using the armv6+
		 * MMU:
		 *
		 * If a mapping is read-only, no dirty cache blocks will
		 * exist for it.  If a writable mapping was made read-only,
		 * we know any dirty cache lines for the range will have
		 * been cleaned for us already.  Therefore, if the upper
		 * layer can tell us we have a read-only mapping, we can
		 * skip all cache cleaning.
		 *
		 * NOTE: This only works if we know the pmap cleans pages
		 * before making a read-write -> read-only transition.  If
		 * this ever becomes non-true (e.g. Physically Indexed
		 * cache), this will have to be revisited.
		 */

		if ((ds->_ds_flags & _BUS_DMAMAP_COHERENT) == 0) {
			/*
			 * If we are doing preread (DMAing into the mbuf),
			 * this mbuf better not be readonly,
			 */
			KASSERT(!(ops & BUS_DMASYNC_PREREAD) || !M_ROMAP(m));
			_bus_dmamap_sync_segment(va, pa, seglen, ops,
			    M_ROMAP(m));
		}
		voff += seglen;
		ds_off += seglen;
		len -= seglen;
	}
}

static inline void
_bus_dmamap_sync_uio(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_dma_segment_t *ds = map->dm_segs;
	struct uio *uio = map->_dm_origbuf;
	struct iovec *iov = uio->uio_iov;
	bus_size_t voff = offset;
	bus_size_t ds_off = offset;

	while (len > 0) {
		/* Find the current dma segment */
		while (ds_off >= ds->ds_len) {
			ds_off -= ds->ds_len;
			ds++;
		}

		/* Find the current iovec. */
		while (voff >= iov->iov_len) {
			voff -= iov->iov_len;
			iov++;
		}

		/*
		 * Now at the first iovec to sync; nail each one until
		 * we have exhausted the length.
		 */
		vsize_t seglen = min(len, min(iov->iov_len - voff, ds->ds_len - ds_off));
		vaddr_t va = (vaddr_t) iov->iov_base + voff;
		paddr_t pa = _bus_dma_busaddr_to_paddr(t, ds->ds_addr + ds_off);

		if ((ds->_ds_flags & _BUS_DMAMAP_COHERENT) == 0)
			_bus_dmamap_sync_segment(va, pa, seglen, ops, false);

		voff += seglen;
		ds_off += seglen;
		len -= seglen;
	}
}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 *
 * This version works for the Virtually Indexed Virtually Tagged
 * cache found on 32-bit ARM processors.
 *
 * XXX Should have separate versions for write-through vs.
 * XXX write-back caches.  We currently assume write-back
 * XXX here, which is not as efficient as it could be for
 * XXX the write-through case.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
#ifdef DEBUG_DMA
	printf("dmamap_sync: t=%p map=%p offset=%lx len=%lx ops=%x\n",
	    t, map, offset, len, ops);
#endif	/* DEBUG_DMA */

	/*
	 * Mixing of PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

	KASSERTMSG(offset < map->dm_mapsize,
	    "offset %lu mapsize %lu",
	    offset, map->dm_mapsize);
	KASSERTMSG(len > 0 && offset + len <= map->dm_mapsize,
	    "len %lu offset %lu mapsize %lu",
	    len, offset, map->dm_mapsize);

	/*
	 * For a virtually-indexed write-back cache, we need
	 * to do the following things:
	 *
	 *	PREREAD -- Invalidate the D-cache.  We do this
	 *	here in case a write-back is required by the back-end.
	 *
	 *	PREWRITE -- Write-back the D-cache.  Note that if
	 *	we are doing a PREREAD|PREWRITE, we can collapse
	 *	the whole thing into a single Wb-Inv.
	 *
	 *	POSTREAD -- Re-invalidate the D-cache in case speculative
	 *	memory accesses caused cachelines to become valid with now
	 *	invalid data.
	 *
	 *	POSTWRITE -- Nothing.
	 */
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	const bool bouncing = (map->_dm_flags & _BUS_DMAMAP_IS_BOUNCING);
#else
	const bool bouncing = false;
#endif

	const int pre_ops = ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
#ifdef CPU_CORTEX
	const int post_ops = ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
#else
	const int post_ops = 0;
#endif
	if (!bouncing) {
		if (pre_ops == 0 && post_ops == BUS_DMASYNC_POSTWRITE) {
			STAT_INCR(sync_postwrite);
			return;
		} else if (pre_ops == 0 && post_ops == 0) {
			return;
		}
	}
	KASSERTMSG(bouncing || pre_ops != 0 || (post_ops & BUS_DMASYNC_POSTREAD),
	    "pre_ops %#x post_ops %#x", pre_ops, post_ops);
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	if (bouncing && (ops & BUS_DMASYNC_PREWRITE)) {
		struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
		STAT_INCR(write_bounces);
		char * const dataptr = (char *)cookie->id_bouncebuf + offset;
		/*
		 * Copy the caller's buffer to the bounce buffer.
		 */
		switch (map->_dm_buftype) {
		case _BUS_DMA_BUFTYPE_LINEAR:
			memcpy(dataptr, cookie->id_origlinearbuf + offset, len);
			break;
		case _BUS_DMA_BUFTYPE_MBUF:
			m_copydata(cookie->id_origmbuf, offset, len, dataptr);
			break;
		case _BUS_DMA_BUFTYPE_UIO:
			_bus_dma_uiomove(dataptr, cookie->id_origuio, len, UIO_WRITE);
			break;
#ifdef DIAGNOSTIC
		case _BUS_DMA_BUFTYPE_RAW:
			panic("_bus_dmamap_sync(pre): _BUS_DMA_BUFTYPE_RAW");
			break;

		case _BUS_DMA_BUFTYPE_INVALID:
			panic("_bus_dmamap_sync(pre): _BUS_DMA_BUFTYPE_INVALID");
			break;

		default:
			panic("_bus_dmamap_sync(pre): map %p: unknown buffer type %d\n",
			    map, map->_dm_buftype);
			break;
#endif /* DIAGNOSTIC */
		}
	}
#endif /* _ARM32_NEED_BUS_DMA_BOUNCE */

	/* Skip cache frobbing if mapping was COHERENT. */
	if (!bouncing && (map->_dm_flags & _BUS_DMAMAP_COHERENT)) {
		/* Drain the write buffer. */
		if (pre_ops & BUS_DMASYNC_PREWRITE)
			cpu_drain_writebuf();
		return;
	}

#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	if (bouncing && ((map->_dm_flags & _BUS_DMAMAP_COHERENT) || pre_ops == 0)) {
		goto bounce_it;
	}
#endif /* _ARM32_NEED_BUS_DMA_BOUNCE */

#ifndef ARM_MMU_EXTENDED
	/*
	 * If the mapping belongs to a non-kernel vmspace, and the
	 * vmspace has not been active since the last time a full
	 * cache flush was performed, we don't need to do anything.
	 */
	if (__predict_false(!VMSPACE_IS_KERNEL_P(map->_dm_vmspace) &&
	    vm_map_pmap(&map->_dm_vmspace->vm_map)->pm_cstate.cs_cache_d == 0))
		return;
#endif

	int buftype = map->_dm_buftype;
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	if (bouncing) {
		buftype = _BUS_DMA_BUFTYPE_LINEAR;
	}
#endif

	switch (buftype) {
	case _BUS_DMA_BUFTYPE_LINEAR:
		_bus_dmamap_sync_linear(t, map, offset, len, ops);
		break;

	case _BUS_DMA_BUFTYPE_MBUF:
		_bus_dmamap_sync_mbuf(t, map, offset, len, ops);
		break;

	case _BUS_DMA_BUFTYPE_UIO:
		_bus_dmamap_sync_uio(t, map, offset, len, ops);
		break;

	case _BUS_DMA_BUFTYPE_RAW:
		panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_RAW");
		break;

	case _BUS_DMA_BUFTYPE_INVALID:
		panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_INVALID");
		break;

	default:
		panic("_bus_dmamap_sync: map %p: unknown buffer type %d\n",
		    map, map->_dm_buftype);
	}

	/* Drain the write buffer. */
	cpu_drain_writebuf();

#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
 bounce_it:
	if (!bouncing || (ops & BUS_DMASYNC_POSTREAD) == 0)
		return;

	struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
	char * const dataptr = (char *)cookie->id_bouncebuf + offset;
	STAT_INCR(read_bounces);
	/*
	 * Copy the bounce buffer to the caller's buffer.
	 */
	switch (map->_dm_buftype) {
	case _BUS_DMA_BUFTYPE_LINEAR:
		memcpy(cookie->id_origlinearbuf + offset, dataptr, len);
		break;

	case _BUS_DMA_BUFTYPE_MBUF:
		m_copyback(cookie->id_origmbuf, offset, len, dataptr);
		break;

	case _BUS_DMA_BUFTYPE_UIO:
		_bus_dma_uiomove(dataptr, cookie->id_origuio, len, UIO_READ);
		break;
#ifdef DIAGNOSTIC
	case _BUS_DMA_BUFTYPE_RAW:
		panic("_bus_dmamap_sync(post): _BUS_DMA_BUFTYPE_RAW");
		break;

	case _BUS_DMA_BUFTYPE_INVALID:
		panic("_bus_dmamap_sync(post): _BUS_DMA_BUFTYPE_INVALID");
		break;

	default:
		panic("_bus_dmamap_sync(post): map %p: unknown buffer type %d\n",
		    map, map->_dm_buftype);
		break;
#endif
	}
#endif /* _ARM32_NEED_BUS_DMA_BOUNCE */
}
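/*
 * Illustrative sketch of the sync discipline _bus_dmamap_sync() expects,
 * for a hypothetical driver with a device-writable receive buffer (the
 * names "sc", "sc_rxmap" and RXBUF_SIZE are invented for the example):
 *
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap, 0, RXBUF_SIZE,
 *	    BUS_DMASYNC_PREREAD);
 *	... start the device DMA into the buffer, wait for the
 *	    completion interrupt ...
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap, 0, RXBUF_SIZE,
 *	    BUS_DMASYNC_POSTREAD);
 *	... only now read the received data through the CPU mapping ...
 *
 * PRE and POST operations must not be mixed in one call (the code above
 * panics on that), and for bounced maps the PREWRITE copy into, and the
 * POSTREAD copy out of, the bounce buffer happen inside this function,
 * which is why the ordering matters even on coherent mappings.
 */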

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */

extern paddr_t physical_start;
extern paddr_t physical_end;

int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	struct arm32_dma_range *dr;
	int error, i;

#ifdef DEBUG_DMA
	printf("dmamem_alloc t=%p size=%lx align=%lx boundary=%lx "
	    "segs=%p nsegs=%x rsegs=%p flags=%x\n", t, size, alignment,
	    boundary, segs, nsegs, rsegs, flags);
#endif

	if ((dr = t->_ranges) != NULL) {
		error = ENOMEM;
		for (i = 0; i < t->_nranges; i++, dr++) {
			if (dr->dr_len == 0
			    || (dr->dr_flags & _BUS_DMAMAP_NOALLOC))
				continue;
			error = _bus_dmamem_alloc_range(t, size, alignment,
			    boundary, segs, nsegs, rsegs, flags,
			    trunc_page(dr->dr_sysbase),
			    trunc_page(dr->dr_sysbase + dr->dr_len));
			if (error == 0)
				break;
		}
	} else {
		error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
		    segs, nsegs, rsegs, flags, trunc_page(physical_start),
		    trunc_page(physical_end));
	}

#ifdef DEBUG_DMA
	printf("dmamem_alloc: =%d\n", error);
#endif

	return error;
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

#ifdef DEBUG_DMA
	printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
#endif	/* DEBUG_DMA */

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}
	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	paddr_t pa;
	int curseg;
	const uvm_flag_t kmflags = UVM_KMF_VAONLY
	    | ((flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0);
	vsize_t align = 0;

#ifdef DEBUG_DMA
	printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n", t,
	    segs, nsegs, (unsigned long)size, flags);
#endif	/* DEBUG_DMA */

#ifdef PMAP_MAP_POOLPAGE
	/*
	 * If all of memory is mapped, and we are mapping a single physically
	 * contiguous area then this area is already mapped.  Let's see if we
	 * can avoid having a separate mapping for it.
	 */
	if (nsegs == 1) {
		/*
		 * If this is a non-COHERENT mapping, then the existing kernel
		 * mapping is already compatible with it.
		 */
		bool direct_mapable = (flags & BUS_DMA_COHERENT) == 0;
		pa = segs[0].ds_addr;

		/*
		 * This is a COHERENT mapping which, unless this address is in
		 * a COHERENT dma range, will not be compatible.
		 */
		if (t->_ranges != NULL) {
			const struct arm32_dma_range * const dr =
			    _bus_dma_paddr_inrange(t->_ranges, t->_nranges, pa);
			if (dr != NULL
			    && (dr->dr_flags & _BUS_DMAMAP_COHERENT)) {
				direct_mapable = true;
			}
		}

#ifdef PMAP_NEED_ALLOC_POOLPAGE
		/*
		 * The page can only be direct mapped if it was allocated out
		 * of the arm poolpage vm freelist.
		 */
		uvm_physseg_t upm = uvm_physseg_find(atop(pa), NULL);
		KASSERT(uvm_physseg_valid_p(upm));
		if (direct_mapable) {
			direct_mapable =
			    (arm_poolpage_vmfreelist == uvm_physseg_get_free_list(upm));
		}
#endif

		if (direct_mapable) {
			*kvap = (void *)PMAP_MAP_POOLPAGE(pa);
#ifdef DEBUG_DMA
			printf("dmamem_map: =%p\n", *kvap);
#endif	/* DEBUG_DMA */
			return 0;
		}
	}
#endif

	size = round_page(size);

#ifdef PMAP_MAPSIZE1
	if (size >= PMAP_MAPSIZE1)
		align = PMAP_MAPSIZE1;

#ifdef PMAP_MAPSIZE2

#if PMAP_MAPSIZE1 > PMAP_MAPSIZE2
#error PMAP_MAPSIZE1 must be smaller than PMAP_MAPSIZE2
#endif

	if (size >= PMAP_MAPSIZE2)
		align = PMAP_MAPSIZE2;

#ifdef PMAP_MAPSIZE3

#if PMAP_MAPSIZE2 > PMAP_MAPSIZE3
#error PMAP_MAPSIZE2 must be smaller than PMAP_MAPSIZE3
#endif

	if (size >= PMAP_MAPSIZE3)
		align = PMAP_MAPSIZE3;
#endif
#endif
#endif

	va = uvm_km_alloc(kernel_map, size, align, kmflags);
	if (__predict_false(va == 0 && align > 0)) {
		align = 0;
		va = uvm_km_alloc(kernel_map, size, 0, kmflags);
	}

	if (va == 0)
		return ENOMEM;

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (pa = segs[curseg].ds_addr;
		    pa < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    pa += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			bool uncached = (flags & BUS_DMA_COHERENT);
#ifdef DEBUG_DMA
			printf("wiring p%lx to v%lx", pa, va);
#endif	/* DEBUG_DMA */
			if (size == 0)
				panic("_bus_dmamem_map: size botch");

			const struct arm32_dma_range * const dr =
			    _bus_dma_paddr_inrange(t->_ranges, t->_nranges, pa);
			/*
			 * If this dma region is coherent then there is
			 * no need for an uncached mapping.
			 */
			if (dr != NULL
			    && (dr->dr_flags & _BUS_DMAMAP_COHERENT)) {
				uncached = false;
			}

			pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE,
			    PMAP_WIRED | (uncached ? PMAP_NOCACHE : 0));
		}
	}
	pmap_update(pmap_kernel());
#ifdef DEBUG_DMA
	printf("dmamem_map: =%p\n", *kvap);
#endif	/* DEBUG_DMA */
	return 0;
}
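/*
 * Illustrative sketch of how a hypothetical driver would obtain a
 * kernel-visible, DMA-safe buffer using the allocation and mapping
 * functions above (the names "sc", "ring" and DESC_RING_SIZE are
 * invented for the example; error unwinding is abbreviated):
 *
 *	bus_dma_segment_t seg;
 *	int rseg, error;
 *	void *ring;
 *
 *	error = bus_dmamem_alloc(sc->sc_dmat, DESC_RING_SIZE, PAGE_SIZE,
 *	    0, &seg, 1, &rseg, BUS_DMA_WAITOK);
 *	if (error == 0)
 *		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
 *		    DESC_RING_SIZE, &ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT);
 *
 * The reverse path on detach is bus_dmamem_unmap() followed by
 * bus_dmamem_free().  With BUS_DMA_COHERENT the code above reuses a
 * direct mapping when possible, or falls back to an uncached
 * pmap_kenter_pa() mapping.
 */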

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DEBUG_DMA
	printf("dmamem_unmap: t=%p kva=%p size=%zx\n", t, kva, size);
#endif	/* DEBUG_DMA */
	KASSERTMSG(((uintptr_t)kva & PAGE_MASK) == 0,
	    "kva %p (%#"PRIxPTR")", kva, ((uintptr_t)kva & PAGE_MASK));

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	/*
	 * Check to see if this used direct mapped memory.  Get its physical
	 * address and try to map it.  If the resultant matches the kva, then
	 * it was and so we can just return since we have nothing to free up.
	 */
	paddr_t pa;
	vaddr_t va;
	(void)pmap_extract(pmap_kernel(), (vaddr_t)kva, &pa);
	if (mm_md_direct_mapped_phys(pa, &va) && va == (vaddr_t)kva)
		return;
#endif

	size = round_page(size);
	pmap_kremove((vaddr_t)kva, size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	paddr_t map_flags;
	int i;

	for (i = 0; i < nsegs; i++) {
		KASSERTMSG((off & PAGE_MASK) == 0,
		    "off %#jx (%#x)", (uintmax_t)off, (int)off & PAGE_MASK);
		KASSERTMSG((segs[i].ds_addr & PAGE_MASK) == 0,
		    "ds_addr %#lx (%#x)", segs[i].ds_addr,
		    (int)segs[i].ds_addr & PAGE_MASK);
		KASSERTMSG((segs[i].ds_len & PAGE_MASK) == 0,
		    "ds_len %#lx (%#x)", segs[i].ds_len,
		    (int)segs[i].ds_len & PAGE_MASK);
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		map_flags = 0;
		if (flags & BUS_DMA_PREFETCHABLE)
			map_flags |= ARM_MMAP_WRITECOMBINE;

		return arm_btop((u_long)segs[i].ds_addr + off) | map_flags;

	}

	/* Page not found. */
	return -1;
}

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct vmspace *vm, int flags)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	vaddr_t vaddr = (vaddr_t)buf;
	int error;
	pmap_t pmap;

#ifdef DEBUG_DMA
	printf("_bus_dmamem_load_buffer(buf=%p, len=%lx, flags=%d)\n",
	    buf, buflen, flags);
#endif	/* DEBUG_DMA */

	pmap = vm_map_pmap(&vm->vm_map);

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 */
		bool coherent;
		pmap_extract_coherency(pmap, vaddr, &curaddr, &coherent);

		KASSERTMSG((vaddr & PAGE_MASK) == (curaddr & PAGE_MASK),
		    "va %#lx curaddr %#lx", vaddr, curaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		error = _bus_dmamap_load_paddr(t, map, curaddr, sgsize,
		    coherent);
		if (error)
			return error;

		vaddr += sgsize;
		buflen -= sgsize;
	}

	return 0;
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	KASSERTMSG(boundary == 0 || (boundary & (boundary - 1)) == 0,
	    "invalid boundary %#lx", boundary);

#ifdef DEBUG_DMA
	printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
	    t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
#endif	/* DEBUG_DMA */

	/* Always round the size. */
	size = round_page(size);

	/*
	 * We accept boundaries < size, splitting in multiple segments
	 * if needed.  uvm_pglistalloc does not, so compute an appropriate
	 * boundary: next power of 2 >= size
	 */
	bus_size_t uboundary = boundary;
	if (uboundary <= PAGE_SIZE) {
		uboundary = 0;
	} else {
		while (uboundary < size) {
			uboundary <<= 1;
		}
	}

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, uboundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return error;

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
#ifdef DEBUG_DMA
	printf("alloc: page %lx\n", lastaddr);
#endif	/* DEBUG_DMA */
	m = TAILQ_NEXT(m, pageq.queue);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
		curaddr = VM_PAGE_TO_PHYS(m);
		KASSERTMSG(low <= curaddr && curaddr < high,
		    "uvm_pglistalloc returned non-sensical address %#lx "
		    "(low=%#lx, high=%#lx)\n", curaddr, low, high);
#ifdef DEBUG_DMA
		printf("alloc: page %lx\n", curaddr);
#endif	/* DEBUG_DMA */
		if (curaddr == lastaddr + PAGE_SIZE
		    && (lastaddr & boundary) == (curaddr & boundary))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			if (curseg >= nsegs) {
				uvm_pglistfree(&mlist);
				return EFBIG;
			}
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return 0;
}

/*
 * Check if a memory region intersects with a DMA range, and return the
 * page-rounded intersection if it does.
 */
int
arm32_dma_range_intersect(struct arm32_dma_range *ranges, int nranges,
    paddr_t pa, psize_t size, paddr_t *pap, psize_t *sizep)
{
	struct arm32_dma_range *dr;
	int i;

	if (ranges == NULL)
		return 0;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (dr->dr_sysbase <= pa &&
		    pa < (dr->dr_sysbase + dr->dr_len)) {
			/*
			 * Beginning of region intersects with this range.
			 */
			*pap = trunc_page(pa);
			*sizep = round_page(min(pa + size,
			    dr->dr_sysbase + dr->dr_len) - pa);
			return 1;
		}
		if (pa < dr->dr_sysbase && dr->dr_sysbase < (pa + size)) {
			/*
			 * End of region intersects with this range.
			 */
			*pap = trunc_page(dr->dr_sysbase);
			*sizep = round_page(min((pa + size) - dr->dr_sysbase,
			    dr->dr_len));
			return 1;
		}
	}

	/* No intersection found. */
	return 0;
}

#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
static int
_bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
    bus_size_t size, int flags)
{
	struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	KASSERT(cookie != NULL);

	cookie->id_bouncebuflen = round_page(size);
	error = _bus_dmamem_alloc(t, cookie->id_bouncebuflen,
	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error == 0) {
		error = _bus_dmamem_map(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
		    (void **)&cookie->id_bouncebuf, flags);
		if (error) {
			_bus_dmamem_free(t, cookie->id_bouncesegs,
			    cookie->id_nbouncesegs);
			cookie->id_bouncebuflen = 0;
			cookie->id_nbouncesegs = 0;
		} else {
			cookie->id_flags |= _BUS_DMA_HAS_BOUNCE;
		}
	} else {
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	}

	return error;
}

static void
_bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;

	KASSERT(cookie != NULL);

	_bus_dmamem_unmap(t, cookie->id_bouncebuf, cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs, cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~_BUS_DMA_HAS_BOUNCE;
}

/*
 * This function does the same as uiomove, but takes an explicit
 * direction, and does not update the uio structure.
 */
static int
_bus_dma_uiomove(void *buf, struct uio *uio, size_t n, int direction)
{
	struct iovec *iov;
	int error;
	struct vmspace *vm;
	char *cp;
	size_t resid, cnt;
	int i;

	iov = uio->uio_iov;
	vm = uio->uio_vmspace;
	cp = buf;
	resid = n;

	for (i = 0; i < uio->uio_iovcnt && resid > 0; i++) {
		iov = &uio->uio_iov[i];
		if (iov->iov_len == 0)
			continue;
		cnt = MIN(resid, iov->iov_len);

		if (!VMSPACE_IS_KERNEL_P(vm) &&
		    (curlwp->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
		    != 0) {
			preempt();
		}
		if (direction == UIO_READ) {
			error = copyout_vmspace(vm, cp, iov->iov_base, cnt);
		} else {
			error = copyin_vmspace(vm, iov->iov_base, cp, cnt);
		}
		if (error)
			return error;
		cp += cnt;
		resid -= cnt;
	}
	return 0;
}
#endif /* _ARM32_NEED_BUS_DMA_BOUNCE */

int
_bus_dmatag_subregion(bus_dma_tag_t tag, bus_addr_t min_addr,
    bus_addr_t max_addr, bus_dma_tag_t *newtag, int flags)
{

#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	struct arm32_dma_range *dr;
	bool subset = false;
	size_t nranges = 0;
	size_t i;
	for (i = 0, dr = tag->_ranges; i < tag->_nranges; i++, dr++) {
		if (dr->dr_sysbase <= min_addr
		    && max_addr <= dr->dr_sysbase + dr->dr_len - 1) {
			subset = true;
		}
		if (min_addr <= dr->dr_sysbase + dr->dr_len
		    && max_addr >= dr->dr_sysbase) {
			nranges++;
		}
	}
	if (subset) {
		*newtag = tag;
		/* if the tag must be freed, add a reference */
		if (tag->_tag_needs_free)
			(tag->_tag_needs_free)++;
		return 0;
	}
	if (nranges == 0) {
		nranges = 1;
	}

	const size_t tagsize = sizeof(*tag) + nranges * sizeof(*dr);
	if ((*newtag = kmem_intr_zalloc(tagsize,
	    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return ENOMEM;

	dr = (void *)(*newtag + 1);
	**newtag = *tag;
	(*newtag)->_tag_needs_free = 1;
	(*newtag)->_ranges = dr;
	(*newtag)->_nranges = nranges;

	if (tag->_ranges == NULL) {
		dr->dr_sysbase = min_addr;
		dr->dr_busbase = min_addr;
		dr->dr_len = max_addr + 1 - min_addr;
	} else {
		for (i = 0; i < nranges; i++) {
			if (min_addr > dr->dr_sysbase + dr->dr_len
			    || max_addr < dr->dr_sysbase)
				continue;
			dr[0] = tag->_ranges[i];
			if (dr->dr_sysbase < min_addr) {
				psize_t diff = min_addr - dr->dr_sysbase;
				dr->dr_busbase += diff;
				dr->dr_len -= diff;
				dr->dr_sysbase += diff;
			}
			if (max_addr != 0xffffffff
			    && max_addr + 1 < dr->dr_sysbase + dr->dr_len) {
				dr->dr_len = max_addr + 1 - dr->dr_sysbase;
			}
			dr++;
		}
	}

	return 0;
#else
	return EOPNOTSUPP;
#endif /* _ARM32_NEED_BUS_DMA_BOUNCE */
}

void
_bus_dmatag_destroy(bus_dma_tag_t tag)
{
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	switch (tag->_tag_needs_free) {
	case 0:
		break;				/* not allocated with kmem */
	case 1: {
		const size_t tagsize = sizeof(*tag)
		    + tag->_nranges * sizeof(*tag->_ranges);
		kmem_intr_free(tag, tagsize);	/* last reference to tag */
		break;
	}
	default:
		(tag->_tag_needs_free)--;	/* one less reference */
	}
#endif
}