/*	$NetBSD: bus_dma.c,v 1.86 2014/04/10 02:44:05 matt Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _ARM32_BUS_DMA_PRIVATE

#include "opt_arm_bus_space.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.86 2014/04/10 02:44:05 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/device.h>

#include <uvm/uvm.h>

#include <arm/cpufunc.h>

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
#include <dev/mm.h>
#endif

#ifdef BUSDMA_COUNTERS
static struct evcnt bus_dma_creates =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "creates");
static struct evcnt bus_dma_bounced_creates =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced creates");
static struct evcnt bus_dma_loads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "loads");
static struct evcnt bus_dma_bounced_loads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced loads");
static struct evcnt bus_dma_coherent_loads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "coherent loads");
static struct evcnt bus_dma_read_bounces =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "read bounces");
static struct evcnt bus_dma_write_bounces =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "write bounces");
static struct evcnt bus_dma_bounced_unloads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced unloads");
static struct evcnt bus_dma_unloads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "unloads");
static struct evcnt bus_dma_bounced_destroys =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced destroys");
static struct evcnt bus_dma_destroys =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "destroys");
static struct evcnt bus_dma_sync_prereadwrite =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync prereadwrite");
static struct evcnt bus_dma_sync_preread_begin =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread begin");
static struct evcnt bus_dma_sync_preread =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread");
static struct evcnt bus_dma_sync_preread_tail =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread tail");
static struct evcnt bus_dma_sync_prewrite =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync prewrite");
static struct evcnt bus_dma_sync_postread =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postread");
static struct evcnt bus_dma_sync_postreadwrite =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postreadwrite");
static struct evcnt bus_dma_sync_postwrite =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postwrite");

EVCNT_ATTACH_STATIC(bus_dma_creates);
EVCNT_ATTACH_STATIC(bus_dma_bounced_creates);
EVCNT_ATTACH_STATIC(bus_dma_loads);
EVCNT_ATTACH_STATIC(bus_dma_bounced_loads);
EVCNT_ATTACH_STATIC(bus_dma_coherent_loads);
EVCNT_ATTACH_STATIC(bus_dma_read_bounces);
EVCNT_ATTACH_STATIC(bus_dma_write_bounces);
EVCNT_ATTACH_STATIC(bus_dma_unloads);
EVCNT_ATTACH_STATIC(bus_dma_bounced_unloads);
EVCNT_ATTACH_STATIC(bus_dma_destroys);
EVCNT_ATTACH_STATIC(bus_dma_bounced_destroys);
EVCNT_ATTACH_STATIC(bus_dma_sync_prereadwrite);
EVCNT_ATTACH_STATIC(bus_dma_sync_preread_begin);
EVCNT_ATTACH_STATIC(bus_dma_sync_preread);
EVCNT_ATTACH_STATIC(bus_dma_sync_preread_tail);
EVCNT_ATTACH_STATIC(bus_dma_sync_prewrite);
EVCNT_ATTACH_STATIC(bus_dma_sync_postread);
EVCNT_ATTACH_STATIC(bus_dma_sync_postreadwrite);
EVCNT_ATTACH_STATIC(bus_dma_sync_postwrite);

#define	STAT_INCR(x)	(bus_dma_ ## x.ev_count++)
#else
#define	STAT_INCR(x)	/*(bus_dma_ ## x.ev_count++)*/
#endif

int	_bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct vmspace *, int);
static struct arm32_dma_range *
	_bus_dma_paddr_inrange(struct arm32_dma_range *, int, paddr_t);

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
inline struct arm32_dma_range *
_bus_dma_paddr_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    curaddr < (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}

/*
 * Check to see if the specified busaddr is in an allowed DMA range.
 */
static inline paddr_t
_bus_dma_busaddr_to_paddr(bus_dma_tag_t t, bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	u_int i;

	if (t->_nranges == 0)
		return curaddr;

	for (i = 0, dr = t->_ranges; i < t->_nranges; i++, dr++) {
		if (dr->dr_busbase <= curaddr
		    && curaddr < dr->dr_busbase + dr->dr_len)
			return curaddr - dr->dr_busbase + dr->dr_sysbase;
	}
	panic("%s: curaddr %#lx not in range", __func__, curaddr);
}
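/*
 * Illustrative example (not from the original source; the numbers are
 * hypothetical): with a single range
 *
 *	dr_sysbase = 0x80000000, dr_busbase = 0x00000000, dr_len = 0x10000000
 *
 * _bus_dmamap_load_paddr() below translates CPU physical 0x80123000 to
 * bus address 0x00123000, and _bus_dma_busaddr_to_paddr() performs the
 * inverse, returning 0x80123000 for bus address 0x00123000.  A tag with
 * no ranges (_nranges == 0) leaves addresses unchanged.
 */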
/*
 * Common function to load the specified physical address into the
 * DMA map, coalescing segments and boundary checking as necessary.
 */
static int
_bus_dmamap_load_paddr(bus_dma_tag_t t, bus_dmamap_t map,
    bus_addr_t paddr, bus_size_t size, bool coherent)
{
	bus_dma_segment_t * const segs = map->dm_segs;
	int nseg = map->dm_nsegs;
	bus_addr_t lastaddr;
	bus_addr_t bmask = ~(map->_dm_boundary - 1);
	bus_addr_t curaddr;
	bus_size_t sgsize;
	uint32_t _ds_flags = coherent ? _BUS_DMAMAP_COHERENT : 0;

	if (nseg > 0)
		lastaddr = segs[nseg-1].ds_addr + segs[nseg-1].ds_len;
	else
		lastaddr = 0xdead;

 again:
	sgsize = size;

	/* Make sure we're in an allowed DMA range. */
	if (t->_ranges != NULL) {
		/* XXX cache last result? */
		const struct arm32_dma_range * const dr =
		    _bus_dma_paddr_inrange(t->_ranges, t->_nranges, paddr);
		if (dr == NULL)
			return (EINVAL);

		/*
		 * If this region is coherent, mark the segment as coherent.
		 */
		_ds_flags |= dr->dr_flags & _BUS_DMAMAP_COHERENT;

		/*
		 * In a valid DMA range.  Translate the physical
		 * memory address to an address in the DMA window.
		 */
		curaddr = (paddr - dr->dr_sysbase) + dr->dr_busbase;
#if 0
		printf("%p: %#lx: range %#lx/%#lx/%#lx/%#x: %#x <-- %#lx\n",
		    t, paddr, dr->dr_sysbase, dr->dr_busbase,
		    dr->dr_len, dr->dr_flags, _ds_flags, curaddr);
#endif
	} else
		curaddr = paddr;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	if (map->_dm_boundary > 0) {
		bus_addr_t baddr;	/* next boundary address */

		baddr = (curaddr + map->_dm_boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}

	/*
	 * Insert chunk into a segment, coalescing with the
	 * previous segment if possible.
	 */
	if (nseg > 0 && curaddr == lastaddr &&
	    segs[nseg-1].ds_len + sgsize <= map->dm_maxsegsz &&
	    ((segs[nseg-1]._ds_flags ^ _ds_flags) & _BUS_DMAMAP_COHERENT) == 0 &&
	    (map->_dm_boundary == 0 ||
	     (segs[nseg-1].ds_addr & bmask) == (curaddr & bmask))) {
		/* coalesce */
		segs[nseg-1].ds_len += sgsize;
	} else if (nseg >= map->_dm_segcnt) {
		return (EFBIG);
	} else {
		/* new segment */
		segs[nseg].ds_addr = curaddr;
		segs[nseg].ds_len = sgsize;
		segs[nseg]._ds_flags = _ds_flags;
		nseg++;
	}

	lastaddr = curaddr + sgsize;

	paddr += sgsize;
	size -= sgsize;
	if (size > 0)
		goto again;

	/*
	 * If this buffer was not coherent, clear only the map's COHERENT
	 * flag; all other map flags must be left untouched.
	 */
	map->_dm_flags &= (_ds_flags & _BUS_DMAMAP_COHERENT) | ~_BUS_DMAMAP_COHERENT;

	map->dm_nsegs = nseg;
	return (0);
}
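/*
 * Worked example of the boundary clipping above (illustrative only; the
 * values are hypothetical): with _dm_boundary = 0x10000 the mask is
 * bmask = ~0xffff.  For curaddr = 0x12345 and size = 0x20000,
 *
 *	baddr  = (0x12345 + 0x10000) & ~0xffff = 0x20000
 *	sgsize = min(0x20000, 0x20000 - 0x12345) = 0xdcbb
 *
 * so the first segment ends exactly at the 64KB boundary and the loop
 * ("goto again") continues with the remainder starting at 0x20000.
 */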
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
static int _bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
	    bus_size_t size, int flags);
static void _bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map);
static int _bus_dma_uiomove(void *buf, struct uio *uio, size_t n,
	    int direction);

static int
_bus_dma_load_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    size_t buflen, int buftype, int flags)
{
	struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
	struct vmspace * const vm = vmspace_kernel();
	int error;

	KASSERT(cookie != NULL);
	KASSERT(cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & _BUS_DMA_HAS_BOUNCE) == 0) {
		error = _bus_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	error = _bus_dmamap_load_buffer(t, map, cookie->id_bouncebuf,
	    buflen, vm, flags);
	if (error)
		return (error);

	STAT_INCR(bounced_loads);
	map->dm_mapsize = buflen;
	map->_dm_vmspace = vm;
	map->_dm_buftype = buftype;

	/* ...so _bus_dmamap_sync() knows we're bouncing */
	map->_dm_flags |= _BUS_DMAMAP_IS_BOUNCING;
	cookie->id_flags |= _BUS_DMA_IS_BOUNCING;
	return 0;
}
#endif /* _ARM32_NEED_BUS_DMA_BOUNCE */

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct arm32_bus_dmamap *map;
	void *mapstore;

#ifdef DEBUG_DMA
	printf("dmamap_create: t=%p size=%lx nseg=%x msegsz=%lx boundary=%lx flags=%x\n",
	    t, size, nsegments, maxsegsz, boundary, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	const size_t mapsize = sizeof(struct arm32_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	const int zallocflags = (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
	if ((mapstore = kmem_intr_zalloc(mapsize, zallocflags)) == NULL)
		return (ENOMEM);

	map = (struct arm32_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->_dm_origbuf = NULL;
	map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
	map->_dm_vmspace = vmspace_kernel();
	map->_dm_cookie = NULL;
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;

#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	struct arm32_bus_dma_cookie *cookie;
	int cookieflags;
	void *cookiestore;
	int error;

	cookieflags = 0;

	if (t->_may_bounce != NULL) {
		error = (*t->_may_bounce)(t, map, flags, &cookieflags);
		if (error != 0)
			goto out;
	}

	if (t->_ranges != NULL)
		cookieflags |= _BUS_DMA_MIGHT_NEED_BOUNCE;

	if ((cookieflags & _BUS_DMA_MIGHT_NEED_BOUNCE) == 0) {
		STAT_INCR(creates);
		return 0;
	}

	const size_t cookiesize = sizeof(struct arm32_bus_dma_cookie) +
	    (sizeof(bus_dma_segment_t) * map->_dm_segcnt);

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = kmem_intr_zalloc(cookiesize, zallocflags)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	cookie = (struct arm32_bus_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;
	STAT_INCR(bounced_creates);

	error = _bus_dma_alloc_bouncebuf(t, map, size, flags);
 out:
	if (error)
		_bus_dmamap_destroy(t, map);
#else
	STAT_INCR(creates);
#endif /* _ARM32_NEED_BUS_DMA_BOUNCE */

#ifdef DEBUG_DMA
	printf("dmamap_create:map=%p\n", map);
#endif	/* DEBUG_DMA */
	return (0);
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_destroy: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie != NULL) {
		const size_t cookiesize = sizeof(struct arm32_bus_dma_cookie) +
		    (sizeof(bus_dma_segment_t) * map->_dm_segcnt);

		if (cookie->id_flags & _BUS_DMA_IS_BOUNCING)
			STAT_INCR(bounced_unloads);
		map->dm_nsegs = 0;
		if (cookie->id_flags & _BUS_DMA_HAS_BOUNCE)
			_bus_dma_free_bouncebuf(t, map);
		STAT_INCR(bounced_destroys);
		kmem_intr_free(cookie, cookiesize);
	} else
#endif
	STAT_INCR(destroys);

	if (map->dm_nsegs > 0)
		STAT_INCR(unloads);

	const size_t mapsize = sizeof(struct arm32_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (map->_dm_segcnt - 1));
	kmem_intr_free(map, mapsize);
}
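/*
 * Typical driver usage of the map lifecycle implemented here, per
 * bus_dma(9); a minimal sketch only, with hypothetical sizes and with
 * error handling omitted:
 *
 *	bus_dmamap_t map;
 *	bus_dmamap_create(t, MAXBSIZE, 1, MAXBSIZE, 0, BUS_DMA_WAITOK, &map);
 *	bus_dmamap_load(t, map, buf, len, NULL, BUS_DMA_WAITOK);
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREWRITE);
 *	... start the DMA transfer and wait for it to complete ...
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(t, map);
 *	bus_dmamap_destroy(t, map);
 */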
/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct vmspace *vm;
	int error;

#ifdef DEBUG_DMA
	printf("dmamap_load: t=%p map=%p buf=%p len=%lx p=%p f=%d\n",
	    t, map, buf, buflen, p, flags);
#endif	/* DEBUG_DMA */

	if (map->dm_nsegs > 0) {
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
		struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
		if (cookie != NULL) {
			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
				STAT_INCR(bounced_unloads);
				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
				map->_dm_flags &= ~_BUS_DMAMAP_IS_BOUNCING;
			}
		} else
#endif
		STAT_INCR(unloads);
	}

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
	KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
	    "dm_maxsegsz %lu _dm_maxmaxsegsz %lu",
	    map->dm_maxsegsz, map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	/* _bus_dmamap_load_buffer() clears this if we're not... */
	map->_dm_flags |= _BUS_DMAMAP_COHERENT;

	error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->_dm_vmspace = vm;
		map->_dm_origbuf = buf;
		map->_dm_buftype = _BUS_DMA_BUFTYPE_LINEAR;
		if (map->_dm_flags & _BUS_DMAMAP_COHERENT) {
			STAT_INCR(coherent_loads);
		} else {
			STAT_INCR(loads);
		}
		return 0;
	}
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
	if (cookie != NULL && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
		error = _bus_dma_load_bouncebuf(t, map, buf, buflen,
		    _BUS_DMA_BUFTYPE_LINEAR, flags);
	}
#endif
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	int error;
	struct mbuf *m;

#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%d\n",
	    t, map, m0, flags);
#endif	/* DEBUG_DMA */

	if (map->dm_nsegs > 0) {
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
		struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
		if (cookie != NULL) {
			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
				STAT_INCR(bounced_unloads);
				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
				map->_dm_flags &= ~_BUS_DMAMAP_IS_BOUNCING;
			}
		} else
#endif
		STAT_INCR(unloads);
	}

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
	KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
	    "dm_maxsegsz %lu _dm_maxmaxsegsz %lu",
	    map->dm_maxsegsz, map->_dm_maxmaxsegsz);

	KASSERT(m0->m_flags & M_PKTHDR);

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/* _bus_dmamap_load_paddr() clears this if we're not... */
	map->_dm_flags |= _BUS_DMAMAP_COHERENT;

	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		int offset;
		int remainbytes;
		const struct vm_page * const *pgs;
		paddr_t paddr;
		int size;

		if (m->m_len == 0)
			continue;
		/*
		 * Don't allow reads in read-only mbufs.
		 */
		if (M_ROMAP(m) && (flags & BUS_DMA_READ)) {
			error = EFAULT;
			break;
		}
		switch (m->m_flags & (M_EXT|M_CLUSTER|M_EXT_PAGES)) {
		case M_EXT|M_CLUSTER:
			/* XXX KDASSERT */
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			paddr = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
			size = m->m_len;
			error = _bus_dmamap_load_paddr(t, map, paddr, size,
			    false);
			break;

		case M_EXT|M_EXT_PAGES:
			KASSERT(m->m_ext.ext_buf <= m->m_data);
			KASSERT(m->m_data <=
			    m->m_ext.ext_buf + m->m_ext.ext_size);

			offset = (vaddr_t)m->m_data -
			    trunc_page((vaddr_t)m->m_ext.ext_buf);
			remainbytes = m->m_len;

			/* skip uninteresting pages */
			pgs = (const struct vm_page * const *)
			    m->m_ext.ext_pgs + (offset >> PAGE_SHIFT);

			offset &= PAGE_MASK;	/* offset in the first page */

			/* load each page */
			while (remainbytes > 0) {
				const struct vm_page *pg;

				size = MIN(remainbytes, PAGE_SIZE - offset);

				pg = *pgs++;
				KASSERT(pg);
				paddr = VM_PAGE_TO_PHYS(pg) + offset;

				error = _bus_dmamap_load_paddr(t, map,
				    paddr, size, false);
				if (error)
					break;
				offset = 0;
				remainbytes -= size;
			}
			break;

		case 0:
			paddr = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			size = m->m_len;
			error = _bus_dmamap_load_paddr(t, map, paddr, size,
			    false);
			break;

		default:
			error = _bus_dmamap_load_buffer(t, map, m->m_data,
			    m->m_len, vmspace_kernel(), flags);
		}
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->_dm_origbuf = m0;
		map->_dm_buftype = _BUS_DMA_BUFTYPE_MBUF;
		map->_dm_vmspace = vmspace_kernel();	/* always kernel */
		if (map->_dm_flags & _BUS_DMAMAP_COHERENT) {
			STAT_INCR(coherent_loads);
		} else {
			STAT_INCR(loads);
		}
		return 0;
	}
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
	if (cookie != NULL && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
		error = _bus_dma_load_bouncebuf(t, map, m0, m0->m_pkthdr.len,
		    _BUS_DMA_BUFTYPE_MBUF, flags);
	}
#endif
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	int i, error;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
	    "dm_maxsegsz %lu _dm_maxmaxsegsz %lu",
	    map->dm_maxsegsz, map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	/* _bus_dmamap_load_buffer() clears this if we're not... */
	map->_dm_flags |= _BUS_DMAMAP_COHERENT;

	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    uio->uio_vmspace, flags);

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->_dm_origbuf = uio;
		map->_dm_buftype = _BUS_DMA_BUFTYPE_UIO;
		map->_dm_vmspace = uio->uio_vmspace;
		if (map->_dm_flags & _BUS_DMAMAP_COHERENT) {
			STAT_INCR(coherent_loads);
		} else {
			STAT_INCR(loads);
		}
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_unload: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_origbuf = NULL;
	map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
	map->_dm_vmspace = NULL;
}

static void
_bus_dmamap_sync_segment(vaddr_t va, paddr_t pa, vsize_t len, int ops, bool readonly_p)
{
	KASSERTMSG((va & PAGE_MASK) == (pa & PAGE_MASK),
	    "va %#lx pa %#lx", va, pa);
#if 0
	printf("sync_segment: va=%#lx pa=%#lx len=%#lx ops=%#x ro=%d\n",
	    va, pa, len, ops, readonly_p);
#endif

	switch (ops) {
	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
		if (!readonly_p) {
			STAT_INCR(sync_prereadwrite);
			cpu_dcache_wbinv_range(va, len);
			cpu_sdcache_wbinv_range(va, pa, len);
			break;
		}
		/* FALLTHROUGH */

	case BUS_DMASYNC_PREREAD: {
		const size_t line_size = arm_dcache_align;
		const size_t line_mask = arm_dcache_align_mask;
		vsize_t misalignment = va & line_mask;
		if (misalignment) {
			va -= misalignment;
			pa -= misalignment;
			len += misalignment;
			STAT_INCR(sync_preread_begin);
			cpu_dcache_wbinv_range(va, line_size);
			cpu_sdcache_wbinv_range(va, pa, line_size);
			if (len <= line_size)
				break;
			va += line_size;
			pa += line_size;
			len -= line_size;
		}
		misalignment = len & line_mask;
		len -= misalignment;
		if (len > 0) {
			STAT_INCR(sync_preread);
			cpu_dcache_inv_range(va, len);
			cpu_sdcache_inv_range(va, pa, len);
		}
		if (misalignment) {
			va += len;
			pa += len;
			STAT_INCR(sync_preread_tail);
			cpu_dcache_wbinv_range(va, line_size);
			cpu_sdcache_wbinv_range(va, pa, line_size);
		}
		break;
	}

	case BUS_DMASYNC_PREWRITE:
		STAT_INCR(sync_prewrite);
		cpu_dcache_wb_range(va, len);
		cpu_sdcache_wb_range(va, pa, len);
		break;

#ifdef CPU_CORTEX
	/*
	 * Cortex CPUs can do speculative loads so we need to clean the cache
	 * after a DMA read to deal with any speculatively loaded cache lines.
	 * Since these can't be dirty, we can just invalidate them and don't
	 * have to worry about having to write back their contents.
	 */
	case BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE:
		STAT_INCR(sync_postreadwrite);
		cpu_dcache_inv_range(va, len);
		cpu_sdcache_inv_range(va, pa, len);
		break;
	case BUS_DMASYNC_POSTREAD:
		STAT_INCR(sync_postread);
		cpu_dcache_inv_range(va, len);
		cpu_sdcache_inv_range(va, pa, len);
		break;
#endif
	}
}
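/*
 * Example of the PREREAD head/tail handling above (illustrative values
 * only): with arm_dcache_align = 32, a request for va = 0x1004 and
 * len = 0x104 first write-back-invalidates the partial line at 0x1000,
 * then invalidates the fully aligned middle 0x1020..0x10ff, and finally
 * write-back-invalidates the partial line at 0x1100, so that dirty data
 * sharing a cache line with either end of the DMA buffer is not lost.
 */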
static inline void
_bus_dmamap_sync_linear(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_dma_segment_t *ds = map->dm_segs;
	vaddr_t va = (vaddr_t) map->_dm_origbuf;
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	if (map->_dm_flags & _BUS_DMAMAP_IS_BOUNCING) {
		struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
		va = (vaddr_t) cookie->id_bouncebuf;
	}
#endif

	while (len > 0) {
		while (offset >= ds->ds_len) {
			offset -= ds->ds_len;
			va += ds->ds_len;
			ds++;
		}

		paddr_t pa = _bus_dma_busaddr_to_paddr(t, ds->ds_addr + offset);
		size_t seglen = min(len, ds->ds_len - offset);

		if ((ds->_ds_flags & _BUS_DMAMAP_COHERENT) == 0)
			_bus_dmamap_sync_segment(va + offset, pa, seglen, ops,
			    false);

		offset += seglen;
		len -= seglen;
	}
}

static inline void
_bus_dmamap_sync_mbuf(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t offset,
    bus_size_t len, int ops)
{
	bus_dma_segment_t *ds = map->dm_segs;
	struct mbuf *m = map->_dm_origbuf;
	bus_size_t voff = offset;
	bus_size_t ds_off = offset;

	while (len > 0) {
		/* Find the current dma segment */
		while (ds_off >= ds->ds_len) {
			ds_off -= ds->ds_len;
			ds++;
		}
		/* Find the current mbuf. */
		while (voff >= m->m_len) {
			voff -= m->m_len;
			m = m->m_next;
		}

		/*
		 * Now at the first mbuf to sync; nail each one until
		 * we have exhausted the length.
		 */
		vsize_t seglen = min(len, min(m->m_len - voff, ds->ds_len - ds_off));
		vaddr_t va = mtod(m, vaddr_t) + voff;
		paddr_t pa = _bus_dma_busaddr_to_paddr(t, ds->ds_addr + ds_off);

		/*
		 * We can save a lot of work here if we know the mapping
		 * is read-only at the MMU:
		 *
		 * If a mapping is read-only, no dirty cache blocks will
		 * exist for it.  If a writable mapping was made read-only,
		 * we know any dirty cache lines for the range will have
		 * been cleaned for us already.  Therefore, if the upper
		 * layer can tell us we have a read-only mapping, we can
		 * skip all cache cleaning.
		 *
		 * NOTE: This only works if we know the pmap cleans pages
		 * before making a read-write -> read-only transition.  If
		 * this ever becomes non-true (e.g. Physically Indexed
		 * cache), this will have to be revisited.
		 */

		if ((ds->_ds_flags & _BUS_DMAMAP_COHERENT) == 0)
			_bus_dmamap_sync_segment(va, pa, seglen, ops,
			    M_ROMAP(m));
		voff += seglen;
		ds_off += seglen;
		len -= seglen;
	}
}

static inline void
_bus_dmamap_sync_uio(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_dma_segment_t *ds = map->dm_segs;
	struct uio *uio = map->_dm_origbuf;
	struct iovec *iov = uio->uio_iov;
	bus_size_t voff = offset;
	bus_size_t ds_off = offset;

	while (len > 0) {
		/* Find the current dma segment */
		while (ds_off >= ds->ds_len) {
			ds_off -= ds->ds_len;
			ds++;
		}

		/* Find the current iovec. */
		while (voff >= iov->iov_len) {
			voff -= iov->iov_len;
			iov++;
		}

		/*
		 * Now at the first iovec to sync; nail each one until
		 * we have exhausted the length.
		 */
		vsize_t seglen = min(len, min(iov->iov_len - voff, ds->ds_len - ds_off));
		vaddr_t va = (vaddr_t) iov->iov_base + voff;
		paddr_t pa = _bus_dma_busaddr_to_paddr(t, ds->ds_addr + ds_off);

		if ((ds->_ds_flags & _BUS_DMAMAP_COHERENT) == 0)
			_bus_dmamap_sync_segment(va, pa, seglen, ops, false);

		voff += seglen;
		ds_off += seglen;
		len -= seglen;
	}
}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 *
 * This version works for the Virtually Indexed Virtually Tagged
 * cache found on 32-bit ARM processors.
 *
 * XXX Should have separate versions for write-through vs.
 * XXX write-back caches.  We currently assume write-back
 * XXX here, which is not as efficient as it could be for
 * XXX the write-through case.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
#ifdef DEBUG_DMA
	printf("dmamap_sync: t=%p map=%p offset=%lx len=%lx ops=%x\n",
	    t, map, offset, len, ops);
#endif	/* DEBUG_DMA */

	/*
	 * Mixing of PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

	KASSERTMSG(offset < map->dm_mapsize,
	    "offset %lu mapsize %lu",
	    offset, map->dm_mapsize);
	KASSERTMSG(len > 0 && offset + len <= map->dm_mapsize,
	    "len %lu offset %lu mapsize %lu",
	    len, offset, map->dm_mapsize);

	/*
	 * For a virtually-indexed write-back cache, we need
	 * to do the following things:
	 *
	 *	PREREAD -- Invalidate the D-cache.  We do this
	 *	here in case a write-back is required by the back-end.
	 *
	 *	PREWRITE -- Write-back the D-cache.  Note that if
	 *	we are doing a PREREAD|PREWRITE, we can collapse
	 *	the whole thing into a single Wb-Inv.
	 *
	 *	POSTREAD -- Re-invalidate the D-cache in case speculative
	 *	memory accesses caused cachelines to become valid with now
	 *	invalid data.
	 *
	 *	POSTWRITE -- Nothing.
	 */
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	const bool bouncing = (map->_dm_flags & _BUS_DMAMAP_IS_BOUNCING);
#else
	const bool bouncing = false;
#endif

	const int pre_ops = ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
#ifdef CPU_CORTEX
	const int post_ops = ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
#else
	const int post_ops = 0;
#endif
	if (!bouncing && pre_ops == 0 && post_ops == BUS_DMASYNC_POSTWRITE) {
		STAT_INCR(sync_postwrite);
		return;
	}
	KASSERTMSG(bouncing || pre_ops != 0 || (post_ops & BUS_DMASYNC_POSTREAD),
	    "pre_ops %#x post_ops %#x", pre_ops, post_ops);
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	if (bouncing && (ops & BUS_DMASYNC_PREWRITE)) {
		struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
		STAT_INCR(write_bounces);
		char * const dataptr = (char *)cookie->id_bouncebuf + offset;
		/*
		 * Copy the caller's buffer to the bounce buffer.
		 */
		switch (map->_dm_buftype) {
		case _BUS_DMA_BUFTYPE_LINEAR:
			memcpy(dataptr, cookie->id_origlinearbuf + offset, len);
			break;
		case _BUS_DMA_BUFTYPE_MBUF:
			m_copydata(cookie->id_origmbuf, offset, len, dataptr);
			break;
		case _BUS_DMA_BUFTYPE_UIO:
			_bus_dma_uiomove(dataptr, cookie->id_origuio, len, UIO_WRITE);
			break;
#ifdef DIAGNOSTIC
		case _BUS_DMA_BUFTYPE_RAW:
			panic("_bus_dmamap_sync(pre): _BUS_DMA_BUFTYPE_RAW");
			break;

		case _BUS_DMA_BUFTYPE_INVALID:
			panic("_bus_dmamap_sync(pre): _BUS_DMA_BUFTYPE_INVALID");
			break;

		default:
			panic("_bus_dmamap_sync(pre): map %p: unknown buffer type %d\n",
			    map, map->_dm_buftype);
			break;
#endif /* DIAGNOSTIC */
		}
	}
#endif /* _ARM32_NEED_BUS_DMA_BOUNCE */

	/* Skip cache frobbing if mapping was COHERENT. */
	if (!bouncing && (map->_dm_flags & _BUS_DMAMAP_COHERENT)) {
		/* Drain the write buffer. */
		if (pre_ops & BUS_DMASYNC_PREWRITE)
			cpu_drain_writebuf();
		return;
	}

#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	if (bouncing && ((map->_dm_flags & _BUS_DMAMAP_COHERENT) || pre_ops == 0)) {
		goto bounce_it;
	}
#endif /* _ARM32_NEED_BUS_DMA_BOUNCE */

#ifndef ARM_MMU_EXTENDED
	/*
	 * If the mapping belongs to a non-kernel vmspace, and the
	 * vmspace has not been active since the last time a full
	 * cache flush was performed, we don't need to do anything.
	 */
	if (__predict_false(!VMSPACE_IS_KERNEL_P(map->_dm_vmspace) &&
	    vm_map_pmap(&map->_dm_vmspace->vm_map)->pm_cstate.cs_cache_d == 0))
		return;
#endif

	int buftype = map->_dm_buftype;
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	if (bouncing) {
		buftype = _BUS_DMA_BUFTYPE_LINEAR;
	}
#endif

	switch (buftype) {
	case _BUS_DMA_BUFTYPE_LINEAR:
		_bus_dmamap_sync_linear(t, map, offset, len, ops);
		break;

	case _BUS_DMA_BUFTYPE_MBUF:
		_bus_dmamap_sync_mbuf(t, map, offset, len, ops);
		break;

	case _BUS_DMA_BUFTYPE_UIO:
		_bus_dmamap_sync_uio(t, map, offset, len, ops);
		break;

	case _BUS_DMA_BUFTYPE_RAW:
		panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_RAW");
		break;

	case _BUS_DMA_BUFTYPE_INVALID:
		panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_INVALID");
		break;

	default:
		panic("_bus_dmamap_sync: map %p: unknown buffer type %d\n",
		    map, map->_dm_buftype);
	}

	/* Drain the write buffer. */
	cpu_drain_writebuf();

#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
 bounce_it:
	if (!bouncing || (ops & BUS_DMASYNC_POSTREAD) == 0)
		return;

	struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
	char * const dataptr = (char *)cookie->id_bouncebuf + offset;
	STAT_INCR(read_bounces);
	/*
	 * Copy the bounce buffer to the caller's buffer.
	 */
	switch (map->_dm_buftype) {
	case _BUS_DMA_BUFTYPE_LINEAR:
		memcpy(cookie->id_origlinearbuf + offset, dataptr, len);
		break;

	case _BUS_DMA_BUFTYPE_MBUF:
		m_copyback(cookie->id_origmbuf, offset, len, dataptr);
		break;

	case _BUS_DMA_BUFTYPE_UIO:
		_bus_dma_uiomove(dataptr, cookie->id_origuio, len, UIO_READ);
		break;
#ifdef DIAGNOSTIC
	case _BUS_DMA_BUFTYPE_RAW:
		panic("_bus_dmamap_sync(post): _BUS_DMA_BUFTYPE_RAW");
		break;

	case _BUS_DMA_BUFTYPE_INVALID:
		panic("_bus_dmamap_sync(post): _BUS_DMA_BUFTYPE_INVALID");
		break;

	default:
		panic("_bus_dmamap_sync(post): map %p: unknown buffer type %d\n",
		    map, map->_dm_buftype);
		break;
#endif
	}
#endif /* _ARM32_NEED_BUS_DMA_BOUNCE */
}

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */

extern paddr_t physical_start;
extern paddr_t physical_end;

int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	struct arm32_dma_range *dr;
	int error, i;

#ifdef DEBUG_DMA
	printf("dmamem_alloc t=%p size=%lx align=%lx boundary=%lx "
	    "segs=%p nsegs=%x rsegs=%p flags=%x\n", t, size, alignment,
	    boundary, segs, nsegs, rsegs, flags);
#endif

	if ((dr = t->_ranges) != NULL) {
		error = ENOMEM;
		for (i = 0; i < t->_nranges; i++, dr++) {
			if (dr->dr_len == 0
			    || (dr->dr_flags & _BUS_DMAMAP_NOALLOC))
				continue;
			error = _bus_dmamem_alloc_range(t, size, alignment,
			    boundary, segs, nsegs, rsegs, flags,
			    trunc_page(dr->dr_sysbase),
			    trunc_page(dr->dr_sysbase + dr->dr_len));
			if (error == 0)
				break;
		}
	} else {
		error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
		    segs, nsegs, rsegs, flags, trunc_page(physical_start),
		    trunc_page(physical_end));
	}

#ifdef DEBUG_DMA
	printf("dmamem_alloc: =%d\n", error);
#endif

	return(error);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

#ifdef DEBUG_DMA
	printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
#endif	/* DEBUG_DMA */

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}
	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	paddr_t pa;
	int curseg;
	const uvm_flag_t kmflags = UVM_KMF_VAONLY
	    | ((flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0);
	vsize_t align = 0;

#ifdef DEBUG_DMA
	printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n", t,
	    segs, nsegs, (unsigned long)size, flags);
#endif	/* DEBUG_DMA */

#ifdef PMAP_MAP_POOLPAGE
	/*
	 * If all of memory is mapped, and we are mapping a single physically
	 * contiguous area then this area is already mapped.  Let's see if we
	 * can avoid having a separate mapping for it.
	 */
	if (nsegs == 1) {
		/*
		 * If this is a non-COHERENT mapping, then the existing kernel
		 * mapping is already compatible with it.
		 */
		bool direct_mapable = (flags & BUS_DMA_COHERENT) == 0;
		pa = segs[0].ds_addr;

		/*
		 * This is a COHERENT mapping which, unless this address is in
		 * a COHERENT dma range, will not be compatible.
		 */
		if (t->_ranges != NULL) {
			const struct arm32_dma_range * const dr =
			    _bus_dma_paddr_inrange(t->_ranges, t->_nranges, pa);
			if (dr != NULL
			    && (dr->dr_flags & _BUS_DMAMAP_COHERENT)) {
				direct_mapable = true;
			}
		}

		if (direct_mapable) {
			*kvap = (void *)PMAP_MAP_POOLPAGE(pa);
#ifdef DEBUG_DMA
			printf("dmamem_map: =%p\n", *kvap);
#endif	/* DEBUG_DMA */
			return 0;
		}
	}
#endif

	size = round_page(size);
	if (__predict_true(size > L2_L_SIZE)) {
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
		if (size >= L1_SS_SIZE)
			align = L1_SS_SIZE;
		else
#endif
		if (size >= L1_S_SIZE)
			align = L1_S_SIZE;
		else
			align = L2_L_SIZE;
	}

	va = uvm_km_alloc(kernel_map, size, align, kmflags);
	if (__predict_false(va == 0 && align > 0)) {
		align = 0;
		va = uvm_km_alloc(kernel_map, size, 0, kmflags);
	}

	if (va == 0)
		return (ENOMEM);

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (pa = segs[curseg].ds_addr;
		    pa < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    pa += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			bool uncached = (flags & BUS_DMA_COHERENT);
#ifdef DEBUG_DMA
			printf("wiring p%lx to v%lx", pa, va);
#endif	/* DEBUG_DMA */
			if (size == 0)
				panic("_bus_dmamem_map: size botch");

			const struct arm32_dma_range * const dr =
			    _bus_dma_paddr_inrange(t->_ranges, t->_nranges, pa);
			/*
			 * If this dma region is coherent then there is
			 * no need for an uncached mapping.
			 */
			if (dr != NULL
			    && (dr->dr_flags & _BUS_DMAMAP_COHERENT)) {
				uncached = false;
			}

			pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE,
			    PMAP_WIRED | (uncached ? PMAP_NOCACHE : 0));
		}
	}
	pmap_update(pmap_kernel());
#ifdef DEBUG_DMA
	printf("dmamem_map: =%p\n", *kvap);
#endif	/* DEBUG_DMA */
	return (0);
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DEBUG_DMA
	printf("dmamem_unmap: t=%p kva=%p size=%zx\n", t, kva, size);
#endif	/* DEBUG_DMA */
	KASSERTMSG(((uintptr_t)kva & PAGE_MASK) == 0,
	    "kva %p (%#"PRIxPTR")", kva, ((uintptr_t)kva & PAGE_MASK));

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	/*
	 * Check to see if this used direct mapped memory.  Get its physical
	 * address and try to map it.  If the resultant matches the kva, then
	 * it was and so we can just return since we have nothing to free up.
	 */
	paddr_t pa;
	vaddr_t va;
	(void)pmap_extract(pmap_kernel(), (vaddr_t)kva, &pa);
	if (mm_md_direct_mapped_phys(pa, &va) && va == (vaddr_t)kva)
		return;
#endif

	size = round_page(size);
	pmap_kremove((vaddr_t)kva, size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	paddr_t map_flags;
	int i;

	for (i = 0; i < nsegs; i++) {
		KASSERTMSG((off & PAGE_MASK) == 0,
		    "off %#qx (%#x)", off, (int)off & PAGE_MASK);
		KASSERTMSG((segs[i].ds_addr & PAGE_MASK) == 0,
		    "ds_addr %#lx (%#x)", segs[i].ds_addr,
		    (int)segs[i].ds_addr & PAGE_MASK);
		KASSERTMSG((segs[i].ds_len & PAGE_MASK) == 0,
		    "ds_len %#lx (%#x)", segs[i].ds_len,
		    (int)segs[i].ds_len & PAGE_MASK);
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		map_flags = 0;
		if (flags & BUS_DMA_PREFETCHABLE)
			map_flags |= ARM32_MMAP_WRITECOMBINE;

		return (arm_btop((u_long)segs[i].ds_addr + off) | map_flags);

	}

	/* Page not found. */
	return (-1);
}

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct vmspace *vm, int flags)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	vaddr_t vaddr = (vaddr_t)buf;
	int error;
	pmap_t pmap;

#ifdef DEBUG_DMA
	printf("_bus_dmamap_load_buffer(buf=%p, len=%lx, flags=%d)\n",
	    buf, buflen, flags);
#endif	/* DEBUG_DMA */

	pmap = vm_map_pmap(&vm->vm_map);

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Doesn't support checking for coherent mappings
		 * XXX in user address space.
		 */
		bool coherent;
		if (__predict_true(pmap == pmap_kernel())) {
			pd_entry_t *pde;
			pt_entry_t *ptep;
			(void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
			if (__predict_false(pmap_pde_section(pde))) {
				paddr_t s_frame = L1_S_FRAME;
				paddr_t s_offset = L1_S_OFFSET;
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
				if (__predict_false(pmap_pde_supersection(pde))) {
					s_frame = L1_SS_FRAME;
					s_offset = L1_SS_OFFSET;
				}
#endif
				curaddr = (*pde & s_frame) | (vaddr & s_offset);
				coherent = (*pde & L1_S_CACHE_MASK) == 0;
			} else {
				pt_entry_t pte = *ptep;
				KDASSERTMSG((pte & L2_TYPE_MASK) != L2_TYPE_INV,
				    "va=%#"PRIxVADDR" pde=%#x ptep=%p pte=%#x",
				    vaddr, *pde, ptep, pte);
				if (__predict_false((pte & L2_TYPE_MASK)
				    == L2_TYPE_L)) {
					curaddr = (pte & L2_L_FRAME) |
					    (vaddr & L2_L_OFFSET);
					coherent = (pte & L2_L_CACHE_MASK) == 0;
				} else {
					curaddr = (pte & ~PAGE_MASK) |
					    (vaddr & PAGE_MASK);
					coherent = (pte & L2_S_CACHE_MASK) == 0;
				}
			}
		} else {
			(void) pmap_extract(pmap, vaddr, &curaddr);
			coherent = false;
		}
		KASSERTMSG((vaddr & PAGE_MASK) == (curaddr & PAGE_MASK),
		    "va %#lx curaddr %#lx", vaddr, curaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		error = _bus_dmamap_load_paddr(t, map, curaddr, sgsize,
		    coherent);
		if (error)
			return (error);

		vaddr += sgsize;
		buflen -= sgsize;
	}

	return (0);
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	KASSERTMSG(boundary == 0 || (boundary & (boundary-1)) == 0,
	    "invalid boundary %#lx", boundary);

#ifdef DEBUG_DMA
	printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
	    t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
#endif	/* DEBUG_DMA */

	/* Always round the size. */
	size = round_page(size);

	/*
	 * We accept boundaries < size, splitting in multiple segments
	 * if needed. uvm_pglistalloc does not, so compute an appropriate
	 * boundary: next power of 2 >= size
	 */
	bus_size_t uboundary = boundary;
	if (uboundary <= PAGE_SIZE) {
		uboundary = 0;
	} else {
		while (uboundary < size) {
			uboundary <<= 1;
		}
	}
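	/*
	 * Example (hypothetical numbers): for size = 0x5000 and a caller
	 * boundary of 0x2000, uboundary is doubled 0x2000 -> 0x4000 ->
	 * 0x8000, the first power of two that is >= size, so
	 * uvm_pglistalloc() can satisfy the request, while the stricter
	 * 0x2000 boundary is still honoured later, at map load time, by
	 * _bus_dmamap_load_paddr().
	 */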
	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, uboundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
#ifdef DEBUG_DMA
	printf("alloc: page %lx\n", lastaddr);
#endif	/* DEBUG_DMA */
	m = TAILQ_NEXT(m, pageq.queue);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
		curaddr = VM_PAGE_TO_PHYS(m);
		KASSERTMSG(low <= curaddr && curaddr < high,
		    "uvm_pglistalloc returned non-sensical address %#lx "
		    "(low=%#lx, high=%#lx)\n", curaddr, low, high);
#ifdef DEBUG_DMA
		printf("alloc: page %lx\n", curaddr);
#endif	/* DEBUG_DMA */
		if (curaddr == lastaddr + PAGE_SIZE
		    && (lastaddr & boundary) == (curaddr & boundary))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			if (curseg >= nsegs) {
				uvm_pglistfree(&mlist);
				return EFBIG;
			}
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}

/*
 * Check if a memory region intersects with a DMA range, and return the
 * page-rounded intersection if it does.
 */
int
arm32_dma_range_intersect(struct arm32_dma_range *ranges, int nranges,
    paddr_t pa, psize_t size, paddr_t *pap, psize_t *sizep)
{
	struct arm32_dma_range *dr;
	int i;

	if (ranges == NULL)
		return (0);

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (dr->dr_sysbase <= pa &&
		    pa < (dr->dr_sysbase + dr->dr_len)) {
			/*
			 * Beginning of region intersects with this range.
			 */
			*pap = trunc_page(pa);
			*sizep = round_page(min(pa + size,
			    dr->dr_sysbase + dr->dr_len) - pa);
			return (1);
		}
		if (pa < dr->dr_sysbase && dr->dr_sysbase < (pa + size)) {
			/*
			 * End of region intersects with this range.
			 */
			*pap = trunc_page(dr->dr_sysbase);
			*sizep = round_page(min((pa + size) - dr->dr_sysbase,
			    dr->dr_len));
			return (1);
		}
	}

	/* No intersection found. */
	return (0);
}

#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
static int
_bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
    bus_size_t size, int flags)
{
	struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	KASSERT(cookie != NULL);

	cookie->id_bouncebuflen = round_page(size);
	error = _bus_dmamem_alloc(t, cookie->id_bouncebuflen,
	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error == 0) {
		error = _bus_dmamem_map(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
		    (void **)&cookie->id_bouncebuf, flags);
		if (error) {
			_bus_dmamem_free(t, cookie->id_bouncesegs,
			    cookie->id_nbouncesegs);
			cookie->id_bouncebuflen = 0;
			cookie->id_nbouncesegs = 0;
		} else {
			cookie->id_flags |= _BUS_DMA_HAS_BOUNCE;
		}
	} else {
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	}

	return (error);
}

static void
_bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;

	KASSERT(cookie != NULL);

	_bus_dmamem_unmap(t, cookie->id_bouncebuf, cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs, cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~_BUS_DMA_HAS_BOUNCE;
}

/*
 * This function does the same as uiomove, but takes an explicit
 * direction, and does not update the uio structure.
 */
static int
_bus_dma_uiomove(void *buf, struct uio *uio, size_t n, int direction)
{
	struct iovec *iov;
	int error;
	struct vmspace *vm;
	char *cp;
	size_t resid, cnt;
	int i;

	iov = uio->uio_iov;
	vm = uio->uio_vmspace;
	cp = buf;
	resid = n;

	for (i = 0; i < uio->uio_iovcnt && resid > 0; i++) {
		iov = &uio->uio_iov[i];
		if (iov->iov_len == 0)
			continue;
		cnt = MIN(resid, iov->iov_len);

		if (!VMSPACE_IS_KERNEL_P(vm) &&
		    (curlwp->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
		    != 0) {
			preempt();
		}
		if (direction == UIO_READ) {
			error = copyout_vmspace(vm, cp, iov->iov_base, cnt);
		} else {
			error = copyin_vmspace(vm, iov->iov_base, cp, cnt);
		}
		if (error)
			return (error);
		cp += cnt;
		resid -= cnt;
	}
	return (0);
}
#endif /* _ARM32_NEED_BUS_DMA_BOUNCE */
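/*
 * Illustrative use of the subregion interface below (a sketch only; the
 * device and addresses are hypothetical): a driver whose device can only
 * address the first 256MB of the bus could derive a restricted tag with
 *
 *	bus_dma_tag_t subtag;
 *	error = bus_dmatag_subregion(sc->sc_dmat, 0x00000000, 0x0fffffff,
 *	    &subtag, BUS_DMA_WAITOK);
 *
 * and release it again with bus_dmatag_destroy(subtag) when detaching.
 */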
int
_bus_dmatag_subregion(bus_dma_tag_t tag, bus_addr_t min_addr,
    bus_addr_t max_addr, bus_dma_tag_t *newtag, int flags)
{

#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	struct arm32_dma_range *dr;
	bool subset = false;
	size_t nranges = 0;
	size_t i;
	for (i = 0, dr = tag->_ranges; i < tag->_nranges; i++, dr++) {
		if (dr->dr_sysbase <= min_addr
		    && max_addr <= dr->dr_sysbase + dr->dr_len - 1) {
			subset = true;
		}
		if (min_addr <= dr->dr_sysbase + dr->dr_len
		    && max_addr >= dr->dr_sysbase) {
			nranges++;
		}
	}
	if (subset) {
		*newtag = tag;
		/* if the tag must be freed, add a reference */
		if (tag->_tag_needs_free)
			(tag->_tag_needs_free)++;
		return 0;
	}
	if (nranges == 0) {
		nranges = 1;
	}

	const size_t tagsize = sizeof(*tag) + nranges * sizeof(*dr);
	if ((*newtag = kmem_intr_zalloc(tagsize,
	    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return ENOMEM;

	dr = (void *)(*newtag + 1);
	**newtag = *tag;
	(*newtag)->_tag_needs_free = 1;
	(*newtag)->_ranges = dr;
	(*newtag)->_nranges = nranges;

	if (tag->_ranges == NULL) {
		dr->dr_sysbase = min_addr;
		dr->dr_busbase = min_addr;
		dr->dr_len = max_addr + 1 - min_addr;
	} else {
		for (i = 0; i < nranges; i++) {
			if (min_addr > dr->dr_sysbase + dr->dr_len
			    || max_addr < dr->dr_sysbase)
				continue;
			dr[0] = tag->_ranges[i];
			if (dr->dr_sysbase < min_addr) {
				psize_t diff = min_addr - dr->dr_sysbase;
				dr->dr_busbase += diff;
				dr->dr_len -= diff;
				dr->dr_sysbase += diff;
			}
			if (max_addr != 0xffffffff
			    && max_addr + 1 < dr->dr_sysbase + dr->dr_len) {
				dr->dr_len = max_addr + 1 - dr->dr_sysbase;
			}
			dr++;
		}
	}

	return 0;
#else
	return EOPNOTSUPP;
#endif /* _ARM32_NEED_BUS_DMA_BOUNCE */
}

void
_bus_dmatag_destroy(bus_dma_tag_t tag)
{
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	switch (tag->_tag_needs_free) {
	case 0:
		break;				/* not allocated with kmem */
	case 1: {
		const size_t tagsize = sizeof(*tag)
		    + tag->_nranges * sizeof(*tag->_ranges);
		kmem_intr_free(tag, tagsize);	/* last reference to tag */
		break;
	}
	default:
		(tag->_tag_needs_free)--;	/* one less reference */
	}
#endif
}