/* $NetBSD: bus_dma.c,v 1.9 2024/12/10 07:42:03 skrll Exp $ */

/*-
 * Copyright (c) 1996, 1997, 1998, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _RISCV_BUS_DMA_PRIVATE
#define _RISCV_NEED_BUS_DMA_BOUNCE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.9 2024/12/10 07:42:03 skrll Exp $");

#include <sys/param.h>

#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>

#include <uvm/uvm.h>

#include <machine/cpufunc.h>

#define BUSDMA_COUNTERS
#ifdef BUSDMA_COUNTERS
static struct evcnt bus_dma_creates =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "creates");
static struct evcnt bus_dma_bounced_creates =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced creates");
static struct evcnt bus_dma_loads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "loads");
static struct evcnt bus_dma_bounced_loads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced loads");
static struct evcnt bus_dma_coherent_loads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "coherent loads");
static struct evcnt bus_dma_read_bounces =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "read bounces");
static struct evcnt bus_dma_write_bounces =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "write bounces");
static struct evcnt bus_dma_bounced_unloads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced unloads");
static struct evcnt bus_dma_bounced_mbuf_loads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced mbuf loads");
static struct evcnt bus_dma_unloads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "unloads");
static struct evcnt bus_dma_bounced_destroys =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced destroys");
static struct evcnt bus_dma_destroys =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "destroys");
static struct evcnt bus_dma_sync_prereadwrite =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync prereadwrite");
static struct evcnt bus_dma_sync_preread_begin =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread begin");
static struct evcnt bus_dma_sync_preread =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread");
static struct evcnt bus_dma_sync_preread_tail =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread tail");
static struct evcnt bus_dma_sync_prewrite =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync prewrite");
static struct evcnt bus_dma_sync_postread =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postread");
static struct evcnt bus_dma_sync_postreadwrite =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postreadwrite");
static struct evcnt bus_dma_sync_postwrite =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postwrite");
static struct evcnt bus_dma_inrange_fail =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "inrange check failed");

static struct evcnt bus_dma_sync_coherent_prereadwrite =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync coherent prereadwrite");
static struct evcnt bus_dma_sync_coherent_preread =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync coherent preread");
static struct evcnt bus_dma_sync_coherent_prewrite =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync coherent prewrite");
static struct evcnt bus_dma_sync_coherent_postread =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync coherent postread");
static struct evcnt bus_dma_sync_coherent_postreadwrite =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync coherent postreadwrite");
static struct evcnt bus_dma_sync_coherent_postwrite =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync coherent postwrite");

EVCNT_ATTACH_STATIC(bus_dma_creates);
EVCNT_ATTACH_STATIC(bus_dma_bounced_creates);
EVCNT_ATTACH_STATIC(bus_dma_loads);
EVCNT_ATTACH_STATIC(bus_dma_bounced_loads);
EVCNT_ATTACH_STATIC(bus_dma_coherent_loads);
EVCNT_ATTACH_STATIC(bus_dma_read_bounces);
EVCNT_ATTACH_STATIC(bus_dma_write_bounces);
EVCNT_ATTACH_STATIC(bus_dma_unloads);
EVCNT_ATTACH_STATIC(bus_dma_bounced_unloads);
EVCNT_ATTACH_STATIC(bus_dma_destroys);
EVCNT_ATTACH_STATIC(bus_dma_bounced_destroys);
EVCNT_ATTACH_STATIC(bus_dma_bounced_mbuf_loads);
EVCNT_ATTACH_STATIC(bus_dma_sync_prereadwrite);
EVCNT_ATTACH_STATIC(bus_dma_sync_preread_begin);
EVCNT_ATTACH_STATIC(bus_dma_sync_preread);
EVCNT_ATTACH_STATIC(bus_dma_sync_preread_tail);
EVCNT_ATTACH_STATIC(bus_dma_sync_prewrite);
EVCNT_ATTACH_STATIC(bus_dma_sync_postread);
EVCNT_ATTACH_STATIC(bus_dma_sync_postreadwrite);
EVCNT_ATTACH_STATIC(bus_dma_sync_postwrite);
EVCNT_ATTACH_STATIC(bus_dma_inrange_fail);

EVCNT_ATTACH_STATIC(bus_dma_sync_coherent_prereadwrite);
EVCNT_ATTACH_STATIC(bus_dma_sync_coherent_preread);
EVCNT_ATTACH_STATIC(bus_dma_sync_coherent_prewrite);
EVCNT_ATTACH_STATIC(bus_dma_sync_coherent_postread);
EVCNT_ATTACH_STATIC(bus_dma_sync_coherent_postreadwrite);
EVCNT_ATTACH_STATIC(bus_dma_sync_coherent_postwrite);

#define STAT_INCR(x)	(bus_dma_ ## x.ev_count++)
#else
#define STAT_INCR(x)	__nothing
#endif

int _bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
    bus_size_t, struct vmspace *, int);

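/*
 * Illustrative sketch (not part of this file): a platform whose DMA
 * engines can only address part of physical memory would describe the
 * allowed window with a ranges array like the hypothetical one below,
 * attached to the bus tag as t->_ranges/t->_nranges.  The array name
 * and addresses are invented for illustration only.
 *
 *	static struct riscv_dma_range example_dma_ranges[] = {
 *		{
 *			.dr_sysbase = 0x80000000,	(CPU physical base)
 *			.dr_busbase = 0x00000000,	(device-visible base)
 *			.dr_len     = 0x80000000,	(2 GiB window)
 *			.dr_flags   = 0,
 *		},
 *	};
 *
 * A physical address outside every range makes _bus_dma_paddr_inrange()
 * below return NULL, which is what pushes a load onto the bounce-buffer
 * path.
 */
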
/*
 * Check to see if the specified page is in an allowed DMA range.
 */
static inline struct riscv_dma_range *
_bus_dma_paddr_inrange(struct riscv_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct riscv_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    curaddr < (dr->dr_sysbase + dr->dr_len))
			return dr;
	}

	return NULL;
}

/*
 * Check to see if the specified busaddr is in an allowed DMA range.
 */
static inline paddr_t
_bus_dma_busaddr_to_paddr(bus_dma_tag_t t, bus_addr_t curaddr)
{
	struct riscv_dma_range *dr;
	u_int i;

	if (t->_nranges == 0)
		return curaddr;

	for (i = 0, dr = t->_ranges; i < t->_nranges; i++, dr++) {
		if (dr->dr_busbase <= curaddr
		    && curaddr < dr->dr_busbase + dr->dr_len)
			return curaddr - dr->dr_busbase + dr->dr_sysbase;
	}
	panic("%s: curaddr %#" PRIxBUSADDR " not in range", __func__, curaddr);
}

/*
 * Common function to load the specified physical address into the
 * DMA map, coalescing segments and boundary checking as necessary.
 */
static int
_bus_dmamap_load_paddr(bus_dma_tag_t t, bus_dmamap_t map,
    bus_addr_t paddr, bus_size_t size, bool coherent)
{
	bus_dma_segment_t * const segs = map->dm_segs;
	int nseg = map->dm_nsegs;
	bus_addr_t lastaddr;
	bus_addr_t bmask = ~(map->_dm_boundary - 1);
	bus_addr_t curaddr;
	bus_size_t sgsize;
	uint32_t _ds_flags = coherent ? _BUS_DMAMAP_COHERENT : 0;

	if (nseg > 0)
		lastaddr = segs[nseg - 1].ds_addr + segs[nseg - 1].ds_len;
	else
		lastaddr = 0xdead;

again:
	sgsize = size;

	/* Make sure we're in an allowed DMA range. */
	if (t->_ranges != NULL) {
		/* XXX cache last result? */
		const struct riscv_dma_range * const dr =
		    _bus_dma_paddr_inrange(t->_ranges, t->_nranges, paddr);
		if (__predict_false(dr == NULL)) {
			STAT_INCR(inrange_fail);
			return EINVAL;
		}

		/*
		 * If this region is coherent, mark the segment as coherent.
		 */
		_ds_flags |= dr->dr_flags & _BUS_DMAMAP_COHERENT;

		/*
		 * In a valid DMA range.  Translate the physical
		 * memory address to an address in the DMA window.
		 */
		curaddr = (paddr - dr->dr_sysbase) + dr->dr_busbase;
#if 0
		printf("%p: %#" PRIxPADDR
		    ": range %#" PRIxPADDR "/%#" PRIxBUSADDR
		    "/%#" PRIxBUSSIZE "/%#" PRIx32 ": %#" PRIx32
		    " <-- %#" PRIxBUSADDR "\n",
		    t, paddr, dr->dr_sysbase, dr->dr_busbase,
		    dr->dr_len, dr->dr_flags, _ds_flags, curaddr);
#endif
	} else
		curaddr = paddr;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	if (map->_dm_boundary > 0) {
		bus_addr_t baddr;	/* next boundary address */

		baddr = (curaddr + map->_dm_boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}

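	/*
	 * Worked example of the boundary clipping above (illustrative
	 * numbers): with _dm_boundary = 0x10000 (64 KiB) and
	 * curaddr = 0x1f000, bmask is ~0xffff, so
	 * baddr = (0x1f000 + 0x10000) & ~0xffff = 0x20000 and the chunk
	 * is clipped to sgsize = 0x20000 - 0x1f000 = 0x1000.  The rest
	 * of the request is handled by the next pass through the
	 * "again" loop, so no single segment ever straddles a boundary.
	 */
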
	/*
	 * Insert chunk into a segment, coalescing with the
	 * previous segment if possible.
	 */
	if (nseg > 0 && curaddr == lastaddr &&
	    segs[nseg - 1].ds_len + sgsize <= map->dm_maxsegsz &&
	    ((segs[nseg - 1]._ds_flags ^ _ds_flags) & _BUS_DMAMAP_COHERENT) == 0 &&
	    (map->_dm_boundary == 0 ||
	     (segs[nseg - 1].ds_addr & bmask) == (curaddr & bmask))) {
		/* coalesce */
		segs[nseg - 1].ds_len += sgsize;
	} else if (__predict_false(nseg >= map->_dm_segcnt)) {
		return EFBIG;
	} else {
		/* new segment */
		segs[nseg].ds_addr = curaddr;
		segs[nseg].ds_len = sgsize;
		segs[nseg]._ds_paddr = curaddr;
		segs[nseg]._ds_flags = _ds_flags;
		nseg++;
	}

	lastaddr = curaddr + sgsize;

	paddr += sgsize;
	size -= sgsize;
	if (size > 0)
		goto again;

	map->_dm_flags &= (_ds_flags & _BUS_DMAMAP_COHERENT);
	map->dm_nsegs = nseg;
	return 0;
}

static int _bus_dma_uiomove(void *buf, struct uio *uio, size_t n,
    int direction);

#ifdef _RISCV_NEED_BUS_DMA_BOUNCE
static int _bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
    bus_size_t size, int flags);
static void _bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map);

static int
_bus_dma_load_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    size_t buflen, int buftype, int flags)
{
	struct riscv_bus_dma_cookie * const cookie = map->_dm_cookie;
	struct vmspace * const vm = vmspace_kernel();
	int error;

	KASSERT(cookie != NULL);
	KASSERT(cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & _BUS_DMA_HAS_BOUNCE) == 0) {
		error = _bus_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (__predict_false(error))
			return error;
	}

	/*
	 * Since we're trying again, clear the previous attempt.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
	/* _bus_dmamap_load_buffer() clears this if we're not... */
	map->_dm_flags |= _BUS_DMAMAP_COHERENT;

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	error = _bus_dmamap_load_buffer(t, map, cookie->id_bouncebuf,
	    buflen, vm, flags);
	if (__predict_false(error))
		return error;

	STAT_INCR(bounced_loads);
	map->dm_mapsize = buflen;
	map->_dm_vmspace = vm;
	map->_dm_buftype = buftype;

	/* ...so _bus_dmamap_sync() knows we're bouncing */
	map->_dm_flags |= _BUS_DMAMAP_IS_BOUNCING;
	cookie->id_flags |= _BUS_DMA_IS_BOUNCING;
	return 0;
}
#endif /* _RISCV_NEED_BUS_DMA_BOUNCE */

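/*
 * Illustrative sketch of how a driver typically drives the functions
 * below through the bus_dma(9) wrappers.  The "sc" softc and buffer
 * names are hypothetical; error handling is elided for brevity.
 *
 *	bus_dmamap_t map;
 *
 *	error = bus_dmamap_create(sc->sc_dmat, MAXBSIZE, 1, MAXBSIZE,
 *	    0, BUS_DMA_WAITOK, &map);
 *	error = bus_dmamap_load(sc->sc_dmat, map, buf, len, NULL,
 *	    BUS_DMA_WAITOK | BUS_DMA_WRITE);
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREWRITE);
 *	... start the transfer, wait for the completion interrupt ...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dmat, map);
 *	bus_dmamap_destroy(sc->sc_dmat, map);
 *
 * The _bus_dmamap_*() implementations below are what those wrappers
 * end up calling on riscv when the bus does not override them.
 */
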
/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct riscv_bus_dmamap *map;
	void *mapstore;
	int error = 0;

#ifdef DEBUG_DMA
	printf("dmamap_create: t=%p size=%#" PRIxBUSSIZE
	    " nseg=%#x msegsz=%#" PRIxBUSSIZE
	    " boundary=%#" PRIxBUSSIZE
	    " flags=%#x\n", t, size, nsegments, maxsegsz, boundary, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	const size_t mapsize = sizeof(struct riscv_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	const int zallocflags = (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
	if ((mapstore = kmem_intr_zalloc(mapsize, zallocflags)) == NULL)
		return ENOMEM;

	map = (struct riscv_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK | BUS_DMA_NOWAIT);
	map->_dm_origbuf = NULL;
	map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
	map->_dm_vmspace = vmspace_kernel();
	map->_dm_cookie = NULL;
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

#ifdef _RISCV_NEED_BUS_DMA_BOUNCE
	struct riscv_bus_dma_cookie *cookie;
	int cookieflags;
	void *cookiestore;

	cookieflags = 0;

	if (t->_may_bounce != NULL) {
		error = (*t->_may_bounce)(t, map, flags, &cookieflags);
		if (error != 0)
			goto out;
	}

	if (t->_ranges != NULL) {
		/*
		 * If ranges are defined, we may have to bounce.  The only
		 * exception is if there is exactly one range that covers
		 * all of physical memory.
		 */
		switch (t->_nranges) {
		case 1:
			if (t->_ranges[0].dr_sysbase == 0 &&
			    t->_ranges[0].dr_len == UINTPTR_MAX) {
				break;
			}
			/* FALLTHROUGH */
		default:
			cookieflags |= _BUS_DMA_MIGHT_NEED_BOUNCE;
		}
	}

	if ((cookieflags & _BUS_DMA_MIGHT_NEED_BOUNCE) == 0) {
		STAT_INCR(creates);
		*dmamp = map;
		return 0;
	}

	const size_t cookiesize = sizeof(struct riscv_bus_dma_cookie) +
	    (sizeof(bus_dma_segment_t) * map->_dm_segcnt);

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = kmem_intr_zalloc(cookiesize, zallocflags)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	cookie = (struct riscv_bus_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;
	STAT_INCR(bounced_creates);

	error = _bus_dma_alloc_bouncebuf(t, map, size, flags);
out:
	if (error)
		_bus_dmamap_destroy(t, map);
	else
		*dmamp = map;
#else
	*dmamp = map;
	STAT_INCR(creates);
#endif /* _RISCV_NEED_BUS_DMA_BOUNCE */
#ifdef DEBUG_DMA
	printf("dmamap_create:map=%p\n", map);
#endif	/* DEBUG_DMA */
	return error;
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_destroy: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */
#ifdef _RISCV_NEED_BUS_DMA_BOUNCE
	struct riscv_bus_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie != NULL) {
		const size_t cookiesize = sizeof(struct riscv_bus_dma_cookie) +
		    (sizeof(bus_dma_segment_t) * map->_dm_segcnt);

		if (cookie->id_flags & _BUS_DMA_IS_BOUNCING)
			STAT_INCR(bounced_unloads);
		map->dm_nsegs = 0;
		if (cookie->id_flags & _BUS_DMA_HAS_BOUNCE)
			_bus_dma_free_bouncebuf(t, map);
		STAT_INCR(bounced_destroys);
		kmem_intr_free(cookie, cookiesize);
	} else
#endif
	STAT_INCR(destroys);

	if (map->dm_nsegs > 0)
		STAT_INCR(unloads);

	const size_t mapsize = sizeof(struct riscv_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (map->_dm_segcnt - 1));
	kmem_intr_free(map, mapsize);
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct vmspace *vm;
	int error;

#ifdef DEBUG_DMA
	printf("dmamap_load: t=%p map=%p buf=%p len=%#" PRIxBUSSIZE
	    " p=%p f=%#x\n", t, map, buf, buflen, p, flags);
#endif	/* DEBUG_DMA */

	if (map->dm_nsegs > 0) {
#ifdef _RISCV_NEED_BUS_DMA_BOUNCE
		struct riscv_bus_dma_cookie *cookie = map->_dm_cookie;
		if (cookie != NULL) {
			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
				STAT_INCR(bounced_unloads);
				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
				map->_dm_flags &= ~_BUS_DMAMAP_IS_BOUNCING;
			}
		} else
#endif
		STAT_INCR(unloads);
	}

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
	KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
	    "dm_maxsegsz %" PRIuBUSSIZE " _dm_maxmaxsegsz %" PRIuBUSSIZE,
	    map->dm_maxsegsz, map->_dm_maxmaxsegsz);

	if (__predict_false(buflen > map->_dm_size))
		return EINVAL;

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	/* _bus_dmamap_load_buffer() clears this if we're not... */
	map->_dm_flags |= _BUS_DMAMAP_COHERENT;

	error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags);
	if (__predict_true(error == 0)) {
		map->dm_mapsize = buflen;
		map->_dm_vmspace = vm;
		map->_dm_origbuf = buf;
		map->_dm_buftype = _BUS_DMA_BUFTYPE_LINEAR;
		if (map->_dm_flags & _BUS_DMAMAP_COHERENT) {
			STAT_INCR(coherent_loads);
		} else {
			STAT_INCR(loads);
		}
		return 0;
	}
#ifdef _RISCV_NEED_BUS_DMA_BOUNCE
	struct riscv_bus_dma_cookie * const cookie = map->_dm_cookie;
	if (cookie != NULL && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
		error = _bus_dma_load_bouncebuf(t, map, buf, buflen,
		    _BUS_DMA_BUFTYPE_LINEAR, flags);
	}
#endif
	return error;
}

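/*
 * Illustrative sketch (hypothetical driver code): a network transmit
 * path usually loads a packet straight from its mbuf chain and defers
 * the actual cache work (or bounce copy) to the PREWRITE sync.  "sc"
 * and "txmap" are invented names.
 *
 *	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
 *	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		... defragment the chain (e.g. m_defrag()) and retry ...
 *	}
 *	bus_dmamap_sync(sc->sc_dmat, txmap, 0, m->m_pkthdr.len,
 *	    BUS_DMASYNC_PREWRITE);
 *
 * EFBIG here is the "ran out of segments" error returned by
 * _bus_dmamap_load_paddr() above.
 */
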
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	struct mbuf *m;
	int error;

#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%#x\n",
	    t, map, m0, flags);
#endif	/* DEBUG_DMA */

	if (map->dm_nsegs > 0) {
#ifdef _RISCV_NEED_BUS_DMA_BOUNCE
		struct riscv_bus_dma_cookie *cookie = map->_dm_cookie;
		if (cookie != NULL) {
			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
				STAT_INCR(bounced_unloads);
				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
				map->_dm_flags &= ~_BUS_DMAMAP_IS_BOUNCING;
			}
		} else
#endif
		STAT_INCR(unloads);
	}

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
	KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
	    "dm_maxsegsz %" PRIuBUSSIZE " _dm_maxmaxsegsz %" PRIuBUSSIZE,
	    map->dm_maxsegsz, map->_dm_maxmaxsegsz);

	KASSERT(m0->m_flags & M_PKTHDR);

	if (__predict_false(m0->m_pkthdr.len > map->_dm_size))
		return EINVAL;

	/* _bus_dmamap_load_paddr() clears this if we're not... */
	map->_dm_flags |= _BUS_DMAMAP_COHERENT;

	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		int offset;
		int remainbytes;
		const struct vm_page * const *pgs;
		paddr_t paddr;
		int size;

		if (m->m_len == 0)
			continue;
		/*
		 * Don't allow reads in read-only mbufs.
		 */
		if (__predict_false(M_ROMAP(m) && (flags & BUS_DMA_READ))) {
			error = EFAULT;
			break;
		}
		switch (m->m_flags & (M_EXT | M_EXT_CLUSTER | M_EXT_PAGES)) {
		case M_EXT | M_EXT_CLUSTER:
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			paddr = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
			size = m->m_len;
			error = _bus_dmamap_load_paddr(t, map, paddr, size,
			    false);
			break;

		case M_EXT | M_EXT_PAGES:
			KASSERT(m->m_ext.ext_buf <= m->m_data);
			KASSERT(m->m_data <=
			    m->m_ext.ext_buf + m->m_ext.ext_size);

			offset = (vaddr_t)m->m_data -
			    trunc_page((vaddr_t)m->m_ext.ext_buf);
			remainbytes = m->m_len;

			/* skip uninteresting pages */
			pgs = (const struct vm_page * const *)
			    m->m_ext.ext_pgs + (offset >> PAGE_SHIFT);

			offset &= PAGE_MASK;	/* offset in the first page */

			/* load each page */
			while (remainbytes > 0) {
				const struct vm_page *pg;

				size = MIN(remainbytes, PAGE_SIZE - offset);

				pg = *pgs++;
				KASSERT(pg);
				paddr = VM_PAGE_TO_PHYS(pg) + offset;

				error = _bus_dmamap_load_paddr(t, map,
				    paddr, size, false);
				if (__predict_false(error))
					break;
				offset = 0;
				remainbytes -= size;
			}
			break;

		case 0:
			paddr = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			size = m->m_len;
			error = _bus_dmamap_load_paddr(t, map, paddr, size,
			    false);
			break;

		default:
			error = _bus_dmamap_load_buffer(t, map, m->m_data,
			    m->m_len, vmspace_kernel(), flags);
		}
	}
	if (__predict_true(error == 0)) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->_dm_origbuf = m0;
		map->_dm_buftype = _BUS_DMA_BUFTYPE_MBUF;
		map->_dm_vmspace = vmspace_kernel();	/* always kernel */
		if (map->_dm_flags & _BUS_DMAMAP_COHERENT) {
			STAT_INCR(coherent_loads);
		} else {
			STAT_INCR(loads);
		}
		return 0;
	}
#ifdef _RISCV_NEED_BUS_DMA_BOUNCE
	struct riscv_bus_dma_cookie * const cookie = map->_dm_cookie;
	if (cookie != NULL && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
		error = _bus_dma_load_bouncebuf(t, map, m0, m0->m_pkthdr.len,
		    _BUS_DMA_BUFTYPE_MBUF, flags);
		STAT_INCR(bounced_mbuf_loads);
	}
#endif
	return error;
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;
	int i, error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
	    "dm_maxsegsz %" PRIuBUSSIZE " _dm_maxmaxsegsz %" PRIuBUSSIZE,
	    map->dm_maxsegsz, map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	/* _bus_dmamap_load_buffer() clears this if we're not... */
	map->_dm_flags |= _BUS_DMAMAP_COHERENT;

	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    uio->uio_vmspace, flags);

		resid -= minlen;
	}
	if (__predict_true(error == 0)) {
		map->dm_mapsize = uio->uio_resid;
		map->_dm_origbuf = uio;
		map->_dm_buftype = _BUS_DMA_BUFTYPE_UIO;
		map->_dm_vmspace = uio->uio_vmspace;
		if (map->_dm_flags & _BUS_DMAMAP_COHERENT) {
			STAT_INCR(coherent_loads);
		} else {
			STAT_INCR(loads);
		}
	}
	return error;
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size0, int flags)
{

	bus_size_t size;
	int i, error = 0;

	/*
	 * Make sure that on error conditions we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (__predict_false(size0 > map->_dm_size))
		return EINVAL;

	for (i = 0, size = size0; i < nsegs && size > 0; i++) {
		bus_dma_segment_t *ds = &segs[i];
		bus_size_t sgsize;

		sgsize = MIN(ds->ds_len, size);
		if (sgsize == 0)
			continue;
		const bool coherent =
		    (ds->_ds_flags & _BUS_DMAMAP_COHERENT) != 0;
		error = _bus_dmamap_load_paddr(t, map, ds->ds_addr,
		    sgsize, coherent);
		if (__predict_false(error != 0))
			break;
		size -= sgsize;
	}

	if (__predict_false(error != 0)) {
		map->dm_mapsize = 0;
		map->dm_nsegs = 0;
		return error;
	}

	/* XXX TBD bounce */

	map->dm_mapsize = size0;
	map->_dm_origbuf = NULL;
	map->_dm_buftype = _BUS_DMA_BUFTYPE_RAW;
	map->_dm_vmspace = NULL;
	return 0;
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_unload: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_origbuf = NULL;
	map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
	map->_dm_vmspace = NULL;
}

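/*
 * Note on the PREREAD handling below (illustrative numbers): with a
 * 64-byte D-cache line (riscv_dcache_align), a buffer starting at
 * va 0x...1010 shares its first line (0x...1000-0x...103f) with
 * unrelated data, so that line must be written back and invalidated
 * rather than just invalidated, or the unrelated bytes could be lost.
 * The same applies to a partial line at the tail.  Only the fully
 * covered middle portion can be invalidated outright, which is why
 * DMA buffers that are cache-line aligned and padded avoid the extra
 * write-backs entirely.
 */
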
static void
_bus_dmamap_sync_segment(vaddr_t va, paddr_t pa, vsize_t len, int ops)
{

	KASSERTMSG((va & PAGE_MASK) == (pa & PAGE_MASK),
	    "va %#" PRIxVADDR " pa %#" PRIxPADDR, va, pa);
#if 0
	printf("sync_segment: va=%#" PRIxVADDR
	    " pa=%#" PRIxPADDR " len=%#" PRIxVSIZE " ops=%#x\n",
	    va, pa, len, ops);
#endif
	switch (ops) {
	case BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE:
		STAT_INCR(sync_prereadwrite);
		cpu_dcache_wbinv_range(va, len);
		cpu_sdcache_wbinv_range(va, pa, len);
		break;

	case BUS_DMASYNC_PREREAD: {
		const vsize_t line_size = riscv_dcache_align;
		const vsize_t line_mask = riscv_dcache_align_mask;
		vsize_t misalignment = va & line_mask;
		if (misalignment) {
			va -= misalignment;
			pa -= misalignment;
			len += misalignment;
			STAT_INCR(sync_preread_begin);
			cpu_dcache_wbinv_range(va, line_size);
			cpu_sdcache_wbinv_range(va, pa, line_size);
			if (len <= line_size)
				break;
			va += line_size;
			pa += line_size;
			len -= line_size;
		}
		misalignment = len & line_mask;
		len -= misalignment;
		if (len > 0) {
			STAT_INCR(sync_preread);
			cpu_dcache_inv_range(va, len);
			cpu_sdcache_inv_range(va, pa, len);
		}
		if (misalignment) {
			va += len;
			pa += len;
			STAT_INCR(sync_preread_tail);
			cpu_dcache_wbinv_range(va, line_size);
			cpu_sdcache_wbinv_range(va, pa, line_size);
		}
		break;
	}

	case BUS_DMASYNC_PREWRITE:
		STAT_INCR(sync_prewrite);
		cpu_dcache_wb_range(va, len);
		cpu_sdcache_wb_range(va, pa, len);
		break;

	/*
	 * CPUs can do speculative loads so we need to clean the cache after
	 * a DMA read to deal with any speculatively loaded cache lines.
	 * Since these can't be dirty, we can just invalidate them and don't
	 * have to worry about having to write back their contents.
	 */
	case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
		STAT_INCR(sync_postreadwrite);
		cpu_dcache_inv_range(va, len);
		cpu_sdcache_inv_range(va, pa, len);
		break;
	case BUS_DMASYNC_POSTREAD:
		STAT_INCR(sync_postread);
		cpu_dcache_inv_range(va, len);
		cpu_sdcache_inv_range(va, pa, len);
		break;
	}
}

static inline void
_bus_dmamap_sync_linear(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_dma_segment_t *ds = map->dm_segs;
	vaddr_t va = (vaddr_t) map->_dm_origbuf;
#ifdef _RISCV_NEED_BUS_DMA_BOUNCE
	if (map->_dm_flags & _BUS_DMAMAP_IS_BOUNCING) {
		struct riscv_bus_dma_cookie * const cookie = map->_dm_cookie;
		va = (vaddr_t) cookie->id_bouncebuf;
	}
#endif

	while (len > 0) {
		while (offset >= ds->ds_len) {
			offset -= ds->ds_len;
			va += ds->ds_len;
			ds++;
		}

		paddr_t pa = _bus_dma_busaddr_to_paddr(t, ds->ds_addr + offset);
		size_t seglen = uimin(len, ds->ds_len - offset);

		if ((ds->_ds_flags & _BUS_DMAMAP_COHERENT) == 0)
			_bus_dmamap_sync_segment(va + offset, pa, seglen, ops);

		offset += seglen;
		len -= seglen;
	}
}

static inline void
_bus_dmamap_sync_mbuf(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t offset,
    bus_size_t len, int ops)
{
	bus_dma_segment_t *ds = map->dm_segs;
	struct mbuf *m = map->_dm_origbuf;
	bus_size_t voff = offset;
	bus_size_t ds_off = offset;

	while (len > 0) {
		/* Find the current dma segment */
		while (ds_off >= ds->ds_len) {
			ds_off -= ds->ds_len;
			ds++;
		}
		/* Find the current mbuf. */
		while (voff >= m->m_len) {
			voff -= m->m_len;
			m = m->m_next;
		}

		/*
		 * Now at the first mbuf to sync; nail each one until
		 * we have exhausted the length.
		 */
		vsize_t seglen = uimin(len, uimin(m->m_len - voff, ds->ds_len - ds_off));
		vaddr_t va = mtod(m, vaddr_t) + voff;
		paddr_t pa = _bus_dma_busaddr_to_paddr(t, ds->ds_addr + ds_off);

		/*
		 * If a mapping is read-only, no dirty cache blocks will
		 * exist for it.  If a writable mapping was made read-only,
		 * we know any dirty cache lines for the range will have
		 * been cleaned for us already.  Therefore, if the upper
		 * layer can tell us we have a read-only mapping, we can
		 * skip all cache cleaning.
		 *
		 * NOTE: This only works if we know the pmap cleans pages
		 * before making a read-write -> read-only transition.  Assume
		 * this is not true here.
		 *
		 * XXXNH this will have to be revisited.
		 */

		if ((ds->_ds_flags & _BUS_DMAMAP_COHERENT) == 0) {
			/*
			 * If we are doing preread (DMAing into the mbuf),
			 * this mbuf better not be read-only.
			 */
			KASSERT(!(ops & BUS_DMASYNC_PREREAD) || !M_ROMAP(m));
			_bus_dmamap_sync_segment(va, pa, seglen, ops);
		}
		voff += seglen;
		ds_off += seglen;
		len -= seglen;
	}
}

static inline void
_bus_dmamap_sync_uio(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_dma_segment_t *ds = map->dm_segs;
	struct uio *uio = map->_dm_origbuf;
	struct iovec *iov = uio->uio_iov;
	bus_size_t voff = offset;
	bus_size_t ds_off = offset;

	while (len > 0) {
		/* Find the current dma segment */
		while (ds_off >= ds->ds_len) {
			ds_off -= ds->ds_len;
			ds++;
		}

		/* Find the current iovec. */
		while (voff >= iov->iov_len) {
			voff -= iov->iov_len;
			iov++;
		}

		/*
		 * Now at the first iovec to sync; nail each one until
		 * we have exhausted the length.
		 */
		vsize_t seglen = uimin(len, uimin(iov->iov_len - voff, ds->ds_len - ds_off));
		vaddr_t va = (vaddr_t) iov->iov_base + voff;
		paddr_t pa = _bus_dma_busaddr_to_paddr(t, ds->ds_addr + ds_off);

		if ((ds->_ds_flags & _BUS_DMAMAP_COHERENT) == 0)
			_bus_dmamap_sync_segment(va, pa, seglen, ops);

		voff += seglen;
		ds_off += seglen;
		len -= seglen;
	}
}

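/*
 * Illustrative sketch (hypothetical driver code): bus_dmamap_sync()
 * takes an offset and length, so a driver that shares one map for a
 * ring of descriptors only needs to sync the entry it is touching.
 * "sc", "sc_ring_map" and "struct hypothetical_desc" are invented.
 *
 *	struct hypothetical_desc *d = &sc->sc_descs[idx];
 *
 *	d->hd_addr = htole64(segs[0].ds_addr);
 *	d->hd_len = htole32(segs[0].ds_len);
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_ring_map,
 *	    idx * sizeof(*d), sizeof(*d), BUS_DMASYNC_PREWRITE);
 *
 * The offset/len pair is simply walked through the segment list by the
 * _bus_dmamap_sync_*() helpers above.
 */
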
/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 *
 * XXX Should have separate versions for write-through vs.
 * XXX write-back caches.  We currently assume write-back
 * XXX here, which is not as efficient as it could be for
 * XXX the write-through case.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
#ifdef DEBUG_DMA
	printf("dmamap_sync: t=%p map=%p offset=%#" PRIxBUSADDR
	    " len=%#" PRIxBUSSIZE " ops=%#x\n", t, map, offset, len, ops);
#endif	/* DEBUG_DMA */

	/*
	 * Mixing of PRE and POST operations is not allowed.
	 */
	KASSERTMSG((((ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) == 0)
	    || ((ops & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)) == 0)),
	    "%s: mix PRE and POST", __func__);

	KASSERTMSG(offset < map->dm_mapsize,
	    "offset %" PRIxBUSADDR " mapsize %" PRIuBUSSIZE,
	    offset, map->dm_mapsize);
	KASSERTMSG(len > 0 && offset + len <= map->dm_mapsize,
	    "len %" PRIuBUSSIZE " offset %" PRIxBUSADDR " mapsize %" PRIuBUSSIZE,
	    len, offset, map->dm_mapsize);

	/*
	 * For a write-back cache, we need to do the following things:
	 *
	 * PREREAD -- Invalidate the D-cache.  We do this
	 * here in case a write-back is required by the back-end.
	 *
	 * PREWRITE -- Write-back the D-cache.  Note that if
	 * we are doing a PREREAD | PREWRITE, we can collapse
	 * the whole thing into a single Wb-Inv.
	 *
	 * POSTREAD -- Re-invalidate the D-cache in case speculative
	 * memory accesses caused cachelines to become valid with now
	 * invalid data.
	 *
	 * POSTWRITE -- Nothing.
	 */
#ifdef _RISCV_NEED_BUS_DMA_BOUNCE
	const bool bouncing = (map->_dm_flags & _BUS_DMAMAP_IS_BOUNCING);
#else
	const bool bouncing = false;
#endif

	const int pre_ops = ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	const int post_ops = ops & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	if (pre_ops == 0 && post_ops == 0)
		return;

	if (post_ops == BUS_DMASYNC_POSTWRITE) {
		KASSERT(pre_ops == 0);
		if ((map->_dm_flags & _BUS_DMAMAP_COHERENT)) {
			STAT_INCR(sync_coherent_postwrite);
		} else {
			STAT_INCR(sync_postwrite);
		}
		return;
	}

	KASSERTMSG(bouncing || pre_ops != 0 || (post_ops & BUS_DMASYNC_POSTREAD),
	    "pre_ops %#x post_ops %#x", pre_ops, post_ops);

	if (bouncing && (ops & BUS_DMASYNC_PREWRITE)) {
		struct riscv_bus_dma_cookie * const cookie = map->_dm_cookie;
		STAT_INCR(write_bounces);
		char * const dataptr = (char *)cookie->id_bouncebuf + offset;
		/*
		 * Copy the caller's buffer to the bounce buffer.
		 */
		switch (map->_dm_buftype) {
		case _BUS_DMA_BUFTYPE_LINEAR:
			memcpy(dataptr, cookie->id_origlinearbuf + offset, len);
			break;

		case _BUS_DMA_BUFTYPE_MBUF:
			m_copydata(cookie->id_origmbuf, offset, len, dataptr);
			break;

		case _BUS_DMA_BUFTYPE_UIO:
			_bus_dma_uiomove(dataptr, cookie->id_origuio, len,
			    UIO_WRITE);
			break;

#ifdef DIAGNOSTIC
		case _BUS_DMA_BUFTYPE_RAW:
			panic("%s(pre): _BUS_DMA_BUFTYPE_RAW", __func__);
			break;

		case _BUS_DMA_BUFTYPE_INVALID:
			panic("%s(pre): _BUS_DMA_BUFTYPE_INVALID", __func__);
			break;

		default:
			panic("%s(pre): map %p: unknown buffer type %d\n",
			    __func__, map, map->_dm_buftype);
			break;
#endif /* DIAGNOSTIC */
		}
	}

	/* Skip cache frobbing if mapping was COHERENT */
	if ((map->_dm_flags & _BUS_DMAMAP_COHERENT)) {
		switch (ops) {
		case BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE:
			STAT_INCR(sync_coherent_prereadwrite);
			break;

		case BUS_DMASYNC_PREREAD:
			STAT_INCR(sync_coherent_preread);
			break;

		case BUS_DMASYNC_PREWRITE:
			STAT_INCR(sync_coherent_prewrite);
			break;

		case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
			STAT_INCR(sync_coherent_postreadwrite);
			break;

		case BUS_DMASYNC_POSTREAD:
			STAT_INCR(sync_coherent_postread);
			break;

		/* BUS_DMASYNC_POSTWRITE was already handled as a fastpath */
		}
		/*
		 * Drain the write buffer of DMA operators.
		 * 1) when cpu->device (prewrite)
		 * 2) when device->cpu (postread)
		 */
		if ((pre_ops & BUS_DMASYNC_PREWRITE) || (post_ops & BUS_DMASYNC_POSTREAD))
			asm volatile ("fence iorw,iorw" ::: "memory");

		/*
		 * Only thing left to do for COHERENT mapping is copy from bounce
		 * in the POSTREAD case.
		 */
		if (bouncing && (post_ops & BUS_DMASYNC_POSTREAD))
			goto bounce_it;

		return;
	}

	int buftype = map->_dm_buftype;
	if (bouncing) {
		buftype = _BUS_DMA_BUFTYPE_LINEAR;
	}

	switch (buftype) {
	case _BUS_DMA_BUFTYPE_LINEAR:
	case _BUS_DMA_BUFTYPE_RAW:
		_bus_dmamap_sync_linear(t, map, offset, len, ops);
		break;

	case _BUS_DMA_BUFTYPE_MBUF:
		_bus_dmamap_sync_mbuf(t, map, offset, len, ops);
		break;

	case _BUS_DMA_BUFTYPE_UIO:
		_bus_dmamap_sync_uio(t, map, offset, len, ops);
		break;

	case _BUS_DMA_BUFTYPE_INVALID:
		panic("%s: _BUS_DMA_BUFTYPE_INVALID", __func__);
		break;

	default:
		panic("%s: map %p: unknown buffer type %d\n", __func__, map,
		    map->_dm_buftype);
	}

	/* Drain the write buffer. */
	asm volatile ("fence iorw,iorw" ::: "memory");

	if (!bouncing || (ops & BUS_DMASYNC_POSTREAD) == 0)
		return;

bounce_it:
	STAT_INCR(read_bounces);

	struct riscv_bus_dma_cookie * const cookie = map->_dm_cookie;
	char * const dataptr = (char *)cookie->id_bouncebuf + offset;
	/*
	 * Copy the bounce buffer to the caller's buffer.
	 */
	switch (map->_dm_buftype) {
	case _BUS_DMA_BUFTYPE_LINEAR:
		memcpy(cookie->id_origlinearbuf + offset, dataptr, len);
		break;

	case _BUS_DMA_BUFTYPE_MBUF:
		m_copyback(cookie->id_origmbuf, offset, len, dataptr);
		break;

	case _BUS_DMA_BUFTYPE_UIO:
		_bus_dma_uiomove(dataptr, cookie->id_origuio, len, UIO_READ);
		break;

#ifdef DIAGNOSTIC
	case _BUS_DMA_BUFTYPE_RAW:
		panic("%s(post): _BUS_DMA_BUFTYPE_RAW", __func__);
		break;

	case _BUS_DMA_BUFTYPE_INVALID:
		panic("%s(post): _BUS_DMA_BUFTYPE_INVALID", __func__);
		break;

	default:
		panic("%s(post): map %p: unknown buffer type %d\n", __func__,
		    map, map->_dm_buftype);
		break;
#endif
	}
}

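/*
 * Illustrative sketch (hypothetical driver code): allocating and
 * mapping a chunk of DMA-safe memory, e.g. for a descriptor ring,
 * before handing its bus address to the device via load_raw.  Error
 * handling and the "sc" names are invented.
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *	void *kva;
 *
 *	error = bus_dmamem_alloc(sc->sc_dmat, ringsize, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_WAITOK);
 *	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, ringsize,
 *	    &kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT);
 *	error = bus_dmamap_load_raw(sc->sc_dmat, sc->sc_ring_map,
 *	    &seg, rseg, ringsize, BUS_DMA_WAITOK);
 *
 * BUS_DMA_COHERENT here is only a hint; _bus_dmamem_map() below may
 * satisfy it with the normal cached direct map when the range is
 * already coherent.
 */
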
/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */

int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	struct riscv_dma_range *dr;
	int error, i;

#ifdef DEBUG_DMA
	printf("dmamem_alloc t=%p size=%#" PRIxBUSSIZE
	    " align=%#" PRIxBUSSIZE
	    " boundary=%#" PRIxBUSSIZE " "
	    "segs=%p nsegs=%#x rsegs=%p flags=%#x\n", t, size, alignment,
	    boundary, segs, nsegs, rsegs, flags);
#endif

	if ((dr = t->_ranges) != NULL) {
		error = ENOMEM;
		for (i = 0; i < t->_nranges; i++, dr++) {
			if (dr->dr_len == 0
			    || (dr->dr_flags & _BUS_DMAMAP_NOALLOC))
				continue;
			error = _bus_dmamem_alloc_range(t, size, alignment,
			    boundary, segs, nsegs, rsegs, flags,
			    trunc_page(dr->dr_sysbase),
			    trunc_page(dr->dr_sysbase + dr->dr_len));
			if (error == 0)
				break;
		}
	} else {
		error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
		    segs, nsegs, rsegs, flags, 0UL, ~0UL);
	}

#ifdef DEBUG_DMA
	printf("dmamem_alloc: =%d\n", error);
#endif

	return error;
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

#ifdef DEBUG_DMA
	printf("dmamem_free: t=%p segs=%p nsegs=%#x\n", t, segs, nsegs);
#endif	/* DEBUG_DMA */

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}
	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	paddr_t pa;
	int curseg;
	const uvm_flag_t kmflags = UVM_KMF_VAONLY
	    | ((flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0);
	vsize_t align = 0;

#ifdef DEBUG_DMA
	printf("dmamem_map: t=%p segs=%p nsegs=%#x size=%#zx flags=%#x\n", t,
	    segs, nsegs, size, flags);
#endif	/* DEBUG_DMA */

#ifdef PMAP_MAP_POOLPAGE
	/*
	 * If all of memory is mapped, and we are mapping a single physically
	 * contiguous area then this area is already mapped.  Let's see if we
	 * can avoid having a separate mapping for it.
	 */
	if (nsegs == 1 && (flags & BUS_DMA_PREFETCHABLE) == 0) {
		/*
		 * If this is a non-COHERENT mapping, then the existing kernel
		 * mapping is already compatible with it.
		 */
		bool direct_mapable = (flags & BUS_DMA_COHERENT) == 0;
		pa = segs[0].ds_addr;

		/*
		 * This is a COHERENT mapping which, unless this address is in
		 * a COHERENT dma range, will not be compatible.
		 */
		if (t->_ranges != NULL) {
			const struct riscv_dma_range * const dr =
			    _bus_dma_paddr_inrange(t->_ranges, t->_nranges, pa);
			if (dr != NULL
			    && (dr->dr_flags & _BUS_DMAMAP_COHERENT)) {
				direct_mapable = true;
			}
		}

		if (direct_mapable) {
			*kvap = (void *)PMAP_MAP_POOLPAGE(pa);
#ifdef DEBUG_DMA
			printf("dmamem_map: =%p\n", *kvap);
#endif	/* DEBUG_DMA */
			return 0;
		}
	}
#endif

	size = round_page(size);

#ifdef PMAP_MAPSIZE1
	if (size >= PMAP_MAPSIZE1)
		align = PMAP_MAPSIZE1;

#ifdef PMAP_MAPSIZE2

#if PMAP_MAPSIZE1 > PMAP_MAPSIZE2
#error PMAP_MAPSIZE1 must be smaller than PMAP_MAPSIZE2
#endif

	if (size >= PMAP_MAPSIZE2)
		align = PMAP_MAPSIZE2;

#ifdef PMAP_MAPSIZE3

#if PMAP_MAPSIZE2 > PMAP_MAPSIZE3
#error PMAP_MAPSIZE2 must be smaller than PMAP_MAPSIZE3
#endif

	if (size >= PMAP_MAPSIZE3)
		align = PMAP_MAPSIZE3;
#endif
#endif
#endif

	va = uvm_km_alloc(kernel_map, size, align, kmflags);
	if (__predict_false(va == 0 && align > 0)) {
		align = 0;
		va = uvm_km_alloc(kernel_map, size, 0, kmflags);
	}

	if (va == 0)
		return ENOMEM;

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (pa = segs[curseg].ds_addr;
		    pa < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    pa += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			bool uncached = (flags & BUS_DMA_COHERENT);
			bool prefetchable = (flags & BUS_DMA_PREFETCHABLE);
#ifdef DEBUG_DMA
			printf("wiring P%#" PRIxPADDR
			    " to V%#" PRIxVADDR "\n", pa, va);
#endif	/* DEBUG_DMA */
			if (size == 0)
				panic("_bus_dmamem_map: size botch");

			const struct riscv_dma_range * const dr =
			    _bus_dma_paddr_inrange(t->_ranges, t->_nranges, pa);
			/*
			 * If this dma region is coherent then there is
			 * no need for an uncached mapping.
			 */
			if (dr != NULL
			    && (dr->dr_flags & _BUS_DMAMAP_COHERENT)) {
				uncached = false;
			}

			u_int pmap_flags = PMAP_WIRED;
			if (prefetchable)
				pmap_flags |= PMAP_WRITE_COMBINE;
			else if (uncached)
				pmap_flags |= PMAP_NOCACHE;

			pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE,
			    pmap_flags);
		}
	}
	pmap_update(pmap_kernel());
#ifdef DEBUG_DMA
	printf("dmamem_map: =%p\n", *kvap);
#endif	/* DEBUG_DMA */
	return 0;
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DEBUG_DMA
	printf("dmamem_unmap: t=%p kva=%p size=%#zx\n", t, kva, size);
#endif	/* DEBUG_DMA */
	KASSERTMSG(((uintptr_t)kva & PAGE_MASK) == 0,
	    "kva %p (%#"PRIxPTR")", kva, ((uintptr_t)kva & PAGE_MASK));

	/*
	 * Check to see if this used direct mapped memory.  If so we can
	 * just return since we have nothing to free up.
	 */
	if (pmap_md_direct_mapped_vaddr_p((vaddr_t)kva))
		return;

	size = round_page(size);
	pmap_kremove((vaddr_t)kva, size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	/* Page not found. */
	return -1;
}

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  Used by the
 * bus_dmamap_load*() variants above (and by the bounce-buffer path);
 * the resulting segments are appended to the map by
 * _bus_dmamap_load_paddr().
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct vmspace *vm, int flags)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	vaddr_t vaddr = (vaddr_t)buf;
	int error;
	pmap_t pmap = vm_map_pmap(&vm->vm_map);

#ifdef DEBUG_DMA
	printf("_bus_dmamap_load_buffer(buf=%p, len=%#" PRIxBUSSIZE
	    ", flags=%#x)\n", buf, buflen, flags);
#endif	/* DEBUG_DMA */

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 */
		pmap_extract(pmap, vaddr, &curaddr);

		KASSERTMSG((vaddr & PAGE_MASK) == (curaddr & PAGE_MASK),
		    "va %#" PRIxVADDR " curaddr %#" PRIxBUSADDR, vaddr, curaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		error = _bus_dmamap_load_paddr(t, map, curaddr, sgsize,
		    false);
		if (__predict_false(error))
			return error;

		vaddr += sgsize;
		buflen -= sgsize;
	}

	return 0;
}

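/*
 * Worked example of the boundary rounding done in
 * _bus_dmamem_alloc_range() below (illustrative numbers): a request
 * for size = 0x3000 with boundary = 0x2000 cannot be passed to
 * uvm_pglistalloc() as-is, since uvm_pglistalloc() does not accept a
 * boundary smaller than the allocation size.  The boundary is
 * therefore doubled until it reaches 0x4000 (the next power of two
 * >= size); the requested boundary is still respected later, when the
 * memory is loaded into a map and _bus_dmamap_load_paddr() splits
 * segments at boundary crossings.
 */
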
/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	KASSERTMSG(boundary == 0 || (boundary & (boundary - 1)) == 0,
	    "invalid boundary %#" PRIxBUSSIZE, boundary);

#ifdef DEBUG_DMA
	printf("alloc_range: t=%p size=%#" PRIxBUSSIZE
	    " align=%#" PRIxBUSSIZE " boundary=%#" PRIxBUSSIZE
	    " segs=%p nsegs=%#x rsegs=%p flags=%#x"
	    " lo=%#" PRIxPADDR " hi=%#" PRIxPADDR "\n",
	    t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
#endif	/* DEBUG_DMA */

	/* Always round the size. */
	size = round_page(size);

	/*
	 * We accept boundaries < size, splitting in multiple segments
	 * if needed.  uvm_pglistalloc does not, so compute an appropriate
	 * boundary: next power of 2 >= size
	 */
	bus_size_t uboundary = boundary;
	if (uboundary <= PAGE_SIZE) {
		uboundary = 0;
	} else {
		while (uboundary < size) {
			uboundary <<= 1;
		}
	}

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, uboundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return error;

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = segs[curseg]._ds_paddr =
	    VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
#ifdef DEBUG_DMA
	printf("alloc: page %#" PRIxPADDR "\n", lastaddr);
#endif	/* DEBUG_DMA */
	m = TAILQ_NEXT(m, pageq.queue);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
		curaddr = VM_PAGE_TO_PHYS(m);
		KASSERTMSG(low <= curaddr && curaddr < high,
		    "uvm_pglistalloc returned non-sensical address %#" PRIxPADDR
		    " (low=%#" PRIxPADDR ", high=%#" PRIxPADDR ")\n",
		    curaddr, low, high);
#ifdef DEBUG_DMA
		printf("alloc: page %#" PRIxPADDR "\n", curaddr);
#endif	/* DEBUG_DMA */
		if (curaddr == lastaddr + PAGE_SIZE
		    && (lastaddr & boundary) == (curaddr & boundary))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			if (curseg >= nsegs) {
				uvm_pglistfree(&mlist);
				return EFBIG;
			}
			segs[curseg].ds_addr = curaddr;
			segs[curseg]._ds_paddr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return 0;
}

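/*
 * Worked example for riscv_dma_range_intersect() below (illustrative
 * numbers): given one range with dr_sysbase = 0x80000000 and
 * dr_len = 0x40000000, a query of pa = 0x7fffe000, size = 0x4000
 * overlaps only the tail of the queried region, so the "end of region"
 * case fires and the function reports *pap = 0x80000000 and
 * *sizep = 0x2000, returning 1.  A region with no overlap at all
 * returns 0.
 */
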
/*
 * Check if a memory region intersects with a DMA range, and return the
 * page-rounded intersection if it does.
 */
int
riscv_dma_range_intersect(struct riscv_dma_range *ranges, int nranges,
    paddr_t pa, psize_t size, paddr_t *pap, psize_t *sizep)
{
	struct riscv_dma_range *dr;
	int i;

	if (ranges == NULL)
		return 0;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (dr->dr_sysbase <= pa &&
		    pa < (dr->dr_sysbase + dr->dr_len)) {
			/*
			 * Beginning of region intersects with this range.
			 */
			*pap = trunc_page(pa);
			*sizep = round_page(uimin(pa + size,
			    dr->dr_sysbase + dr->dr_len) - pa);
			return 1;
		}
		if (pa < dr->dr_sysbase && dr->dr_sysbase < (pa + size)) {
			/*
			 * End of region intersects with this range.
			 */
			*pap = trunc_page(dr->dr_sysbase);
			*sizep = round_page(uimin((pa + size) - dr->dr_sysbase,
			    dr->dr_len));
			return 1;
		}
	}

	/* No intersection found. */
	return 0;
}

#ifdef _RISCV_NEED_BUS_DMA_BOUNCE
static int
_bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
    bus_size_t size, int flags)
{
	struct riscv_bus_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	KASSERT(cookie != NULL);

	cookie->id_bouncebuflen = round_page(size);
	error = _bus_dmamem_alloc(t, cookie->id_bouncebuflen,
	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error == 0) {
		error = _bus_dmamem_map(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
		    (void **)&cookie->id_bouncebuf, flags);
		if (error) {
			_bus_dmamem_free(t, cookie->id_bouncesegs,
			    cookie->id_nbouncesegs);
			cookie->id_bouncebuflen = 0;
			cookie->id_nbouncesegs = 0;
		} else {
			cookie->id_flags |= _BUS_DMA_HAS_BOUNCE;
		}
	} else {
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	}

	return error;
}

static void
_bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct riscv_bus_dma_cookie *cookie = map->_dm_cookie;

	KASSERT(cookie != NULL);

	_bus_dmamem_unmap(t, cookie->id_bouncebuf, cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs, cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~_BUS_DMA_HAS_BOUNCE;
}
#endif /* _RISCV_NEED_BUS_DMA_BOUNCE */

/*
 * This function does the same as uiomove, but takes an explicit
 * direction, and does not update the uio structure.
 */
static int
_bus_dma_uiomove(void *buf, struct uio *uio, size_t n, int direction)
{
	struct iovec *iov;
	int error;
	struct vmspace *vm;
	char *cp;
	size_t resid, cnt;
	int i;

	iov = uio->uio_iov;
	vm = uio->uio_vmspace;
	cp = buf;
	resid = n;

	for (i = 0; i < uio->uio_iovcnt && resid > 0; i++) {
		iov = &uio->uio_iov[i];
		if (iov->iov_len == 0)
			continue;
		cnt = MIN(resid, iov->iov_len);

		if (!VMSPACE_IS_KERNEL_P(vm)) {
			preempt_point();
		}
		if (direction == UIO_READ) {
			error = copyout_vmspace(vm, cp, iov->iov_base, cnt);
		} else {
			error = copyin_vmspace(vm, iov->iov_base, cp, cnt);
		}
		if (error)
			return error;
		cp += cnt;
		resid -= cnt;
	}
	return 0;
}

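/*
 * Illustrative sketch (hypothetical caller): a bus or device driver
 * whose DMA engine can only address the low 4 GiB would derive a
 * restricted tag like this; "sc" is an invented name.
 *
 *	error = bus_dmatag_subregion(sc->sc_dmat, 0, 0xffffffffUL,
 *	    &sc->sc_dmat32, BUS_DMA_WAITOK);
 *
 * If the parent tag already fits inside [min_addr, max_addr] the same
 * tag is returned with a reference added; otherwise a new tag with
 * clipped ranges is built below.  Either way the result is released
 * with bus_dmatag_destroy().
 */
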
int
_bus_dmatag_subregion(bus_dma_tag_t tag, bus_addr_t min_addr,
    bus_addr_t max_addr, bus_dma_tag_t *newtag, int flags)
{
#ifdef _RISCV_NEED_BUS_DMA_BOUNCE
	if (min_addr >= max_addr)
		return EOPNOTSUPP;

	struct riscv_dma_range *dr;
	bool psubset = true;
	size_t nranges = 0;
	size_t i;
	for (i = 0, dr = tag->_ranges; i < tag->_nranges; i++, dr++) {
		/*
		 * If the new {min,max}_addr are narrower than any of the
		 * ranges in the parent tag then we need a new tag;
		 * otherwise the parent tag is a subset of the new
		 * range and can continue to be used.
		 */
		if (min_addr > dr->dr_sysbase
		    || max_addr < dr->dr_sysbase + dr->dr_len - 1) {
			psubset = false;
		}
		if (min_addr <= dr->dr_sysbase + dr->dr_len
		    && max_addr >= dr->dr_sysbase) {
			nranges++;
		}
	}
	if (nranges == 0) {
		nranges = 1;
		psubset = false;
	}
	if (psubset) {
		*newtag = tag;
		/* if the tag must be freed, add a reference */
		if (tag->_tag_needs_free)
			(tag->_tag_needs_free)++;
		return 0;
	}

	const size_t tagsize = sizeof(*tag) + nranges * sizeof(*dr);
	if ((*newtag = kmem_intr_zalloc(tagsize,
	    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return ENOMEM;

	dr = (void *)(*newtag + 1);
	**newtag = *tag;
	(*newtag)->_tag_needs_free = 1;
	(*newtag)->_ranges = dr;
	(*newtag)->_nranges = nranges;

	if (tag->_ranges == NULL) {
		dr->dr_sysbase = min_addr;
		dr->dr_busbase = min_addr;
		dr->dr_len = max_addr + 1 - min_addr;
	} else {
		struct riscv_dma_range *pdr;

		for (i = 0, pdr = tag->_ranges; i < tag->_nranges; i++, pdr++) {
			KASSERT(nranges != 0);

			if (min_addr > pdr->dr_sysbase + pdr->dr_len
			    || max_addr < pdr->dr_sysbase) {
				/*
				 * this range doesn't overlap with new limits,
				 * so skip.
				 */
				continue;
			}
			/*
			 * Copy the range and adjust to fit within the new
			 * limits
			 */
			dr[0] = pdr[0];
			if (dr->dr_sysbase < min_addr) {
				psize_t diff = min_addr - dr->dr_sysbase;
				dr->dr_busbase += diff;
				dr->dr_len -= diff;
				dr->dr_sysbase += diff;
			}
			if (max_addr <= dr->dr_sysbase + dr->dr_len - 1) {
				dr->dr_len = max_addr + 1 - dr->dr_sysbase;
			}
			dr++;
			nranges--;
		}
	}

	return 0;
#else
	return EOPNOTSUPP;
#endif /* _RISCV_NEED_BUS_DMA_BOUNCE */
}

void
_bus_dmatag_destroy(bus_dma_tag_t tag)
{
#ifdef _RISCV_NEED_BUS_DMA_BOUNCE
	switch (tag->_tag_needs_free) {
	case 0:
		break;			/* not allocated with kmem */
	case 1: {
		const size_t tagsize = sizeof(*tag)
		    + tag->_nranges * sizeof(*tag->_ranges);
		kmem_intr_free(tag, tagsize);	/* last reference to tag */
		break;
	}
	default:
		(tag->_tag_needs_free)--;	/* one less reference */
	}
#endif
}