/*	$NetBSD: bus_dma.c,v 1.27 2023/11/23 20:40:08 andvar Exp $	*/

/*
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.27 2023/11/23 20:40:08 andvar Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <mips/cache.h>

#define _PLAYSTATION2_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <dev/bus_dma/bus_dmamem_common.h>

#include <uvm/uvm_extern.h>

#include <machine/locore.h>

extern paddr_t kvtophys(vaddr_t);	/* XXX */

static int _bus_dmamap_load_buffer(bus_dmamap_t, void *, bus_size_t,
    struct vmspace *, int, vaddr_t *, int *, int);

struct playstation2_bus_dma_tag playstation2_default_bus_dma_tag = {
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	_bus_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	_bus_dmamap_unload,
	_bus_dmamap_sync,
	_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};

static size_t
_bus_dmamap_mapsize(int const nsegments)
{
	KASSERT(nsegments > 0);
	return sizeof(struct playstation2_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
}

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct playstation2_bus_dmamap *map;
	void *mapstore;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	if ((mapstore = kmem_zalloc(_bus_dmamap_mapsize(nsegments),
	    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return ENOMEM;

	map = (struct playstation2_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return 0;
}
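/*
 * Illustrative sketch only (hypothetical helper, compiled out): how a
 * driver would pair map creation with destruction through the
 * machine-independent bus_dmamap_create()/bus_dmamap_destroy() wrappers
 * from <machine/bus.h>.  The transfer size and segment count are made-up
 * example values, not requirements of this port.
 */
#ifdef BUS_DMA_EXAMPLE
static int
bus_dma_example_create(bus_dma_tag_t t, bus_dmamap_t *mapp)
{
	int error;

	/* One 64 KB transfer in at most 16 segments, no boundary limit. */
	error = bus_dmamap_create(t, 65536, 16, 65536, 0,
	    BUS_DMA_WAITOK, mapp);
	if (error)
		return error;

	/* ... load, use, and unload the map here ... */

	bus_dmamap_destroy(t, *mapp);
	return 0;
}
#endif /* BUS_DMA_EXAMPLE */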
/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	kmem_free(map, _bus_dmamap_mapsize(map->_dm_segcnt));
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dmamap_t map, void *buf, bus_size_t buflen,
    struct vmspace *vm, int flags, vaddr_t *lastaddrp, int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (!VMSPACE_IS_KERNEL_P(vm))
			(void) pmap_extract(vm_map_pmap(&vm->vm_map),
			    vaddr, (paddr_t *)&curaddr);
		else
			curaddr = kvtophys(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			map->dm_segs[seg]._ds_vaddr = vaddr;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
				map->dm_segs[seg]._ds_vaddr = vaddr;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return EFBIG;		/* XXX Better return value here? */

	return 0;
}
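/*
 * Worked example of the boundary clamp above (illustrative numbers):
 * with _dm_boundary = 0x10000, bmask = ~0xffff.  For curaddr = 0x1ff00,
 * baddr = (0x1ff00 + 0x10000) & bmask = 0x20000, so sgsize is clamped
 * to 0x100 bytes and the segment stops exactly at the 64 KB line.
 */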
/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	vaddr_t lastaddr;
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return EINVAL;

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(map, buf, buflen,
	    vm, flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;

		/*
		 * For linear buffers, we support marking the mapping
		 * as COHERENT.
		 *
		 * XXX Check TLB entries for cache-inhibit bits?
		 */
		if (buf >= (void *)MIPS_KSEG1_START &&
		    buf < (void *)MIPS_KSEG2_START)
			map->_dm_flags |= PLAYSTATION2_DMAMAP_COHERENT;
	}
	return error;
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	vaddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return EINVAL;

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = _bus_dmamap_load_buffer(map, m->m_data, m->m_len,
		    vmspace_kernel(), flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return error;
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	vaddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(map, addr, minlen,
		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return error;
}
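/*
 * Illustrative sketch only (hypothetical helper, compiled out): loading
 * a kernel-virtual buffer with bus_dmamap_load() and walking the
 * resulting segment list, as a driver would before programming a DMA
 * engine.  Passing p == NULL selects the kernel vmspace above.
 */
#ifdef BUS_DMA_EXAMPLE
static void
bus_dma_example_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t len)
{
	int i;

	if (bus_dmamap_load(t, map, buf, len, NULL, BUS_DMA_NOWAIT) != 0)
		return;
	for (i = 0; i < map->dm_nsegs; i++)
		printf("seg %d: addr=%#lx len=%#lx\n", i,
		    (u_long)map->dm_segs[i].ds_addr,
		    (u_long)map->dm_segs[i].ds_len);
	bus_dmamap_unload(t, map);
}
#endif /* BUS_DMA_EXAMPLE */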
/*
 * Like _bus_dmamap_load(), but for raw memory.
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_flags &= ~PLAYSTATION2_DMAMAP_COHERENT;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_size_t minlen;
	bus_addr_t addr;
	int i;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync: bad offset %lu (map size is %lu)",
		    offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync: bad length");
#endif

	/*
	 * Flush the write buffer.
	 */
	wbflush();

	/*
	 * If the mapping is of COHERENT DMA-safe memory, no cache
	 * flush is necessary.
	 */
	if (map->_dm_flags & PLAYSTATION2_DMAMAP_COHERENT)
		return;

	/*
	 * No cache flushes are necessary if we're only doing
	 * POSTREAD or POSTWRITE (i.e. not doing PREREAD or PREWRITE).
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) == 0)
		return;

	/*
	 * Flush data cache for PREREAD.  This has the side-effect
	 * of invalidating the cache.  Done at PREREAD since it
	 * causes the cache line(s) to be written back to memory.
	 *
	 * Flush data cache for PREWRITE, so that the contents of
	 * the data buffer in memory reflect reality.
	 *
	 * Given the test above, we know we're doing one of these
	 * two operations, so no additional tests are necessary.
	 */

	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		/* Find the beginning segment. */
		if (offset >= map->dm_segs[i].ds_len) {
			offset -= map->dm_segs[i].ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		minlen = len < map->dm_segs[i].ds_len - offset ?
		    len : map->dm_segs[i].ds_len - offset;

		addr = map->dm_segs[i]._ds_vaddr;

#ifdef BUS_DMA_DEBUG
		printf("bus_dmamap_sync: flushing segment %d "
		    "(0x%lx..0x%lx) ...", i, addr + offset,
		    addr + offset + minlen - 1);
#endif
		mips_dcache_wbinv_range(addr + offset, minlen);

#ifdef BUS_DMA_DEBUG
		printf("\n");
#endif
		offset = 0;
		len -= minlen;
	}
}
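/*
 * Illustrative sketch only (hypothetical helper, compiled out): the sync
 * discipline around a device-to-memory ("read") transfer.  PREREAD is
 * issued before the device starts, POSTREAD after it completes; mixing
 * PRE and POST flags in one call panics above.
 */
#ifdef BUS_DMA_EXAMPLE
static void
bus_dma_example_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t len)
{

	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREREAD);
	/* ... start the DMA engine and wait for its completion here ... */
	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTREAD);
}
#endif /* BUS_DMA_EXAMPLE */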
/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	return (_bus_dmamem_alloc_range_common(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags,
	    pmap_limits.avail_start /*low*/,
	    pmap_limits.avail_end - 1 /*high*/));
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{

	_bus_dmamem_free_common(t, segs, nsegs);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{

	/*
	 * If we're only mapping 1 segment, use KSEG0 or KSEG1, to avoid
	 * TLB thrashing.
	 */
	if (nsegs == 1) {
		if (flags & BUS_DMA_COHERENT)
			*kvap = (void *)MIPS_PHYS_TO_KSEG1(segs[0].ds_addr);
		else
			*kvap = (void *)MIPS_PHYS_TO_KSEG0(segs[0].ds_addr);
		return 0;
	}

	/* XXX BUS_DMA_COHERENT */
	return (_bus_dmamem_map_common(t, segs, nsegs, size, kvap, flags, 0));
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

	/*
	 * Nothing to do if we mapped it with KSEG0 or KSEG1 (i.e.
	 * not in KSEG2).
	 */
	if (kva >= (void *)MIPS_KSEG0_START &&
	    kva < (void *)MIPS_KSEG2_START)
		return;

	_bus_dmamem_unmap_common(t, kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	bus_addr_t rv;

	rv = _bus_dmamem_mmap_common(t, segs, nsegs, off, prot, flags);
	if (rv == (bus_addr_t)-1)
		return (-1);

	return (mips_btop((char *)rv));
}
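/*
 * Illustrative sketch only (hypothetical helper, compiled out): the usual
 * life cycle for a DMA-safe control structure, as a driver attach routine
 * might perform it.  Allocate physical memory, map it into KVA (a single
 * coherent segment lands in KSEG1 above), create a map, and load the
 * buffer into it; error unwinding runs the steps in reverse.
 */
#ifdef BUS_DMA_EXAMPLE
static int
bus_dma_example_lifecycle(bus_dma_tag_t t, bus_size_t size,
    bus_dmamap_t *mapp, void **kvap)
{
	bus_dma_segment_t seg;
	int rseg, error;

	error = bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT);
	if (error)
		return error;
	error = bus_dmamem_map(t, &seg, rseg, size, kvap,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error)
		goto fail_free;
	error = bus_dmamap_create(t, size, 1, size, 0, BUS_DMA_NOWAIT, mapp);
	if (error)
		goto fail_unmap;
	error = bus_dmamap_load(t, *mapp, *kvap, size, NULL, BUS_DMA_NOWAIT);
	if (error)
		goto fail_destroy;
	return 0;

 fail_destroy:
	bus_dmamap_destroy(t, *mapp);
 fail_unmap:
	bus_dmamem_unmap(t, *kvap, size);
 fail_free:
	bus_dmamem_free(t, &seg, rseg);
	return error;
}
#endif /* BUS_DMA_EXAMPLE */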