/*	$NetBSD: bus_dma.c,v 1.26 2022/07/26 20:08:56 andvar Exp $	*/

/*
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.26 2022/07/26 20:08:56 andvar Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/proc.h>

#include <mips/cache.h>

#define _PLAYSTATION2_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <dev/bus_dma/bus_dmamem_common.h>

#include <uvm/uvm_extern.h>

#include <machine/locore.h>

extern paddr_t kvtophys(vaddr_t);	/* XXX */
static int	_bus_dmamap_load_buffer(bus_dmamap_t, void *, bus_size_t,
		    struct vmspace *, int, vaddr_t *, int *, int);

struct playstation2_bus_dma_tag playstation2_default_bus_dma_tag = {
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	_bus_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	_bus_dmamap_unload,
	_bus_dmamap_sync,
	_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};

static size_t
_bus_dmamap_mapsize(int const nsegments)
{
	KASSERT(nsegments > 0);
	return sizeof(struct playstation2_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
}
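
/*
 * Layout sketch (illustrative, not compiled): the size computed by
 * _bus_dmamap_mapsize() covers the map header plus a variable-sized
 * tail of segments.  struct playstation2_bus_dmamap already declares
 * one bus_dma_segment_t, hence the (nsegments - 1):
 *
 *	+----------------------------------+
 *	| struct playstation2_bus_dmamap   |  <- includes dm_segs[0]
 *	+----------------------------------+
 *	| bus_dma_segment_t  (segment 1)   |
 *	|        ...                       |
 *	| bus_dma_segment_t  (segment N-1) |
 *	+----------------------------------+
 */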

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct playstation2_bus_dmamap *map;
	void *mapstore;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	if ((mapstore = kmem_zalloc(_bus_dmamap_mapsize(nsegments),
	    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return ENOMEM;

	map = (struct playstation2_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return 0;
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	kmem_free(map, _bus_dmamap_mapsize(map->_dm_segcnt));
}


/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dmamap_t map, void *buf, bus_size_t buflen,
    struct vmspace *vm, int flags, vaddr_t *lastaddrp, int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (!VMSPACE_IS_KERNEL_P(vm))
			(void) pmap_extract(vm_map_pmap(&vm->vm_map),
			    vaddr, (paddr_t *)&curaddr);
		else
			curaddr = kvtophys(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			map->dm_segs[seg]._ds_vaddr = vaddr;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
				map->dm_segs[seg]._ds_vaddr = vaddr;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return EFBIG;		/* XXX Better return value here? */

	return 0;
}
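
/*
 * Worked example of the boundary clipping in _bus_dmamap_load_buffer()
 * (illustrative values, assuming _dm_boundary = 0x1000):
 *
 *	bmask   = ~(0x1000 - 1)              = ~0xfff
 *	curaddr                              = 0x1f80
 *	baddr   = (0x1f80 + 0x1000) & bmask  = 0x2000
 *	sgsize  = MIN(sgsize, baddr - curaddr), i.e. at most 0x80
 *
 * so the chunk is clipped to end at the 0x2000 boundary instead of
 * crossing it; the bytes beyond the boundary start a new segment on
 * the next loop iteration.
 */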

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	vaddr_t lastaddr;
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return EINVAL;

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(map, buf, buflen,
	    vm, flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;

		/*
		 * For linear buffers, we support marking the mapping
		 * as COHERENT.
		 *
		 * XXX Check TLB entries for cache-inhibit bits?
		 */
		if (buf >= (void *)MIPS_KSEG1_START &&
		    buf < (void *)MIPS_KSEG2_START)
			map->_dm_flags |= PLAYSTATION2_DMAMAP_COHERENT;
	}
	return error;
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	vaddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return EINVAL;

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = _bus_dmamap_load_buffer(map, m->m_data, m->m_len,
		    vmspace_kernel(), flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return error;
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	vaddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(map, addr, minlen,
		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return error;
}
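
/*
 * Usage sketch for the mbuf path above (hypothetical transmit code;
 * "sc", sc_dmat, and sc_txmap are illustrative driver state, not
 * part of this file):
 *
 *	if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_txmap, m0,
 *	    BUS_DMA_NOWAIT) == 0) {
 *		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap, 0,
 *		    sc->sc_txmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
 *		... hand sc_txmap->dm_segs[0 .. dm_nsegs - 1] to the
 *		    device, then unload after completion ...
 *	}
 */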

/*
 * Like _bus_dmamap_load(), but for raw memory.
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_flags &= ~PLAYSTATION2_DMAMAP_COHERENT;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_size_t minlen;
	bus_addr_t addr;
	int i;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync: bad offset %lu (map size is %lu)",
		    offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync: bad length");
#endif

	/*
	 * Flush the write buffer.
	 */
	wbflush();

	/*
	 * If the mapping is of COHERENT DMA-safe memory, no cache
	 * flush is necessary.
	 */
	if (map->_dm_flags & PLAYSTATION2_DMAMAP_COHERENT)
		return;

	/*
	 * No cache flushes are necessary if we're only doing
	 * POSTREAD or POSTWRITE (i.e. not doing PREREAD or PREWRITE).
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) == 0)
		return;

	/*
	 * Flush data cache for PREREAD.  This has the side-effect
	 * of invalidating the cache.  Done at PREREAD since it
	 * causes the cache line(s) to be written back to memory.
	 *
	 * Flush data cache for PREWRITE, so that the contents of
	 * the data buffer in memory reflect reality.
	 *
	 * Given the test above, we know we're doing one of these
	 * two operations, so no additional tests are necessary.
	 */

	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		/* Find the beginning segment. */
		if (offset >= map->dm_segs[i].ds_len) {
			offset -= map->dm_segs[i].ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		minlen = len < map->dm_segs[i].ds_len - offset ?
		    len : map->dm_segs[i].ds_len - offset;

		addr = map->dm_segs[i]._ds_vaddr;

#ifdef BUS_DMA_DEBUG
		printf("bus_dmamap_sync: flushing segment %d "
		    "(0x%lx..0x%lx) ...", i, addr + offset,
		    addr + offset + minlen - 1);
#endif
		mips_dcache_wbinv_range(addr + offset, minlen);

#ifdef BUS_DMA_DEBUG
		printf("\n");
#endif
		offset = 0;
		len -= minlen;
	}
}
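
/*
 * Ordering sketch for the sync protocol implemented above (driver
 * pseudo-usage; "t", "map", and "len" stand for the caller's own
 * handles):
 *
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREREAD);
 *	... start the device transfer into the buffer ...
 *	... wait for the completion interrupt ...
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTREAD);
 *
 * In this implementation only the PRE operations touch the cache
 * (mips_dcache_wbinv_range() both writes back and invalidates), so
 * the POST calls reduce to a write-buffer flush; they are still
 * required for portability to bus_dma back-ends that do work at
 * POST time.
 */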

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	extern paddr_t avail_start, avail_end;

	return (_bus_dmamem_alloc_range_common(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags,
	    avail_start /*low*/,
	    avail_end - 1 /*high*/));
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{

	_bus_dmamem_free_common(t, segs, nsegs);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{

	/*
	 * If we're only mapping 1 segment, use KSEG0 or KSEG1, to avoid
	 * TLB thrashing.
	 */
	if (nsegs == 1) {
		if (flags & BUS_DMA_COHERENT)
			*kvap = (void *)MIPS_PHYS_TO_KSEG1(segs[0].ds_addr);
		else
			*kvap = (void *)MIPS_PHYS_TO_KSEG0(segs[0].ds_addr);
		return 0;
	}

	/* XXX BUS_DMA_COHERENT */
	return (_bus_dmamem_map_common(t, segs, nsegs, size, kvap, flags, 0));
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

	/*
	 * Nothing to do if we mapped it with KSEG0 or KSEG1 (i.e.
	 * not in KSEG2).
	 */
	if (kva >= (void *)MIPS_KSEG0_START &&
	    kva < (void *)MIPS_KSEG2_START)
		return;

	_bus_dmamem_unmap_common(t, kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	bus_addr_t rv;

	rv = _bus_dmamem_mmap_common(t, segs, nsegs, off, prot, flags);
	if (rv == (bus_addr_t)-1)
		return (-1);

	return (mips_btop((char *)rv));
}
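
/*
 * End-to-end usage sketch tying the entry points above together
 * (hypothetical driver attach path; error handling elided, names
 * illustrative):
 *
 *	bus_dma_segment_t seg;
 *	bus_dmamap_t map;
 *	void *kva;
 *	int rseg;
 *
 *	bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &seg, 1, &rseg,
 *	    BUS_DMA_NOWAIT);
 *	bus_dmamem_map(t, &seg, rseg, size, &kva, BUS_DMA_COHERENT);
 *	bus_dmamap_create(t, size, 1, size, 0, BUS_DMA_NOWAIT, &map);
 *	bus_dmamap_load(t, map, kva, size, NULL, BUS_DMA_NOWAIT);
 *
 *	... program the device with map->dm_segs[0].ds_addr ...
 *
 *	bus_dmamap_unload(t, map);
 *	bus_dmamap_destroy(t, map);
 *	bus_dmamem_unmap(t, kva, size);
 *	bus_dmamem_free(t, &seg, rseg);
 *
 * With nsegs == 1, _bus_dmamem_map() above returns a KSEG1 (uncached)
 * address because of BUS_DMA_COHERENT, so _bus_dmamap_load() marks the
 * map PLAYSTATION2_DMAMAP_COHERENT and subsequent bus_dmamap_sync()
 * calls become write-buffer flushes only.
 */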