/*	$NetBSD: rumpdev_bus_dma.c,v 1.11 2022/02/13 19:20:41 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 Antti Kantee
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * bus_dma(9) implementation which runs on top of rump kernel hypercalls.
 * It's essentially the same as the PowerPC implementation it's based on,
 * except with some indirection and the PowerPC MD features removed.
 * It should run as-is on x86; other archs may need cache flushing hooks.
 *
 * From sys/arch/powerpc/powerpc/bus_dma.c:
 *	NetBSD: bus_dma.c,v 1.46 2012/02/01 09:54:03 matt Exp
 */
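
/*
 * For orientation, a minimal sketch of how a driver typically drives
 * the map half of this interface.  This is illustrative only and not
 * part of this file; "sc" and its sc_dmat field are hypothetical:
 *
 *	bus_dmamap_t map;
 *
 *	bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_WAITOK, &map);
 *	bus_dmamap_load(sc->sc_dmat, map, buf, len, NULL, BUS_DMA_WAITOK);
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREWRITE);
 *	... device DMAs from map->dm_segs[i].ds_addr ...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dmat, map);
 *	bus_dmamap_destroy(sc->sc_dmat, map);
 */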

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rumpdev_bus_dma.c,v 1.11 2022/02/13 19:20:41 riastradh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/mbuf.h>
#include <sys/bus.h>
#include <sys/intr.h>

#include "pci_user.h"

int	_bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct vmspace *, int, paddr_t *, int *, int);

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
	bus_size_t maxsegsz, bus_size_t boundary, int flags,
	bus_dmamap_t *dmamp)
{
	bus_dmamap_t map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(*map) + sizeof(bus_dma_segment_t [nsegments - 1]);
	if ((mapstore = kmem_intr_alloc(mapsize,
	    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return (ENOMEM);

	memset(mapstore, 0, mapsize);
	map = (void *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_bounce_thresh = 0;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	size_t mapsize = sizeof(*map)
	    + sizeof(bus_dma_segment_t [map->_dm_segcnt - 1]);

	kmem_intr_free(map, mapsize);
}
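
/*
 * A worked example of the boundary arithmetic used by the buffer loader
 * below (values are made up): with _dm_boundary = 0x10000, bmask is
 * ~0xffff.  For curaddr = 0x1fe00, the next boundary is
 * baddr = (0x1fe00 + 0x10000) & ~0xffff = 0x20000, so sgsize is clipped
 * to baddr - curaddr = 0x200 and the segment ends exactly at the
 * boundary instead of crossing it.
 */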

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entry, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map,
	void *buf, bus_size_t buflen, struct vmspace *vm, int flags,
	paddr_t *lastaddrp, int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

//	printf("%s(%p,%p,%p,%u,%p,%#x,%p,%p,%u)\n", __func__,
//	    t, map, buf, buflen, vm, flags, lastaddrp, segp, first);

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (!VMSPACE_IS_KERNEL_P(vm))
			(void) pmap_extract(vm_map_pmap(&vm->vm_map),
			    vaddr, (void *)&curaddr);
		else
			curaddr = vtophys(vaddr);

		/*
		 * If we're beyond the bounce threshold, notify
		 * the caller.
		 */
		if (map->_dm_bounce_thresh != 0 &&
		    curaddr >= map->_dm_bounce_thresh)
			return (EINVAL);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;
		sgsize = MIN(sgsize, map->dm_maxsegsz);

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr
			    = rumpcomp_pci_virt_to_mach((void *)curaddr);
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (rumpcomp_pci_virt_to_mach((void *)curaddr)
			      & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr =
				    rumpcomp_pci_virt_to_mach((void *)curaddr);
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */

	return (0);
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map,
	void *buf, bus_size_t buflen, struct proc *p, int flags)
{
	paddr_t lastaddr = 0;
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
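
/*
 * The lastaddr/segp/first protocol of _bus_dmamap_load_buffer() is what
 * lets the mbuf and uio loaders below chain several buffers into one
 * map.  A minimal sketch (hypothetical kernel buffers b1/b2 of lengths
 * l1/l2):
 *
 *	paddr_t lastaddr = 0;
 *	int seg = 0, error;
 *
 *	error = _bus_dmamap_load_buffer(t, map, b1, l1,
 *	    vmspace_kernel(), flags, &lastaddr, &seg, 1);
 *	if (error == 0)
 *		error = _bus_dmamap_load_buffer(t, map, b2, l2,
 *		    vmspace_kernel(), flags, &lastaddr, &seg, 0);
 *
 * The second call passes first == 0, so if b2 is physically contiguous
 * with b1 it coalesces into the current segment instead of opening a
 * new one.
 */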

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
	struct mbuf *m0, int flags)
{
	paddr_t lastaddr = 0;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next, first = 0) {
		if (m->m_len == 0)
			continue;
#ifdef POOL_VTOPHYS
		/* XXX Could be better about coalescing. */
		/* XXX Doesn't check boundaries. */
		switch (m->m_flags & (M_EXT|M_EXT_CLUSTER)) {
		case M_EXT|M_EXT_CLUSTER:
			/* XXX KDASSERT */
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			lastaddr = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
 have_addr:
			if (first == 0 && ++seg >= map->_dm_segcnt) {
				error = EFBIG;
				continue;
			}
			map->dm_segs[seg].ds_addr =
			    rumpcomp_pci_virt_to_mach((void *)lastaddr);
			map->dm_segs[seg].ds_len = m->m_len;
			lastaddr += m->m_len;
			continue;

		case 0:
			lastaddr = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			goto have_addr;

		default:
			break;
		}
#endif
		error = _bus_dmamap_load_buffer(t, map, m->m_data,
		    m->m_len, vmspace_kernel(), flags, &lastaddr, &seg, first);
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
	struct uio *uio, int flags)
{
	paddr_t lastaddr = 0;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
	bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

void
bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
	bus_addr_t offset, bus_size_t len, int ops)
{

	/* XXX: this might need some MD tweaks */
	membar_sync();
}
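
/*
 * The bus_dmamem_* routines below implement the DMA-safe memory half
 * of the interface on top of the rumpcomp_pci_* hypercalls.  A minimal
 * sketch of the usual lifecycle, for orientation (illustrative only;
 * error handling omitted):
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *	void *kva;
 *
 *	bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &seg, 1, &rseg,
 *	    BUS_DMA_WAITOK);
 *	bus_dmamem_map(t, &seg, rseg, size, &kva, BUS_DMA_WAITOK);
 *	... use kva, program the device with seg.ds_addr ...
 *	bus_dmamem_unmap(t, kva, size);
 *	bus_dmamem_free(t, &seg, rseg);
 */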

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
#ifdef RUMPCOMP_USERFEATURE_PCI_DMAFREE
	vaddr_t vacookie = segs[0]._ds_vacookie;
	bus_size_t sizecookie = segs[0]._ds_sizecookie;

	rumpcomp_pci_dmafree(vacookie, sizecookie);
#else
	panic("bus_dmamem_free not implemented");
#endif
}

/*
 * We don't have a hypercall for mapping scatter-gather memory, so
 * simply fail if there's more than one segment to map.
 */
int
bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
	size_t size, void **kvap, int flags)
{
	struct rumpcomp_pci_dmaseg *dss;
	size_t allocsize = nsegs * sizeof(*dss);
	int rv, i;

	/*
	 * Though rumpcomp_pci_dmaseg "accidentally" matches the
	 * bus_dma segment descriptor (at least for now), act
	 * proper and actually translate it.
	 */
	dss = kmem_alloc(allocsize, KM_SLEEP);
	for (i = 0; i < nsegs; i++) {
		dss[i].ds_pa = segs[i].ds_addr;
		dss[i].ds_len = segs[i].ds_len;
		dss[i].ds_vacookie = segs[i]._ds_vacookie;
	}
	rv = rumpcomp_pci_dmamem_map(dss, nsegs, size, kvap);
	kmem_free(dss, allocsize);

	return rv;
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

	/* nothing to do as long as bus_dmamem_map() is what it is */
}

paddr_t
bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
	off_t off, int prot, int flags)
{

	panic("bus_dmamem_mmap not supported");
}
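
/*
 * bus_dmamem_alloc() below walks the allocation a page at a time and
 * coalesces physically contiguous pages into one segment.  With the
 * current rumpcomp_pci_dmalloc() hypercall the pages are contiguous by
 * construction, so this normally yields a single segment; the general
 * loop is inherited from the original code.  A made-up example of the
 * general case: pages at pa 0x5000, 0x6000 and 0x9000 (PAGE_SIZE
 * 0x1000) would produce segments { ds_addr 0x5000, ds_len 0x2000 } and
 * { ds_addr 0x9000, ds_len 0x1000 }, i.e. *rsegs == 2.
 */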

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
	bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
	int flags)
{
	paddr_t curaddr, lastaddr, pa;
	vaddr_t vacookie;
	size_t sizecookie;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	sizecookie = size;

	/*
	 * Allocate pages from the VM system.
	 */
#if 0
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
#else
	/* XXX: ignores boundary, nsegs, etc. */
//	printf("dma allocation %lx %lx %d\n", alignment, boundary, nsegs);
	error = rumpcomp_pci_dmalloc(size, alignment, &pa, &vacookie);
#endif
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = pa;
	segs[curseg].ds_len = PAGE_SIZE;
	segs[curseg]._ds_vacookie = vacookie;
	segs[curseg]._ds_sizecookie = sizecookie;
	size -= PAGE_SIZE;
	pa += PAGE_SIZE;
	vacookie += PAGE_SIZE;

	for (; size;
	    pa += PAGE_SIZE, vacookie += PAGE_SIZE, size -= PAGE_SIZE) {
		curaddr = pa;
		if (curaddr == (lastaddr + PAGE_SIZE) &&
		    (lastaddr & boundary) == (curaddr & boundary)) {
			segs[curseg].ds_len += PAGE_SIZE;
		} else {
			curseg++;
			if (curseg >= nsegs)
				return EFBIG;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
			segs[curseg]._ds_vacookie = vacookie;
			segs[curseg]._ds_sizecookie = sizecookie;
		}
		lastaddr = curaddr;
	}
	*rsegs = curseg + 1;

	return (0);
}