/*	$NetBSD: rumpdev_bus_dma.c,v 1.10 2020/11/02 18:58:06 christos Exp $	*/

/*-
 * Copyright (c) 2013 Antti Kantee
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * bus_dma(9) implementation which runs on top of rump kernel hypercalls.
 * It's essentially the same as the PowerPC implementation it's based on,
 * except with some indirection and PowerPC MD features removed.
 * This can be expected to run on x86; other archs may need
 * cache flushing hooks.
 *
 * From sys/arch/powerpc/powerpc/bus_dma.c:
 *	NetBSD: bus_dma.c,v 1.46 2012/02/01 09:54:03 matt Exp
 */
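/*
 * For reference, the usual bus_dma(9) life cycle as a driver would use
 * the functions below (a minimal, hypothetical sketch; "sc", "sc_dmat",
 * "sc_map", "buf" and "len" are made-up driver state, not part of this
 * file):
 *
 *	bus_dmamap_create(sc->sc_dmat, len, 1, len, 0, BUS_DMA_WAITOK,
 *	    &sc->sc_map);
 *	bus_dmamap_load(sc->sc_dmat, sc->sc_map, buf, len, NULL,
 *	    BUS_DMA_WAITOK);
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_map, 0, len,
 *	    BUS_DMASYNC_PREWRITE);
 *	... let the device DMA from the buffer ...
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_map, 0, len,
 *	    BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dmat, sc->sc_map);
 *	bus_dmamap_destroy(sc->sc_dmat, sc->sc_map);
 */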
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rumpdev_bus_dma.c,v 1.10 2020/11/02 18:58:06 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/mbuf.h>
#include <sys/bus.h>
#include <sys/intr.h>

#include "pci_user.h"

#define	EIEIO	membar_sync()

int	_bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct vmspace *, int, paddr_t *, int *, int);

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
	bus_size_t maxsegsz, bus_size_t boundary, int flags,
	bus_dmamap_t *dmamp)
{
	bus_dmamap_t map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(*map) + sizeof(bus_dma_segment_t [nsegments - 1]);
	if ((mapstore = kmem_intr_alloc(mapsize,
	    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return (ENOMEM);

	memset(mapstore, 0, mapsize);
	map = (void *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_bounce_thresh = 0;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	size_t mapsize = sizeof(*map)
	    + sizeof(bus_dma_segment_t [map->_dm_segcnt - 1]);

	kmem_intr_free(map, mapsize);
}
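/*
 * To illustrate the sizing above: for a hypothetical nsegments of 8,
 * the allocation is sizeof(*map) plus room for 7 more bus_dma_segment_t's
 * appended past the one embedded in the map, so dm_segs[0..7] are all
 * valid.  A matching create/destroy pairing ("tag" is an assumed
 * bus_dma_tag_t from the bus front-end):
 *
 *	bus_dmamap_t map;
 *	int error;
 *
 *	error = bus_dmamap_create(tag, MAXPHYS, 8, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT, &map);	// may fail with ENOMEM
 *	if (error == 0)
 *		bus_dmamap_destroy(tag, map);
 */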
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map,
	void *buf, bus_size_t buflen, struct vmspace *vm, int flags,
	paddr_t *lastaddrp, int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

//	printf("%s(%p,%p,%p,%u,%p,%#x,%p,%p,%u)\n", __func__,
//	    t, map, buf, buflen, vm, flags, lastaddrp, segp, first);

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (!VMSPACE_IS_KERNEL_P(vm))
			(void) pmap_extract(vm_map_pmap(&vm->vm_map),
			    vaddr, (void *)&curaddr);
		else
			curaddr = vtophys(vaddr);

		/*
		 * If we're beyond the bounce threshold, notify
		 * the caller.
		 */
		if (map->_dm_bounce_thresh != 0 &&
		    curaddr >= map->_dm_bounce_thresh)
			return (EINVAL);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;
		sgsize = MIN(sgsize, map->dm_maxsegsz);

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr
			    = rumpcomp_pci_virt_to_mach((void *)curaddr);
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (rumpcomp_pci_virt_to_mach((void *)curaddr)
			      & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr =
				    rumpcomp_pci_virt_to_mach((void *)curaddr);
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */

	return (0);
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map,
	void *buf, bus_size_t buflen, struct proc *p, int flags)
{
	paddr_t lastaddr = 0;
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
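/*
 * Worked example for the chunking in _bus_dmamap_load_buffer above,
 * assuming a hypothetical 4096-byte PAGE_SIZE: loading a 9000-byte
 * buffer that starts 2048 bytes into a page produces chunks of
 * 2048, 4096 and 2856 bytes (sgsize is clipped first to the end of
 * the current page, then to the remaining buflen), and adjacent
 * chunks are merged into a single dm_segs[] entry whenever the
 * underlying pages turn out to be physically contiguous.
 */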
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
	struct mbuf *m0, int flags)
{
	paddr_t lastaddr = 0;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next, first = 0) {
		if (m->m_len == 0)
			continue;
#ifdef POOL_VTOPHYS
		/* XXX Could be better about coalescing. */
		/* XXX Doesn't check boundaries. */
		switch (m->m_flags & (M_EXT|M_EXT_CLUSTER)) {
		case M_EXT|M_EXT_CLUSTER:
			/* XXX KDASSERT */
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			lastaddr = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
 have_addr:
			if (first == 0 && ++seg >= map->_dm_segcnt) {
				error = EFBIG;
				continue;
			}
			map->dm_segs[seg].ds_addr =
			    rumpcomp_pci_virt_to_mach((void *)lastaddr);
			map->dm_segs[seg].ds_len = m->m_len;
			lastaddr += m->m_len;
			continue;

		case 0:
			lastaddr = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			goto have_addr;

		default:
			break;
		}
#endif
		error = _bus_dmamap_load_buffer(t, map, m->m_data,
		    m->m_len, vmspace_kernel(), flags, &lastaddr, &seg, first);
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
	struct uio *uio, int flags)
{
	paddr_t lastaddr = 0;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
	bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

void
bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
	bus_addr_t offset, bus_size_t len, int ops)
{

	/* XXX: this might need some MD tweaks */
	membar_sync();
}
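/*
 * In this implementation bus_dmamap_sync() degenerates to a single
 * membar_sync(), but callers should still bracket device access with
 * the usual PRE/POST operations so the same code stays correct on
 * ports with real cache handling.  A hypothetical device-read path:
 *
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREREAD);
 *	... tell the device to DMA into the buffer ...
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTREAD);
 *	... now the CPU may safely read the buffer ...
 */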
/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
#ifdef RUMPCOMP_USERFEATURE_PCI_DMAFREE
	vaddr_t vacookie = segs[0]._ds_vacookie;
	bus_size_t sizecookie = segs[0]._ds_sizecookie;

	rumpcomp_pci_dmafree(vacookie, sizecookie);
#else
	panic("bus_dmamem_free not implemented");
#endif
}

/*
 * We don't have a hypercall for mapping scatter-gather memory,
 * so simply fail if there is more than one segment to map.
 */
int
bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
	size_t size, void **kvap, int flags)
{
	struct rumpcomp_pci_dmaseg *dss;
	size_t allocsize = nsegs * sizeof(*dss);
	int rv, i;

	/*
	 * Though rumpcomp_pci_dmaseg "accidentally" matches the
	 * bus_dma segment descriptor (at least for now), act
	 * properly and actually translate it.
	 */
	dss = kmem_alloc(allocsize, KM_SLEEP);
	for (i = 0; i < nsegs; i++) {
		dss[i].ds_pa = segs[i].ds_addr;
		dss[i].ds_len = segs[i].ds_len;
		dss[i].ds_vacookie = segs[i]._ds_vacookie;
	}
	rv = rumpcomp_pci_dmamem_map(dss, nsegs, size, kvap);
	kmem_free(dss, allocsize);

	return rv;
}
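/*
 * Typical way DMA-safe memory reaches bus_dmamem_map() (a hypothetical
 * sketch, error unwinding omitted): allocate the physical segments
 * first, then map them into kernel virtual address space.
 *
 *	bus_dma_segment_t seg;
 *	int rseg, error;
 *	void *kva;
 *
 *	error = bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &seg, 1,
 *	    &rseg, BUS_DMA_NOWAIT);
 *	if (error == 0)
 *		error = bus_dmamem_map(t, &seg, rseg, size, &kva,
 *		    BUS_DMA_NOWAIT);
 */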
/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

	/* nothing to do as long as bus_dmamem_map() is what it is */
}

paddr_t
bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
	off_t off, int prot, int flags)
{

	panic("bus_dmamem_mmap not supported");
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
	bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
	int flags)
{
	paddr_t curaddr, lastaddr, pa;
	vaddr_t vacookie;
	size_t sizecookie;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	sizecookie = size;

	/*
	 * Allocate pages from the VM system.
	 */
#if 0
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
#else
	/* XXX: ignores boundary, nsegs, etc. */
	//printf("dma allocation %lx %lx %d\n", alignment, boundary, nsegs);
	error = rumpcomp_pci_dmalloc(size, alignment, &pa, &vacookie);
#endif
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = pa;
	segs[curseg].ds_len = PAGE_SIZE;
	segs[curseg]._ds_vacookie = vacookie;
	segs[curseg]._ds_sizecookie = sizecookie;
	size -= PAGE_SIZE;
	pa += PAGE_SIZE;
	vacookie += PAGE_SIZE;

	for (; size;
	    pa += PAGE_SIZE, vacookie += PAGE_SIZE, size -= PAGE_SIZE) {
		curaddr = pa;
		if (curaddr == (lastaddr + PAGE_SIZE) &&
		    (lastaddr & boundary) == (curaddr & boundary)) {
			segs[curseg].ds_len += PAGE_SIZE;
		} else {
			curseg++;
			if (curseg >= nsegs)
				return EFBIG;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
			segs[curseg]._ds_vacookie = vacookie;
			segs[curseg]._ds_sizecookie = sizecookie;
		}
		lastaddr = curaddr;
	}
	*rsegs = curseg + 1;

	return (0);
}
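/*
 * The matching teardown for memory obtained via the alloc/map sequence
 * sketched earlier would be (hypothetical, using the same "seg", "rseg",
 * "kva" and "size"):
 *
 *	bus_dmamem_unmap(t, kva, size);
 *	bus_dmamem_free(t, &seg, rseg);
 *
 * Note bus_dmamem_free() only works when the hypervisor provides
 * RUMPCOMP_USERFEATURE_PCI_DMAFREE; otherwise it panics.
 */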