/*	$NetBSD: rumpdev_bus_dma.c,v 1.8 2019/01/27 02:08:48 pgoyette Exp $	*/

/*-
 * Copyright (c) 2013 Antti Kantee
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * bus_dma(9) implementation which runs on top of rump kernel hypercalls.
 * It is essentially the same as the PowerPC implementation it is based on,
 * except with some indirection and the PowerPC MD features removed.
 * It can be expected to run as-is on x86; other archs may need
 * some cache flushing hooks.
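 *
 * Virtual-to-bus translation is delegated to the hypervisor via
 * rumpcomp_pci_virt_to_mach(), and bus_dmamap_sync() currently
 * reduces to a plain memory barrier.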
 *
 * From sys/arch/powerpc/powerpc/bus_dma.c:
 *	NetBSD: bus_dma.c,v 1.46 2012/02/01 09:54:03 matt Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rumpdev_bus_dma.c,v 1.8 2019/01/27 02:08:48 pgoyette Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/mbuf.h>
#include <sys/bus.h>
#include <sys/intr.h>

#include <uvm/uvm.h>

#include "pci_user.h"

#define	EIEIO	membar_sync()

int	_bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct vmspace *, int, paddr_t *, int *, int);

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
	bus_size_t maxsegsz, bus_size_t boundary, int flags,
	bus_dmamap_t *dmamp)
{
	bus_dmamap_t map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(*map) + sizeof(bus_dma_segment_t [nsegments - 1]);
	if ((mapstore = kmem_intr_alloc(mapsize,
	    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return (ENOMEM);

	memset(mapstore, 0, mapsize);
	map = (void *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_bounce_thresh = 0;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	size_t mapsize = sizeof(*map)
	    + sizeof(bus_dma_segment_t [map->_dm_segcnt - 1]);
	kmem_intr_free(map, mapsize);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map,
	void *buf, bus_size_t buflen, struct vmspace *vm, int flags,
	paddr_t *lastaddrp, int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

//	printf("%s(%p,%p,%p,%u,%p,%#x,%p,%p,%u)\n", __func__,
//	    t, map, buf, buflen, vm, flags, lastaddrp, segp, first);

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
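		 * User vmspaces must be translated through their pmap;
		 * kernel virtual addresses can use vtophys() directly.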
		 */
		if (!VMSPACE_IS_KERNEL_P(vm))
			(void) pmap_extract(vm_map_pmap(&vm->vm_map),
			    vaddr, (void *)&curaddr);
		else
			curaddr = vtophys(vaddr);

		/*
		 * If we're beyond the bounce threshold, notify
		 * the caller.
		 */
		if (map->_dm_bounce_thresh != 0 &&
		    curaddr >= map->_dm_bounce_thresh)
			return (EINVAL);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;
		sgsize = min(sgsize, map->dm_maxsegsz);

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr
			    = rumpcomp_pci_virt_to_mach((void *)curaddr);
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (rumpcomp_pci_virt_to_mach((void *)curaddr) & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr =
				    rumpcomp_pci_virt_to_mach((void *)curaddr);
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */

	return (0);
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map,
	void *buf, bus_size_t buflen, struct proc *p, int flags)
{
	paddr_t lastaddr = 0;
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
	struct mbuf *m0, int flags)
{
	paddr_t lastaddr = 0;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next, first = 0) {
		if (m->m_len == 0)
			continue;
#ifdef POOL_VTOPHYS
		/* XXX Could be better about coalescing. */
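		/*
		 * Fast path: mbufs whose physical address is already
		 * known (a cluster's ext_paddr or the mbuf's own m_paddr)
		 * can be entered into the map directly, skipping the
		 * page-by-page walk in _bus_dmamap_load_buffer().
		 */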
		/* XXX Doesn't check boundaries. */
		switch (m->m_flags & (M_EXT|M_EXT_CLUSTER)) {
		case M_EXT|M_EXT_CLUSTER:
			/* XXX KDASSERT */
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			lastaddr = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
 have_addr:
			if (first == 0 && ++seg >= map->_dm_segcnt) {
				error = EFBIG;
				continue;
			}
			map->dm_segs[seg].ds_addr =
			    rumpcomp_pci_virt_to_mach((void *)lastaddr);
			map->dm_segs[seg].ds_len = m->m_len;
			lastaddr += m->m_len;
			continue;

		case 0:
			lastaddr = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			goto have_addr;

		default:
			break;
		}
#endif
		error = _bus_dmamap_load_buffer(t, map, m->m_data,
		    m->m_len, vmspace_kernel(), flags, &lastaddr, &seg, first);
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
	struct uio *uio, int flags)
{
	paddr_t lastaddr = 0;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
	bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

void
bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
	bus_addr_t offset, bus_size_t len, int ops)
{

	/* XXX: this might need some MD tweaks */
	membar_sync();
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
#ifdef RUMPCOMP_USERFEATURE_PCI_DMAFREE
	vaddr_t vacookie = segs[0]._ds_vacookie;
	bus_size_t sizecookie = segs[0]._ds_sizecookie;

	rumpcomp_pci_dmafree(vacookie, sizecookie);
#else
	panic("bus_dmamem_free not implemented");
#endif
}

/*
 * We don't have a hypercall for mapping scatter-gather memory,
 * so mapping simply fails if there is more than one segment to map.
 */
int
bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
	size_t size, void **kvap, int flags)
{
	struct rumpcomp_pci_dmaseg *dss;
	size_t allocsize = nsegs * sizeof(*dss);
	int rv, i;

	/*
	 * Though rumpcomp_pci_dmaseg "accidentally" matches the
	 * bus_dma segment descriptor (at least for now), act
	 * proper and actually translate it.
	 */
	dss = kmem_alloc(allocsize, KM_SLEEP);
	for (i = 0; i < nsegs; i++) {
		dss[i].ds_pa = segs[i].ds_addr;
		dss[i].ds_len = segs[i].ds_len;
		dss[i].ds_vacookie = segs[i]._ds_vacookie;
	}
	rv = rumpcomp_pci_dmamem_map(dss, nsegs, size, kvap);
	kmem_free(dss, allocsize);

	return rv;
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

	/* nothing to do as long as bus_dmamem_map() is what it is */
}

paddr_t
bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
	off_t off, int prot, int flags)
{

	panic("bus_dmamem_mmap not supported");
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
	bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
	int flags)
{
	paddr_t curaddr, lastaddr, pa;
	vaddr_t vacookie;
	size_t sizecookie;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	sizecookie = size;

	/*
	 * Allocate pages from the VM system.
	 */
#if 0
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
#else
	/* XXX: ignores boundary, nsegs, etc. */
	//printf("dma allocation %lx %lx %d\n", alignment, boundary, nsegs);
	error = rumpcomp_pci_dmalloc(size, alignment, &pa, &vacookie);
#endif
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
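	 * We walk the allocation one page at a time, extending the
	 * current segment while the pages remain physically contiguous
	 * and no boundary is crossed.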
	 */
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = pa;
	segs[curseg].ds_len = PAGE_SIZE;
	segs[curseg]._ds_vacookie = vacookie;
	segs[curseg]._ds_sizecookie = sizecookie;
	size -= PAGE_SIZE;
	pa += PAGE_SIZE;
	vacookie += PAGE_SIZE;

	for (; size;
	    pa += PAGE_SIZE, vacookie += PAGE_SIZE, size -= PAGE_SIZE) {
		curaddr = pa;
		if (curaddr == (lastaddr + PAGE_SIZE) &&
		    (lastaddr & boundary) == (curaddr & boundary)) {
			segs[curseg].ds_len += PAGE_SIZE;
		} else {
			curseg++;
			if (curseg >= nsegs)
				return EFBIG;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
			segs[curseg]._ds_vacookie = vacookie;
			segs[curseg]._ds_sizecookie = sizecookie;
		}
		lastaddr = curaddr;
	}
	*rsegs = curseg + 1;

	return (0);
}
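
/*
 * A minimal usage sketch of the interface above, for orientation only;
 * it is not compiled into the kernel.  The function example_dma_alloc()
 * and its abbreviated error handling are hypothetical, but the
 * bus_dma(9) calls are the ones implemented in this file.
 */
#if 0
static int
example_dma_alloc(bus_dma_tag_t dmat, bus_size_t len,
	bus_dmamap_t *mapp, void **kvap)
{
	bus_dma_segment_t seg;
	int nsegs, error;

	/* Allocate DMA-safe memory: one page-aligned segment. */
	error = bus_dmamem_alloc(dmat, len, PAGE_SIZE, 0,
	    &seg, 1, &nsegs, BUS_DMA_WAITOK);
	if (error)
		return error;

	/* Map it into kernel virtual address space. */
	error = bus_dmamem_map(dmat, &seg, nsegs, len, kvap,
	    BUS_DMA_WAITOK);
	if (error)
		return error;

	/* Create a map and load the buffer for device access. */
	error = bus_dmamap_create(dmat, len, 1, len, 0,
	    BUS_DMA_WAITOK, mapp);
	if (error)
		return error;
	error = bus_dmamap_load(dmat, *mapp, *kvap, len,
	    NULL, BUS_DMA_WAITOK);
	if (error)
		return error;

	/* Order CPU and device accesses around the transfer. */
	bus_dmamap_sync(dmat, *mapp, 0, len,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return 0;
}
#endif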