/*	$NetBSD: rumpdev_bus_dma.c,v 1.3 2014/04/14 21:43:00 pooka Exp $	*/

/*-
 * Copyright (c) 2013 Antti Kantee
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * bus_dma(9) implementation which runs on top of rump kernel hypercalls.
 * It's essentially the same as the PowerPC implementation it's based on,
 * except with some indirection and the PowerPC MD features removed.
 * It can be expected to run as-is on x86; other archs may need
 * cache flushing hooks.
 *
 * From sys/arch/powerpc/powerpc/bus_dma.c:
 *	NetBSD: bus_dma.c,v 1.46 2012/02/01 09:54:03 matt Exp
 */
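/*
 * The machine dependencies are confined to a small set of rumpcomp
 * hypercalls, declared in pci_user.h and used below:
 *
 *	rumpcomp_pci_virt_to_mach():	translate a kernel address to a
 *					bus address for dm_segs
 *	rumpcomp_pci_dmalloc():		allocate DMA-safe memory
 *	rumpcomp_pci_dmamem_map():	map allocated segments into the
 *					rump kernel address space
 */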
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/mbuf.h>
#include <sys/bus.h>
#include <sys/intr.h>

#include <uvm/uvm.h>

#include "pci_user.h"

#define	EIEIO	membar_sync()

int	_bus_dmamap_load_buffer (bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct vmspace *, int, paddr_t *, int *, int);

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
	bus_size_t maxsegsz, bus_size_t boundary, int flags,
	bus_dmamap_t *dmamp)
{
	bus_dmamap_t map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(*map) + sizeof(bus_dma_segment_t [nsegments - 1]);
	if ((mapstore = kmem_intr_alloc(mapsize,
	    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return (ENOMEM);

	memset(mapstore, 0, mapsize);
	map = (void *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_bounce_thresh = 0;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}
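/*
 * The usual consumer lifecycle, per bus_dma(9), is create -> load ->
 * sync -> unload -> destroy.  A hypothetical driver sketch (sc_dmat,
 * sc_map, buf and len are illustrative names, not part of this file):
 *
 *	bus_dmamap_create(sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_WAITOK, &sc_map);
 *	bus_dmamap_load(sc_dmat, sc_map, buf, len, NULL, BUS_DMA_WAITOK);
 *	bus_dmamap_sync(sc_dmat, sc_map, 0, len, BUS_DMASYNC_PREWRITE);
 *	(device reads from the addresses in sc_map->dm_segs[])
 *	bus_dmamap_sync(sc_dmat, sc_map, 0, len, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc_dmat, sc_map);
 *	bus_dmamap_destroy(sc_dmat, sc_map);
 */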
/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	size_t mapsize = sizeof(*map)
	    + sizeof(bus_dma_segment_t [map->_dm_segcnt - 1]);
	kmem_intr_free(map, mapsize);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map,
	void *buf, bus_size_t buflen, struct vmspace *vm, int flags,
	paddr_t *lastaddrp, int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

//	printf("%s(%p,%p,%p,%u,%p,%#x,%p,%p,%u)\n", __func__,
//	    t, map, buf, buflen, vm, flags, lastaddrp, segp, first);

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (!VMSPACE_IS_KERNEL_P(vm))
			(void) pmap_extract(vm_map_pmap(&vm->vm_map),
			    vaddr, (void *)&curaddr);
		else
			curaddr = vtophys(vaddr);

		/*
		 * If we're beyond the bounce threshold, notify
		 * the caller.
		 */
		if (map->_dm_bounce_thresh != 0 &&
		    curaddr >= map->_dm_bounce_thresh)
			return (EINVAL);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;
		sgsize = min(sgsize, map->dm_maxsegsz);

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr
			    = rumpcomp_pci_virt_to_mach((void *)curaddr);
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (rumpcomp_pci_virt_to_mach((void *)curaddr)
			      & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr =
				    rumpcomp_pci_virt_to_mach((void *)curaddr);
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */

	return (0);
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map,
	void *buf, bus_size_t buflen, struct proc *p, int flags)
{
	paddr_t lastaddr = 0;
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
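/*
 * After a successful load, dm_nsegs and dm_segs[] describe the
 * bus-address view of the buffer.  A hypothetical consumer (dev and
 * device_add_sg_entry() are illustrative names) would program its
 * scatter-gather list along these lines:
 *
 *	int i;
 *
 *	for (i = 0; i < map->dm_nsegs; i++)
 *		device_add_sg_entry(dev, map->dm_segs[i].ds_addr,
 *		    map->dm_segs[i].ds_len);
 */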
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
	struct mbuf *m0, int flags)
{
	paddr_t lastaddr = 0;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next, first = 0) {
		if (m->m_len == 0)
			continue;
#ifdef POOL_VTOPHYS
		/* XXX Could be better about coalescing. */
		/* XXX Doesn't check boundaries. */
		switch (m->m_flags & (M_EXT|M_CLUSTER)) {
		case M_EXT|M_CLUSTER:
			/* XXX KDASSERT */
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			lastaddr = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
 have_addr:
			if (first == 0 && ++seg >= map->_dm_segcnt) {
				error = EFBIG;
				continue;
			}
			map->dm_segs[seg].ds_addr =
			    rumpcomp_pci_virt_to_mach((void *)lastaddr);
			map->dm_segs[seg].ds_len = m->m_len;
			lastaddr += m->m_len;
			continue;

		case 0:
			lastaddr = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			goto have_addr;

		default:
			break;
		}
#endif
		error = _bus_dmamap_load_buffer(t, map, m->m_data,
		    m->m_len, vmspace_kernel(), flags, &lastaddr, &seg, first);
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
	struct uio *uio, int flags)
{
	paddr_t lastaddr = 0;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
	bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

void
bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
	bus_addr_t offset, bus_size_t len, int ops)
{

	/* XXX: this might need some MD tweaks */
	membar_sync();
}
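/*
 * For reference, the bus_dma(9) sync contract: BUS_DMASYNC_PREREAD and
 * BUS_DMASYNC_PREWRITE are issued before the device accesses the
 * memory, BUS_DMASYNC_POSTREAD and BUS_DMASYNC_POSTWRITE after it is
 * done.  Above, every op collapses into a single memory barrier, which
 * suffices on cache-coherent hosts such as x86; non-coherent archs
 * would need per-op cache maintenance here (the MD tweaks noted above).
 */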
/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{

	panic("bus_dmamem_free not implemented");
}

/*
 * We don't have a hypercall for mapping scatter-gather memory,
 * so just fail if there's more than one segment to map.
 */
int
bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
	size_t size, void **kvap, int flags)
{
	struct rumpcomp_pci_dmaseg *dss;
	size_t allocsize = nsegs * sizeof(*dss);
	int rv, i;

	/*
	 * Though rumpcomp_pci_dmaseg "accidentally" matches the
	 * bus_dma segment descriptor (at least for now), act
	 * proper and actually translate it.
	 */
	dss = kmem_alloc(allocsize, KM_SLEEP);
	for (i = 0; i < nsegs; i++) {
		dss[i].ds_pa = segs[i].ds_addr;
		dss[i].ds_len = segs[i].ds_len;
		dss[i].ds_vacookie = segs[i]._ds_vacookie;
	}
	rv = rumpcomp_pci_dmamem_map(dss, nsegs, size, kvap);
	kmem_free(dss, allocsize);

	return rv;
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

	/* nothing to do as long as bus_dmamem_map() is what it is */
}

paddr_t
bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
	off_t off, int prot, int flags)
{

	panic("bus_dmamem_mmap not supported");
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
	bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
	int flags)
{
	paddr_t curaddr, lastaddr, pa;
	vaddr_t vacookie;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
#if 0
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
#else
	/* XXX: ignores boundary, nsegs, etc. */
	//printf("dma allocation %lx %lx %d\n", alignment, boundary, nsegs);
	error = rumpcomp_pci_dmalloc(size, alignment, &pa, &vacookie);
#endif
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = pa;
	segs[curseg].ds_len = PAGE_SIZE;
	segs[curseg]._ds_vacookie = vacookie;
	size -= PAGE_SIZE;
	pa += PAGE_SIZE;
	vacookie += PAGE_SIZE;

	for (; size;
	    pa += PAGE_SIZE, vacookie += PAGE_SIZE, size -= PAGE_SIZE) {
		curaddr = pa;
		if (curaddr == (lastaddr + PAGE_SIZE) &&
		    (lastaddr & boundary) == (curaddr & boundary)) {
			segs[curseg].ds_len += PAGE_SIZE;
		} else {
			curseg++;
			if (curseg >= nsegs)
				return EFBIG;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
			segs[curseg]._ds_vacookie = vacookie;
		}
		lastaddr = curaddr;
	}
	*rsegs = curseg + 1;

	return (0);
}
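/*
 * A hypothetical consumer of the dmamem interface (dmat and DESCSZ are
 * illustrative names): allocate DMA-safe memory, then map it into the
 * kernel address space before touching it.
 *
 *	bus_dma_segment_t seg;
 *	void *kva;
 *	int rseg;
 *
 *	if (bus_dmamem_alloc(dmat, DESCSZ, PAGE_SIZE, 0, &seg, 1,
 *	    &rseg, BUS_DMA_WAITOK) != 0)
 *		return ENOMEM;
 *	if (bus_dmamem_map(dmat, &seg, rseg, DESCSZ, &kva,
 *	    BUS_DMA_WAITOK) != 0)
 *		return ENOMEM;
 */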