/*	$NetBSD: bus.c,v 1.25 2005/01/18 07:12:16 chs Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * bus_space(9) and bus_dma(9) implementation for NetBSD/x68k.
 * These are default implementations; some buses may use their own.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus.c,v 1.25 2005/01/18 07:12:16 chs Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include <m68k/cacheops.h>
#include <machine/bus.h>

#if defined(M68040) || defined(M68060)
static inline void dmasync_flush(bus_addr_t, bus_size_t);
static inline void dmasync_inval(bus_addr_t, bus_size_t);
#endif

int
x68k_bus_space_alloc(bus_space_tag_t t, bus_addr_t rstart, bus_addr_t rend,
    bus_size_t size, bus_size_t alignment, bus_size_t boundary, int flags,
    bus_addr_t *bpap, bus_space_handle_t *bshp)
{
	return (EINVAL);
}

void
x68k_bus_space_free(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size)
{
	panic("bus_space_free: shouldn't be here");
}


extern paddr_t avail_end;

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
x68k_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct x68k_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct x68k_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	memset(mapstore, 0, mapsize);
	map = (struct x68k_bus_dmamap *)mapstore;
	map->x68k_dm_size = size;
	map->x68k_dm_segcnt = nsegments;
	map->x68k_dm_maxsegsz = maxsegsz;
	map->x68k_dm_boundary = boundary;
	map->x68k_dm_bounce_thresh = t->_bounce_thresh;
	map->x68k_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
x68k_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	free(map, M_DMAMAP);
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
x68k_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	paddr_t lastaddr;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->x68k_dm_size)
		return (EINVAL);

	seg = 0;
	error = x68k_bus_dmamap_load_buffer(map, buf, buflen, p, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like x68k_bus_dmamap_load(), but for mbufs.
 */
int
x68k_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("x68k_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->x68k_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = x68k_bus_dmamap_load_buffer(map, m->m_data, m->m_len,
		    NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

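/*
 * Illustrative sketch only (not taken from a real driver): the usual
 * bus_dma(9) lifecycle a driver goes through with the map functions
 * above and the sync/unload functions later in this file.  The softc
 * fields (sc->sc_dmat, sc->sc_dmamap, sc->sc_buf, sc->sc_buflen) are
 * hypothetical names used only for this example.
 *
 *	if (bus_dmamap_create(sc->sc_dmat, MAXBSIZE, 1, MAXBSIZE, 0,
 *	    BUS_DMA_NOWAIT, &sc->sc_dmamap) != 0)
 *		return (ENOMEM);
 *	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, sc->sc_buf,
 *	    sc->sc_buflen, NULL, BUS_DMA_NOWAIT) != 0)
 *		goto bad;
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, sc->sc_buflen,
 *	    BUS_DMASYNC_PREREAD);
 *	... start the device->memory transfer and wait for completion ...
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, sc->sc_buflen,
 *	    BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
 *	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
 */
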
/*
 * Like x68k_bus_dmamap_load(), but for uios.
 */
int
x68k_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
#if 0
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = x68k_bus_dmamap_load_buffer(map, addr, minlen,
		    p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
#else
	panic("x68k_bus_dmamap_load_uio: not implemented");
#endif
}

/*
 * Like x68k_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
x68k_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("x68k_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
x68k_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

#if defined(M68040) || defined(M68060)
static inline void
dmasync_flush(bus_addr_t addr, bus_size_t len)
{
	bus_addr_t end = addr + len;

	if (len <= 1024) {
		addr = addr & ~0xF;

		do {
			DCFL(addr);
			addr += 16;
		} while (addr < end);
	} else {
		addr = m68k_trunc_page(addr);

		do {
			DCFP(addr);
			addr += PAGE_SIZE;
		} while (addr < end);
	}
}

static inline void
dmasync_inval(bus_addr_t addr, bus_size_t len)
{
	bus_addr_t end = addr + len;

	if (len <= 1024) {
		addr = addr & ~0xF;

		do {
			DCFL(addr);
			ICPL(addr);
			addr += 16;
		} while (addr < end);
	} else {
		addr = m68k_trunc_page(addr);

		do {
			DCPL(addr);
			ICPP(addr);
			addr += PAGE_SIZE;
		} while (addr < end);
	}
}
#endif

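/*
 * The helpers above perform the two cache operations DMA needs on the
 * copyback caches of the 68040/68060, and are used by
 * x68k_bus_dmamap_sync() below: dmasync_flush() pushes dirty data
 * cache lines to memory (for BUS_DMASYNC_PREWRITE, before the device
 * reads the buffer), and dmasync_inval() discards cached copies (for
 * BUS_DMASYNC_POSTREAD, after the device has written the buffer).
 * The 1024-byte threshold presumably trades per-line against per-page
 * operations: 1024 bytes is at most 1024 / 16 = 64 line operations,
 * beyond which flushing or purging the whole pages covering the
 * region is expected to be cheaper.
 */
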
/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
void
x68k_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
#if defined(M68040) || defined(M68060)
	bus_dma_segment_t *ds = map->dm_segs;
	bus_addr_t seg;
	int i;

	/* Only PREWRITE (flush) and POSTREAD (invalidate) need work. */
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) == 0)
		return;
#if defined(M68020) || defined(M68030)
	if (mmutype != MMU_68040) {
		if ((ops & BUS_DMASYNC_POSTREAD) == 0)
			return;		/* no copyback cache */
		ICIA();			/* no per-page/per-line control */
		DCIA();
		return;
	}
#endif
	if (offset >= map->dm_mapsize)
		return;				/* driver bug; warn it? */
	if (offset + len > map->dm_mapsize)
		len = map->dm_mapsize - offset;	/* driver bug; warn it? */

	i = 0;
	while (ds[i].ds_len <= offset) {
		offset -= ds[i++].ds_len;
		continue;
	}
	while (len > 0) {
		seg = ds[i].ds_len - offset;
		if (seg > len)
			seg = len;
		if (mmutype == MMU_68040 && (ops & BUS_DMASYNC_PREWRITE))
			dmasync_flush(ds[i].ds_addr + offset, seg);
		if (ops & BUS_DMASYNC_POSTREAD)
			dmasync_inval(ds[i].ds_addr + offset, seg);
		offset = 0;
		len -= seg;
		i++;
	}
#else	/* no 040/060 */
	if ((ops & BUS_DMASYNC_POSTREAD)) {
		ICIA();			/* no per-page/per-line control */
		DCIA();
	}
#endif
}

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
x68k_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{

	return (x68k_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, trunc_page(avail_end)));
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
x68k_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
x68k_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;

	size = round_page(size);

	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("x68k_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}

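/*
 * Illustrative sketch only (hypothetical names, not from a real
 * driver): how the allocation functions above are typically combined
 * to obtain a kernel-visible, DMA-safe buffer, e.g. for a descriptor
 * ring.
 *
 *	bus_dma_segment_t seg;
 *	int rseg, error;
 *	caddr_t kva;
 *
 *	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
 *	if (error == 0)
 *		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
 *		    &kva, BUS_DMA_NOWAIT);
 *
 * The kva is then handed to bus_dmamap_load() to obtain the physical
 * segments for the device; teardown is bus_dmamem_unmap() followed by
 * bus_dmamem_free().
 */
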
/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
x68k_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{
#ifdef DIAGNOSTIC
	if (m68k_page_offset(kva))
		panic("x68k_bus_dmamem_unmap");
#endif

	size = round_page(size);

	uvm_km_free(kernel_map, (vaddr_t)kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
x68k_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (m68k_page_offset(off))
			panic("x68k_bus_dmamem_mmap: offset unaligned");
		if (m68k_page_offset(segs[i].ds_addr))
			panic("x68k_bus_dmamem_mmap: segment unaligned");
		if (m68k_page_offset(segs[i].ds_len))
			panic("x68k_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (m68k_btop((caddr_t)segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}

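/*
 * Sketch of the usual consumer of the mmap hook above (hypothetical
 * driver names): a character device's d_mmap entry simply forwards to
 * bus_dmamem_mmap() with the segments it allocated earlier and
 * returns the result unchanged.
 *
 *	paddr_t
 *	foo_mmap(dev_t dev, off_t off, int prot)
 *	{
 *		struct foo_softc *sc = ...;	// look up the softc for dev
 *
 *		return (bus_dmamem_mmap(sc->sc_dmat, sc->sc_segs,
 *		    sc->sc_nsegs, off, prot, 0));
 *	}
 */
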
/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
x68k_bus_dmamap_load_buffer(bus_dmamap_t map, void *buf, bus_size_t buflen,
    struct proc *p, int flags, paddr_t *lastaddrp, int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	lastaddr = *lastaddrp;
	bmask = ~(map->x68k_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		(void) pmap_extract(pmap, vaddr, &curaddr);

		/*
		 * If we're beyond the bounce threshold, notify
		 * the caller.
		 */
		if (map->x68k_dm_bounce_thresh != 0 &&
		    curaddr >= map->x68k_dm_bounce_thresh)
			return (EINVAL);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - m68k_page_offset(vaddr);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->x68k_dm_boundary > 0) {
			baddr = (curaddr + map->x68k_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->x68k_dm_maxsegsz &&
			    (map->x68k_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->x68k_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */
	return (0);
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
x68k_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.tqe_next;

	for (; m != NULL; m = m->pageq.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("x68k_bus_dmamem_alloc_range");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}