/*	$NetBSD: mvmebus.c,v 1.10 2005/12/11 12:22:48 christos Exp $	*/

/*-
 * Copyright (c) 2000, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Steve C. Woodford.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mvmebus.c,v 1.10 2005/12/11 12:22:48 christos Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/kcore.h>

#include <machine/cpu.h>
#include <machine/bus.h>

#include <dev/vme/vmereg.h>
#include <dev/vme/vmevar.h>

#include <dev/mvme/mvmebus.h>

#ifdef DIAGNOSTIC
int	mvmebus_dummy_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
	    bus_size_t, int, bus_dmamap_t *);
void	mvmebus_dummy_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int	mvmebus_dummy_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int);
void	mvmebus_dummy_dmamem_free(bus_dma_tag_t, bus_dma_segment_t *, int);
#endif

#ifdef DEBUG
static const char *mvmebus_mod_string(vme_addr_t, vme_size_t,
	    vme_am_t, vme_datasize_t);
#endif

static void mvmebus_offboard_ram(struct mvmebus_softc *);
static int mvmebus_dmamap_load_common(struct mvmebus_softc *, bus_dmamap_t);

vme_am_t	_mvmebus_am_cap[] = {
	MVMEBUS_AM_CAP_BLKD64 | MVMEBUS_AM_CAP_USER,
	MVMEBUS_AM_CAP_DATA | MVMEBUS_AM_CAP_USER,
	MVMEBUS_AM_CAP_PROG | MVMEBUS_AM_CAP_USER,
	MVMEBUS_AM_CAP_BLK | MVMEBUS_AM_CAP_USER,
	MVMEBUS_AM_CAP_BLKD64 | MVMEBUS_AM_CAP_SUPER,
	MVMEBUS_AM_CAP_DATA | MVMEBUS_AM_CAP_SUPER,
	MVMEBUS_AM_CAP_PROG | MVMEBUS_AM_CAP_SUPER,
	MVMEBUS_AM_CAP_BLK | MVMEBUS_AM_CAP_SUPER
};

const char *mvmebus_irq_name[] = {
	"vmeirq0", "vmeirq1", "vmeirq2", "vmeirq3",
	"vmeirq4", "vmeirq5", "vmeirq6", "vmeirq7"
};

extern phys_ram_seg_t mem_clusters[0];
extern int mem_cluster_cnt;


static void
mvmebus_offboard_ram(sc)
	struct mvmebus_softc *sc;
{
	struct mvmebus_range *svr, *mvr;
	vme_addr_t start, end, size;
	int i;

	/*
	 * If we have any offboard RAM (i.e. a VMEbus RAM board) then
	 * we need to record its details since it's effectively another
	 * VMEbus slave image as far as we're concerned.
	 * The chip-specific backend will have reserved sc->sc_slaves[0]
	 * for exactly this purpose.
	 */
	svr = sc->sc_slaves;
	if (mem_cluster_cnt < 2) {
		svr->vr_am = MVMEBUS_AM_DISABLED;
		return;
	}

	start = mem_clusters[1].start;
	size = mem_clusters[1].size - 1;
	end = start + size;

	/*
	 * Figure out which VMEbus master image the RAM is
	 * visible through. This will tell us the address
	 * modifier and datasizes it uses, as well as allowing
	 * us to calculate its `real' VMEbus address.
	 *
	 * XXX FIXME: This is broken if the RAM is mapped through
	 * a translated address space. For example, on mvme167 it's
	 * perfectly legal to set up the following A32 mapping:
	 *
	 *  vr_locaddr  == 0x80000000
	 *  vr_vmestart == 0x10000000
	 *  vr_vmeend   == 0x10ffffff
	 *
	 * In this case, RAM at VMEbus address 0x10800000 will appear at local
	 * address 0x80800000, but we need to set the slave vr_vmestart to
	 * 0x10800000.
	 */
	for (i = 0, mvr = sc->sc_masters; i < sc->sc_nmasters; i++, mvr++) {
		vme_addr_t vstart = mvr->vr_locstart + mvr->vr_vmestart;

		if (start >= vstart &&
		    end <= vstart + (mvr->vr_vmeend - mvr->vr_vmestart))
			break;
	}
	if (i == sc->sc_nmasters) {
		svr->vr_am = MVMEBUS_AM_DISABLED;
#ifdef DEBUG
		printf("%s: No VMEbus master mapping for offboard RAM!\n",
		    sc->sc_dev.dv_xname);
#endif
		return;
	}

	svr->vr_locstart = start;
	svr->vr_vmestart = start & mvr->vr_mask;
	svr->vr_vmeend = svr->vr_vmestart + size;
	svr->vr_datasize = mvr->vr_datasize;
	svr->vr_mask = mvr->vr_mask;
	svr->vr_am = mvr->vr_am & VME_AM_ADRSIZEMASK;
	svr->vr_am |= MVMEBUS_AM_CAP_DATA | MVMEBUS_AM_CAP_PROG |
	    MVMEBUS_AM_CAP_SUPER | MVMEBUS_AM_CAP_USER;
}

void
mvmebus_attach(sc)
	struct mvmebus_softc *sc;
{
	struct vmebus_attach_args vaa;
	int i;

	/* Zap the IRQ reference counts */
	for (i = 0; i < 8; i++)
		sc->sc_irqref[i] = 0;

	/* If there's offboard RAM, get its VMEbus slave attributes */
	mvmebus_offboard_ram(sc);

#ifdef DEBUG
	for (i = 0; i < sc->sc_nmasters; i++) {
		struct mvmebus_range *vr = &sc->sc_masters[i];
		if (vr->vr_am == MVMEBUS_AM_DISABLED) {
			printf("%s: Master#%d: disabled\n",
			    sc->sc_dev.dv_xname, i);
			continue;
		}
		printf("%s: Master#%d: 0x%08lx -> %s\n",
		    sc->sc_dev.dv_xname, i,
		    vr->vr_locstart + (vr->vr_vmestart & vr->vr_mask),
		    mvmebus_mod_string(vr->vr_vmestart,
			(vr->vr_vmeend - vr->vr_vmestart) + 1,
			vr->vr_am, vr->vr_datasize));
	}

	for (i = 0; i < sc->sc_nslaves; i++) {
		struct mvmebus_range *vr = &sc->sc_slaves[i];
		if (vr->vr_am == MVMEBUS_AM_DISABLED) {
			printf("%s: Slave#%d: disabled\n",
			    sc->sc_dev.dv_xname, i);
			continue;
		}
		printf("%s: Slave#%d: 0x%08lx -> %s\n",
		    sc->sc_dev.dv_xname, i, vr->vr_locstart,
		    mvmebus_mod_string(vr->vr_vmestart,
			(vr->vr_vmeend - vr->vr_vmestart) + 1,
			vr->vr_am, vr->vr_datasize));
	}
#endif

	sc->sc_vct.cookie = sc;
	sc->sc_vct.vct_probe = mvmebus_probe;
	sc->sc_vct.vct_map = mvmebus_map;
	sc->sc_vct.vct_unmap = mvmebus_unmap;
	sc->sc_vct.vct_int_map = mvmebus_intmap;
	sc->sc_vct.vct_int_evcnt = mvmebus_intr_evcnt;
	sc->sc_vct.vct_int_establish = mvmebus_intr_establish;
	sc->sc_vct.vct_int_disestablish = mvmebus_intr_disestablish;
	sc->sc_vct.vct_dmamap_create = mvmebus_dmamap_create;
	sc->sc_vct.vct_dmamap_destroy = mvmebus_dmamap_destroy;
	sc->sc_vct.vct_dmamem_alloc = mvmebus_dmamem_alloc;
	sc->sc_vct.vct_dmamem_free = mvmebus_dmamem_free;

	sc->sc_mvmedmat._cookie = sc;
	sc->sc_mvmedmat._dmamap_load = mvmebus_dmamap_load;
	sc->sc_mvmedmat._dmamap_load_mbuf = mvmebus_dmamap_load_mbuf;
	sc->sc_mvmedmat._dmamap_load_uio = mvmebus_dmamap_load_uio;
	sc->sc_mvmedmat._dmamap_load_raw = mvmebus_dmamap_load_raw;
	sc->sc_mvmedmat._dmamap_unload = mvmebus_dmamap_unload;
	sc->sc_mvmedmat._dmamap_sync = mvmebus_dmamap_sync;
	sc->sc_mvmedmat._dmamem_map = mvmebus_dmamem_map;
	sc->sc_mvmedmat._dmamem_unmap = mvmebus_dmamem_unmap;
	sc->sc_mvmedmat._dmamem_mmap = mvmebus_dmamem_mmap;

#ifdef DIAGNOSTIC
	sc->sc_mvmedmat._dmamap_create = mvmebus_dummy_dmamap_create;
	sc->sc_mvmedmat._dmamap_destroy = mvmebus_dummy_dmamap_destroy;
	sc->sc_mvmedmat._dmamem_alloc = mvmebus_dummy_dmamem_alloc;
	sc->sc_mvmedmat._dmamem_free = mvmebus_dummy_dmamem_free;
#else
	sc->sc_mvmedmat._dmamap_create = NULL;
	sc->sc_mvmedmat._dmamap_destroy = NULL;
	sc->sc_mvmedmat._dmamem_alloc = NULL;
	sc->sc_mvmedmat._dmamem_free = NULL;
#endif

	vaa.va_vct = &sc->sc_vct;
	vaa.va_bdt = &sc->sc_mvmedmat;
	vaa.va_slaveconfig = NULL;

	config_found(&sc->sc_dev, &vaa, 0);
}

int
mvmebus_map(vsc, vmeaddr, len, am, datasize, swap, tag, handle, resc)
	void *vsc;
	vme_addr_t vmeaddr;
	vme_size_t len;
	vme_am_t am;
	vme_datasize_t datasize;
	vme_swap_t swap;
	bus_space_tag_t *tag;
	bus_space_handle_t *handle;
	vme_mapresc_t *resc;
{
	struct mvmebus_softc *sc;
	struct mvmebus_mapresc *mr;
	struct mvmebus_range *vr;
	vme_addr_t end;
	vme_am_t cap, as;
	paddr_t paddr;
	int rv, i;

	sc = vsc;
	end = (vmeaddr + len) - 1;
	paddr = 0;
	vr = sc->sc_masters;
	cap = MVMEBUS_AM2CAP(am);
	as = am & VME_AM_ADRSIZEMASK;

	for (i = 0; i < sc->sc_nmasters && paddr == 0; i++, vr++) {
		if (vr->vr_am == MVMEBUS_AM_DISABLED)
			continue;

		if (cap == (vr->vr_am & cap) &&
		    as == (vr->vr_am & VME_AM_ADRSIZEMASK) &&
		    datasize <= vr->vr_datasize &&
		    vmeaddr >= vr->vr_vmestart && end < vr->vr_vmeend)
			paddr = vr->vr_locstart + (vmeaddr & vr->vr_mask);
	}
	if (paddr == 0)
		return (ENOMEM);

	rv = bus_space_map(sc->sc_bust, paddr, len, 0, handle);
	if (rv != 0)
		return (rv);

	/* Allocate space for the resource tag */
	if ((mr = malloc(sizeof(*mr), M_DEVBUF, M_NOWAIT)) == NULL) {
		bus_space_unmap(sc->sc_bust, *handle, len);
		return (ENOMEM);
	}

	/* Record the range's details */
	mr->mr_am = am;
	mr->mr_datasize = datasize;
	mr->mr_addr = vmeaddr;
	mr->mr_size = len;
	mr->mr_handle = *handle;
	mr->mr_range = i;

	*tag = sc->sc_bust;
	*resc = (vme_mapresc_t *) mr;

	return (0);
}

/* ARGSUSED */
void
mvmebus_unmap(vsc, resc)
	void *vsc;
	vme_mapresc_t resc;
{
	struct mvmebus_softc *sc = vsc;
	struct mvmebus_mapresc *mr = (struct mvmebus_mapresc *) resc;

	bus_space_unmap(sc->sc_bust, mr->mr_handle, mr->mr_size);

	free(mr, M_DEVBUF);
}

int
mvmebus_probe(vsc, vmeaddr, len, am, datasize, callback, arg)
	void *vsc;
	vme_addr_t vmeaddr;
	vme_size_t len;
	vme_am_t am;
	vme_datasize_t datasize;
	int (*callback)(void *, bus_space_tag_t, bus_space_handle_t);
	void *arg;
{
	bus_space_tag_t tag;
	bus_space_handle_t handle;
	vme_mapresc_t resc;
	vme_size_t offs;
	int rv;

	/* Get a temporary mapping to the VMEbus range */
	rv = mvmebus_map(vsc, vmeaddr, len, am, datasize, 0,
	    &tag, &handle, &resc);
	if (rv)
		return (rv);

	if (callback)
		rv = (*callback) (arg, tag, handle);
	else
		for (offs = 0; offs < len && rv == 0;) {
			switch (datasize) {
			case VME_D8:
				rv = bus_space_peek_1(tag, handle, offs, NULL);
				offs += 1;
				break;

			case VME_D16:
				rv = bus_space_peek_2(tag, handle, offs, NULL);
				offs += 2;
				break;

			case VME_D32:
				rv = bus_space_peek_4(tag, handle, offs, NULL);
				offs += 4;
				break;
			}
		}

	mvmebus_unmap(vsc, resc);

	return (rv);
}

/* ARGSUSED */
int
mvmebus_intmap(vsc, level, vector, handlep)
	void *vsc;
	int level, vector;
	vme_intr_handle_t *handlep;
{

	if (level < 1 || level > 7 || vector < 0x80 || vector > 0xff)
		return (EINVAL);

	/* This is rather gross */
	*handlep = (void *) (int) ((level << 8) | vector);
	return (0);
}
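
/*
 * Note on the handle encoding used above (illustration only): the IRQ
 * level occupies bits 8-10 and the vector bits 0-7, so level 3 with
 * vector 0x90 packs to the handle value 0x390.  mvmebus_intr_establish()
 * and mvmebus_intr_disestablish() below recover the two fields with
 * `handle >> 8' and `handle & 0xff'.
 */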

/* ARGSUSED */
const struct evcnt *
mvmebus_intr_evcnt(vsc, handle)
	void *vsc;
	vme_intr_handle_t handle;
{
	struct mvmebus_softc *sc = vsc;

	return (&sc->sc_evcnt[(((int) handle) >> 8) - 1]);
}

void *
mvmebus_intr_establish(vsc, handle, prior, func, arg)
	void *vsc;
	vme_intr_handle_t handle;
	int prior;
	int (*func)(void *);
	void *arg;
{
	struct mvmebus_softc *sc;
	int level, vector, first;

	sc = vsc;

	/* Extract the interrupt's level and vector */
	level = ((int) handle) >> 8;
	vector = ((int) handle) & 0xff;

#ifdef DIAGNOSTIC
	if (vector < 0 || vector > 0xff) {
		printf("%s: Illegal vector offset: 0x%x\n",
		    sc->sc_dev.dv_xname, vector);
		panic("mvmebus_intr_establish");
	}
	if (level < 1 || level > 7) {
		printf("%s: Illegal interrupt level: %d\n",
		    sc->sc_dev.dv_xname, level);
		panic("mvmebus_intr_establish");
	}
#endif

	first = (sc->sc_irqref[level]++ == 0);

	(*sc->sc_intr_establish)(sc->sc_chip, prior, level, vector, first,
	    func, arg, &sc->sc_evcnt[level - 1]);

	return ((void *) handle);
}

void
mvmebus_intr_disestablish(vsc, handle)
	void *vsc;
	vme_intr_handle_t handle;
{
	struct mvmebus_softc *sc;
	int level, vector, last;

	sc = vsc;

	/* Extract the interrupt's level and vector */
	level = ((int) handle) >> 8;
	vector = ((int) handle) & 0xff;

#ifdef DIAGNOSTIC
	if (vector < 0 || vector > 0xff) {
		printf("%s: Illegal vector offset: 0x%x\n",
		    sc->sc_dev.dv_xname, vector);
		panic("mvmebus_intr_disestablish");
	}
	if (level < 1 || level > 7) {
		printf("%s: Illegal interrupt level: %d\n",
		    sc->sc_dev.dv_xname, level);
		panic("mvmebus_intr_disestablish");
	}
	if (sc->sc_irqref[level] == 0) {
		printf("%s: VMEirq#%d: Reference count already zero!\n",
		    sc->sc_dev.dv_xname, level);
		panic("mvmebus_intr_disestablish");
	}
#endif

	last = (--(sc->sc_irqref[level]) == 0);

	(*sc->sc_intr_disestablish)(sc->sc_chip, level, vector, last,
	    &sc->sc_evcnt[level - 1]);
}

#ifdef DIAGNOSTIC
/* ARGSUSED */
int
mvmebus_dummy_dmamap_create(t, size, nsegs, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegs;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{

	panic("Must use vme_dmamap_create() in place of bus_dmamap_create()");
	return (0);	/* Shut up the compiler */
}

/* ARGSUSED */
void
mvmebus_dummy_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	panic("Must use vme_dmamap_destroy() in place of bus_dmamap_destroy()");
}
#endif

/* ARGSUSED */
int
mvmebus_dmamap_create(vsc, len, am, datasize, swap, nsegs,
		segsz, bound, flags, mapp)
	void *vsc;
	vme_size_t len;
	vme_am_t am;
	vme_datasize_t datasize;
	vme_swap_t swap;
	int nsegs;
	vme_size_t segsz;
	vme_addr_t bound;
	int flags;
	bus_dmamap_t *mapp;
{
	struct mvmebus_softc *sc = vsc;
	struct mvmebus_dmamap *vmap;
	struct mvmebus_range *vr;
	vme_am_t cap, as;
	int i, rv;

	cap = MVMEBUS_AM2CAP(am);
	as = am & VME_AM_ADRSIZEMASK;

	/*
	 * Verify that we even stand a chance of satisfying
	 * the VMEbus address space and datasize requested.
	 */
	for (i = 0, vr = sc->sc_slaves; i < sc->sc_nslaves; i++, vr++) {
		if (vr->vr_am == MVMEBUS_AM_DISABLED)
			continue;

		if (as == (vr->vr_am & VME_AM_ADRSIZEMASK) &&
		    cap == (vr->vr_am & cap) && datasize <= vr->vr_datasize &&
		    len <= (vr->vr_vmeend - vr->vr_vmestart))
			break;
	}

	if (i == sc->sc_nslaves)
		return (EINVAL);

	if ((vmap = malloc(sizeof(*vmap), M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);


	rv = bus_dmamap_create(sc->sc_dmat, len, nsegs, segsz,
	    bound, flags, mapp);
	if (rv != 0) {
		free(vmap, M_DMAMAP);
		return (rv);
	}

	vmap->vm_am = am;
	vmap->vm_datasize = datasize;
	vmap->vm_swap = swap;
	vmap->vm_slave = vr;

	(*mapp)->_dm_cookie = vmap;

	return (0);
}

void
mvmebus_dmamap_destroy(vsc, map)
	void *vsc;
	bus_dmamap_t map;
{
	struct mvmebus_softc *sc = vsc;

	free(map->_dm_cookie, M_DMAMAP);
	bus_dmamap_destroy(sc->sc_dmat, map);
}

static int
mvmebus_dmamap_load_common(sc, map)
	struct mvmebus_softc *sc;
	bus_dmamap_t map;
{
	struct mvmebus_dmamap *vmap = map->_dm_cookie;
	struct mvmebus_range *vr = vmap->vm_slave;
	bus_dma_segment_t *ds;
	vme_am_t cap, am;
	int i;

	cap = MVMEBUS_AM2CAP(vmap->vm_am);
	am = vmap->vm_am & VME_AM_ADRSIZEMASK;

	/*
	 * Traverse the list of segments which make up this map, and
	 * convert the CPU-relative addresses therein to VMEbus addresses.
	 */
	for (ds = &map->dm_segs[0]; ds < &map->dm_segs[map->dm_nsegs]; ds++) {
		/*
		 * First, see if this map's slave image can access the
		 * segment, otherwise we have to waste time scanning all
		 * the slave images.
		 */
		vr = vmap->vm_slave;
		if (am == (vr->vr_am & VME_AM_ADRSIZEMASK) &&
		    cap == (vr->vr_am & cap) &&
		    vmap->vm_datasize <= vr->vr_datasize &&
		    ds->_ds_cpuaddr >= vr->vr_locstart &&
		    ds->ds_len <= (vr->vr_vmeend - vr->vr_vmestart))
			goto found;

		for (i = 0, vr = sc->sc_slaves; i < sc->sc_nslaves; i++, vr++) {
			if (vr->vr_am == MVMEBUS_AM_DISABLED)
				continue;

			/*
			 * Filter out any slave images which don't have the
			 * same VMEbus address modifier and datasize as
			 * this DMA map, and those which don't cover the
			 * physical address region containing the segment.
			 */
			if (vr != vmap->vm_slave &&
			    am == (vr->vr_am & VME_AM_ADRSIZEMASK) &&
			    cap == (vr->vr_am & cap) &&
			    vmap->vm_datasize <= vr->vr_datasize &&
			    ds->_ds_cpuaddr >= vr->vr_locstart &&
			    ds->ds_len <= (vr->vr_vmeend - vr->vr_vmestart))
				break;
		}

		/*
		 * Did we find an applicable slave image which covers this
		 * segment?
		 */
		if (i == sc->sc_nslaves) {
			/*
			 * XXX TODO:
			 *
			 * Bounce this segment via a bounce buffer allocated
			 * from this DMA map.
			 */
			printf("mvmebus_dmamap_load_common: bounce needed!\n");
			return (EINVAL);
		}

 found:
		/*
		 * Generate the VMEbus address of this segment
		 */
		ds->ds_addr = (ds->_ds_cpuaddr - vr->vr_locstart) +
		    vr->vr_vmestart;
	}

	return (0);
}
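
/*
 * Worked example of the translation performed above, reusing the sample
 * A32 mapping quoted in mvmebus_offboard_ram() (local 0x80000000,
 * VMEbus 0x10000000-0x10ffffff); the numbers are illustrative only.
 * A segment whose CPU-relative address (_ds_cpuaddr) is 0x80800000 is
 * presented to other VMEbus masters as
 *
 *	ds_addr = (0x80800000 - 0x80000000) + 0x10000000 = 0x10800000
 */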

int
mvmebus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	struct mvmebus_softc *sc = t->_cookie;
	int rv;

	rv = bus_dmamap_load(sc->sc_dmat, map, buf, buflen, p, flags);
	if (rv != 0)
		return rv;

	return mvmebus_dmamap_load_common(sc, map);
}

int
mvmebus_dmamap_load_mbuf(t, map, chain, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *chain;
	int flags;
{
	struct mvmebus_softc *sc = t->_cookie;
	int rv;

	rv = bus_dmamap_load_mbuf(sc->sc_dmat, map, chain, flags);
	if (rv != 0)
		return rv;

	return mvmebus_dmamap_load_common(sc, map);
}

int
mvmebus_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{
	struct mvmebus_softc *sc = t->_cookie;
	int rv;

	rv = bus_dmamap_load_uio(sc->sc_dmat, map, uio, flags);
	if (rv != 0)
		return rv;

	return mvmebus_dmamap_load_common(sc, map);
}

int
mvmebus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{
	struct mvmebus_softc *sc = t->_cookie;
	int rv;

	/*
	 * mvmebus_dmamem_alloc() will ensure that the physical memory
	 * backing these segments is 100% accessible in at least one
	 * of the board's VMEbus slave images.
	 */
	rv = bus_dmamap_load_raw(sc->sc_dmat, map, segs, nsegs, size, flags);
	if (rv != 0)
		return rv;

	return mvmebus_dmamap_load_common(sc, map);
}

void
mvmebus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct mvmebus_softc *sc = t->_cookie;

	/* XXX Deal with bounce buffers */

	bus_dmamap_unload(sc->sc_dmat, map);
}

void
mvmebus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	struct mvmebus_softc *sc = t->_cookie;

	/* XXX Bounce buffers */

	bus_dmamap_sync(sc->sc_dmat, map, offset, len, ops);
}

#ifdef DIAGNOSTIC
/* ARGSUSED */
int
mvmebus_dummy_dmamem_alloc(t, size, align, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size;
	bus_size_t align;
	bus_size_t boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{

	panic("Must use vme_dmamem_alloc() in place of bus_dmamem_alloc()");
}

/* ARGSUSED */
void
mvmebus_dummy_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{

	panic("Must use vme_dmamem_free() in place of bus_dmamem_free()");
}
#endif

/* ARGSUSED */
int
mvmebus_dmamem_alloc(vsc, len, am, datasize, swap, segs, nsegs, rsegs, flags)
	void *vsc;
	vme_size_t len;
	vme_am_t am;
	vme_datasize_t datasize;
	vme_swap_t swap;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	extern paddr_t avail_start;
	struct mvmebus_softc *sc = vsc;
	struct mvmebus_range *vr;
	bus_addr_t low, high;
	bus_size_t bound;
	vme_am_t cap;
	int i;

	cap = MVMEBUS_AM2CAP(am);
	am &= VME_AM_ADRSIZEMASK;

	/*
	 * Find a slave mapping in the requested VMEbus address space.
	 */
	for (i = 0, vr = sc->sc_slaves; i < sc->sc_nslaves; i++, vr++) {
		if (vr->vr_am == MVMEBUS_AM_DISABLED)
			continue;

		if (i == 0 && (flags & BUS_DMA_ONBOARD_RAM) != 0)
			continue;

		if (am == (vr->vr_am & VME_AM_ADRSIZEMASK) &&
		    cap == (vr->vr_am & cap) && datasize <= vr->vr_datasize &&
		    len <= (vr->vr_vmeend - vr->vr_vmestart))
			break;
	}
	if (i == sc->sc_nslaves)
		return (EINVAL);

	/*
	 * Set up the constraints so we can allocate physical memory which
	 * is visible in the requested address space
	 */
	low = max(vr->vr_locstart, avail_start);
	high = vr->vr_locstart + (vr->vr_vmeend - vr->vr_vmestart) + 1;
	bound = (bus_size_t) vr->vr_mask + 1;

	/*
	 * Allocate physical memory.
	 *
	 * Note: This fills in the segments with CPU-relative physical
	 * addresses.  A further call to bus_dmamap_load_raw() (with a
	 * DMA map which specifies the same VMEbus address space and
	 * constraints as the call to here) must be made.  The segments
	 * of the DMA map will then contain VMEbus-relative physical
	 * addresses of the memory allocated here.
	 */
	return _bus_dmamem_alloc_common(sc->sc_dmat, low, high,
	    len, 0, bound, segs, nsegs, rsegs, flags);
}
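
/*
 * Rough sketch of the calling sequence described in the note above, as
 * seen from a VME device driver.  The argument lists follow the
 * vct_dmamem_alloc and vct_dmamap_create methods implemented in this
 * file; the driver-side names (vt, dmat, map, segs) are illustrative only:
 *
 *	vme_dmamem_alloc(vt, len, am, datasize, swap,
 *	    segs, nsegs, &rsegs, flags);		(CPU-relative segments)
 *	vme_dmamap_create(vt, len, am, datasize, swap,
 *	    nsegs, segsz, bound, flags, &map);
 *	bus_dmamap_load_raw(dmat, map, segs, rsegs, size, flags);
 *							(VMEbus-relative segments)
 *
 * `vt' is the vme_chipset_tag_t and `dmat' the bus_dma_tag_t handed to
 * the driver via vmebus_attach_args at attach time.
 */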

void
mvmebus_dmamem_free(vsc, segs, nsegs)
	void *vsc;
	bus_dma_segment_t *segs;
	int nsegs;
{
	struct mvmebus_softc *sc = vsc;

	bus_dmamem_free(sc->sc_dmat, segs, nsegs);
}

int
mvmebus_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	struct mvmebus_softc *sc = t->_cookie;

	return bus_dmamem_map(sc->sc_dmat, segs, nsegs, size, kvap, flags);
}

void
mvmebus_dmamem_unmap(t, kva, size)
	bus_dma_tag_t t;
	caddr_t kva;
	size_t size;
{
	struct mvmebus_softc *sc = t->_cookie;

	bus_dmamem_unmap(sc->sc_dmat, kva, size);
}

paddr_t
mvmebus_dmamem_mmap(t, segs, nsegs, offset, prot, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	off_t offset;
	int prot;
	int flags;
{
	struct mvmebus_softc *sc = t->_cookie;

	return bus_dmamem_mmap(sc->sc_dmat, segs, nsegs, offset, prot, flags);
}

#ifdef DEBUG
static const char *
mvmebus_mod_string(addr, len, am, ds)
	vme_addr_t addr;
	vme_size_t len;
	vme_am_t am;
	vme_datasize_t ds;
{
	static const char *mode[] = {"BLT64)", "DATA)", "PROG)", "BLT32)"};
	static const char *dsiz[] = {"(", "(D8,", "(D16,", "(D16-D8,",
	    "(D32,", "(D32,D8,", "(D32-D16,", "(D32-D8,"};
	static const char *adrfmt[] = { "A32:%08x-%08x ", "USR:%08x-%08x ",
	    "A16:%04x-%04x ", "A24:%06x-%06x " };
	static char mstring[40];

	snprintf(mstring, sizeof(mstring),
	    adrfmt[(am & VME_AM_ADRSIZEMASK) >> VME_AM_ADRSIZESHIFT],
	    addr, addr + len - 1);
	strlcat(mstring, dsiz[ds & 0x7], sizeof(mstring));

	if (MVMEBUS_AM_HAS_CAP(am)) {
		if (am & MVMEBUS_AM_CAP_DATA)
			strlcat(mstring, "D", sizeof(mstring));
		if (am & MVMEBUS_AM_CAP_PROG)
			strlcat(mstring, "P", sizeof(mstring));
		if (am & MVMEBUS_AM_CAP_USER)
			strlcat(mstring, "U", sizeof(mstring));
		if (am & MVMEBUS_AM_CAP_SUPER)
			strlcat(mstring, "S", sizeof(mstring));
		if (am & MVMEBUS_AM_CAP_BLK)
			strlcat(mstring, "B", sizeof(mstring));
		if (am & MVMEBUS_AM_CAP_BLKD64)
			strlcat(mstring, "6", sizeof(mstring));
		strlcat(mstring, ")", sizeof(mstring));
	} else {
		strlcat(mstring, ((am & VME_AM_PRIVMASK) == VME_AM_USER) ?
		    "USER," : "SUPER,", sizeof(mstring));
		strlcat(mstring, mode[am & VME_AM_MODEMASK], sizeof(mstring));
	}

	return (mstring);
}
#endif