1 /* $NetBSD: vme_machdep.c,v 1.39 2002/10/02 16:02:16 thorpej Exp $ */ 2 3 /*- 4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Paul Kranenburg. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the NetBSD 21 * Foundation, Inc. and its contributors. 22 * 4. Neither the name of The NetBSD Foundation nor the names of its 23 * contributors may be used to endorse or promote products derived 24 * from this software without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 36 * POSSIBILITY OF SUCH DAMAGE. 37 */ 38 39 #include <sys/param.h> 40 #include <sys/extent.h> 41 #include <sys/systm.h> 42 #include <sys/device.h> 43 #include <sys/malloc.h> 44 #include <sys/errno.h> 45 46 #include <sys/proc.h> 47 #include <sys/user.h> 48 #include <sys/syslog.h> 49 50 #include <uvm/uvm_extern.h> 51 52 #define _SPARC_BUS_DMA_PRIVATE 53 #include <machine/bus.h> 54 #include <sparc/sparc/iommuvar.h> 55 #include <machine/autoconf.h> 56 #include <machine/oldmon.h> 57 #include <machine/cpu.h> 58 #include <machine/ctlreg.h> 59 60 #include <dev/vme/vmereg.h> 61 #include <dev/vme/vmevar.h> 62 63 #include <sparc/sparc/asm.h> 64 #include <sparc/sparc/vaddrs.h> 65 #include <sparc/sparc/cpuvar.h> 66 #include <sparc/dev/vmereg.h> 67 68 struct sparcvme_softc { 69 struct device sc_dev; /* base device */ 70 bus_space_tag_t sc_bustag; 71 bus_dma_tag_t sc_dmatag; 72 struct vmebusreg *sc_reg; /* VME control registers */ 73 struct vmebusvec *sc_vec; /* VME interrupt vector */ 74 struct rom_range *sc_range; /* ROM range property */ 75 int sc_nrange; 76 volatile u_int32_t *sc_ioctags; /* VME IO-cache tag registers */ 77 volatile u_int32_t *sc_iocflush;/* VME IO-cache flush registers */ 78 int (*sc_vmeintr) __P((void *)); 79 }; 80 struct sparcvme_softc *sparcvme_sc;/*XXX*/ 81 82 /* autoconfiguration driver */ 83 static int vmematch_iommu __P((struct device *, struct cfdata *, void *)); 84 static void vmeattach_iommu __P((struct device *, struct device *, void *)); 85 static int vmematch_mainbus __P((struct device *, struct cfdata *, void *)); 86 static void vmeattach_mainbus __P((struct device *, struct device *, void *)); 87 #if defined(SUN4) 88 int vmeintr4 __P((void *)); 89 #endif 90 #if defined(SUN4M) 91 int vmeintr4m __P((void *)); 92 static int sparc_vme_error __P((void)); 93 #endif 94 95 96 static int sparc_vme_probe __P((void *, vme_addr_t, vme_size_t, 97 vme_am_t, vme_datasize_t, 98 int (*) __P((void *, bus_space_tag_t, bus_space_handle_t)), void *)); 99 static int sparc_vme_map __P((void *, vme_addr_t, vme_size_t, vme_am_t, 100 vme_datasize_t, vme_swap_t, 101 bus_space_tag_t *, bus_space_handle_t *, 102 vme_mapresc_t *)); 103 static void sparc_vme_unmap __P((void *, vme_mapresc_t)); 104 static int sparc_vme_intr_map __P((void *, int, int, vme_intr_handle_t *)); 105 static const struct evcnt *sparc_vme_intr_evcnt __P((void *, 106 vme_intr_handle_t)); 107 static void * sparc_vme_intr_establish __P((void *, vme_intr_handle_t, int, 108 int (*) __P((void *)), void *)); 109 static void sparc_vme_intr_disestablish __P((void *, void *)); 110 111 static int vmebus_translate __P((struct sparcvme_softc *, vme_am_t, 112 vme_addr_t, bus_addr_t *)); 113 #if defined(SUN4M) 114 static void sparc_vme_iommu_barrier __P(( bus_space_tag_t, bus_space_handle_t, 115 bus_size_t, bus_size_t, int)); 116 117 #endif 118 119 /* 120 * DMA functions. 121 */ 122 static void sparc_vct_dmamap_destroy __P((void *, bus_dmamap_t)); 123 124 #if defined(SUN4) 125 static int sparc_vct4_dmamap_create __P((void *, vme_size_t, vme_am_t, 126 vme_datasize_t, vme_swap_t, int, vme_size_t, vme_addr_t, 127 int, bus_dmamap_t *)); 128 static int sparc_vme4_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *, 129 bus_size_t, struct proc *, int)); 130 static void sparc_vme4_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t)); 131 static void sparc_vme4_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, 132 bus_addr_t, bus_size_t, int)); 133 #endif 134 135 #if defined(SUN4M) 136 static int sparc_vct_iommu_dmamap_create __P((void *, vme_size_t, vme_am_t, 137 vme_datasize_t, vme_swap_t, int, vme_size_t, vme_addr_t, 138 int, bus_dmamap_t *)); 139 static int sparc_vme_iommu_dmamap_create __P((bus_dma_tag_t, bus_size_t, 140 int, bus_size_t, bus_size_t, int, bus_dmamap_t *)); 141 142 static int sparc_vme_iommu_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, 143 void *, bus_size_t, struct proc *, int)); 144 static void sparc_vme_iommu_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t)); 145 static void sparc_vme_iommu_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, 146 bus_addr_t, bus_size_t, int)); 147 #endif 148 149 static int sparc_vme_dmamem_map __P((bus_dma_tag_t, bus_dma_segment_t *, 150 int, size_t, caddr_t *, int)); 151 #if 0 152 static void sparc_vme_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t)); 153 static void sparc_vme_dmamem_unmap __P((bus_dma_tag_t, caddr_t, size_t)); 154 static paddr_t sparc_vme_dmamem_mmap __P((bus_dma_tag_t, 155 bus_dma_segment_t *, int, off_t, int, int)); 156 #endif 157 158 int sparc_vme_mmap_cookie __P((vme_addr_t, vme_am_t, bus_space_handle_t *)); 159 160 CFATTACH_DECL(vme_mainbus, sizeof(struct sparcvme_softc), 161 vmematch_mainbus, vmeattach_mainbus, NULL, NULL); 162 163 CFATTACH_DECL(vme_iommu, sizeof(struct sparcvme_softc), 164 vmematch_iommu, vmeattach_iommu, NULL, NULL); 165 166 int (*vmeerr_handler) __P((void)); 167 168 #define VMEMOD_D32 0x40 /* ??? */ 169 170 /* If the PROM does not provide the `ranges' property, we make up our own */ 171 struct rom_range vmebus_translations[] = { 172 #define _DS (VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA) 173 { VME_AM_A16|_DS, 0, PMAP_VME16, 0xffff0000, 0 }, 174 { VME_AM_A24|_DS, 0, PMAP_VME16, 0xff000000, 0 }, 175 { VME_AM_A32|_DS, 0, PMAP_VME16, 0x00000000, 0 }, 176 { VME_AM_A16|VMEMOD_D32|_DS, 0, PMAP_VME32, 0xffff0000, 0 }, 177 { VME_AM_A24|VMEMOD_D32|_DS, 0, PMAP_VME32, 0xff000000, 0 }, 178 { VME_AM_A32|VMEMOD_D32|_DS, 0, PMAP_VME32, 0x00000000, 0 } 179 #undef _DS 180 }; 181 182 /* 183 * The VME bus logic on sun4 machines maps DMA requests in the first MB 184 * of VME space to the last MB of DVMA space. `vme_dvmamap' is used 185 * for DVMA space allocations. The DMA addresses returned by 186 * bus_dmamap_load*() must be relocated by -VME4_DVMA_BASE. 187 */ 188 struct extent *vme_dvmamap; 189 190 /* 191 * The VME hardware on the sun4m IOMMU maps the first 8MB of 32-bit 192 * VME space to the last 8MB of DVMA space and the first 1MB of 193 * 24-bit VME space to the first 1MB of the last 8MB of DVMA space 194 * (thus 24-bit VME space overlaps the first 1MB of of 32-bit space). 195 * The following constants define subregions in the IOMMU DVMA map 196 * for VME DVMA allocations. The DMA addresses returned by 197 * bus_dmamap_load*() must be relocated by -VME_IOMMU_DVMA_BASE. 198 */ 199 #define VME_IOMMU_DVMA_BASE 0xff800000 200 #define VME_IOMMU_DVMA_AM24_BASE VME_IOMMU_DVMA_BASE 201 #define VME_IOMMU_DVMA_AM24_END 0xff900000 202 #define VME_IOMMU_DVMA_AM32_BASE VME_IOMMU_DVMA_BASE 203 #define VME_IOMMU_DVMA_AM32_END IOMMU_DVMA_END 204 205 struct sparc_bus_space_tag sparc_vme_bus_tag = { 206 NULL, /* cookie */ 207 NULL, /* parent bus tag */ 208 NULL, /* bus_map */ 209 NULL, /* bus_unmap */ 210 NULL, /* bus_subregion */ 211 NULL /* barrier */ 212 }; 213 214 struct vme_chipset_tag sparc_vme_chipset_tag = { 215 NULL, 216 sparc_vme_map, 217 sparc_vme_unmap, 218 sparc_vme_probe, 219 sparc_vme_intr_map, 220 sparc_vme_intr_evcnt, 221 sparc_vme_intr_establish, 222 sparc_vme_intr_disestablish, 223 0, 0, 0 /* bus specific DMA stuff */ 224 }; 225 226 227 #if defined(SUN4) 228 struct sparc_bus_dma_tag sparc_vme4_dma_tag = { 229 NULL, /* cookie */ 230 _bus_dmamap_create, 231 _bus_dmamap_destroy, 232 sparc_vme4_dmamap_load, 233 _bus_dmamap_load_mbuf, 234 _bus_dmamap_load_uio, 235 _bus_dmamap_load_raw, 236 sparc_vme4_dmamap_unload, 237 sparc_vme4_dmamap_sync, 238 239 _bus_dmamem_alloc, 240 _bus_dmamem_free, 241 sparc_vme_dmamem_map, 242 _bus_dmamem_unmap, 243 _bus_dmamem_mmap 244 }; 245 #endif 246 247 #if defined(SUN4M) 248 struct sparc_bus_dma_tag sparc_vme_iommu_dma_tag = { 249 NULL, /* cookie */ 250 sparc_vme_iommu_dmamap_create, 251 _bus_dmamap_destroy, 252 sparc_vme_iommu_dmamap_load, 253 _bus_dmamap_load_mbuf, 254 _bus_dmamap_load_uio, 255 _bus_dmamap_load_raw, 256 sparc_vme_iommu_dmamap_unload, 257 sparc_vme_iommu_dmamap_sync, 258 259 _bus_dmamem_alloc, 260 _bus_dmamem_free, 261 sparc_vme_dmamem_map, 262 _bus_dmamem_unmap, 263 _bus_dmamem_mmap 264 }; 265 #endif 266 267 268 int 269 vmematch_mainbus(parent, cf, aux) 270 struct device *parent; 271 struct cfdata *cf; 272 void *aux; 273 { 274 struct mainbus_attach_args *ma = aux; 275 276 if (!CPU_ISSUN4) 277 return (0); 278 279 return (strcmp("vme", ma->ma_name) == 0); 280 } 281 282 int 283 vmematch_iommu(parent, cf, aux) 284 struct device *parent; 285 struct cfdata *cf; 286 void *aux; 287 { 288 struct iommu_attach_args *ia = aux; 289 290 return (strcmp("vme", ia->iom_name) == 0); 291 } 292 293 294 void 295 vmeattach_mainbus(parent, self, aux) 296 struct device *parent, *self; 297 void *aux; 298 { 299 #if defined(SUN4) 300 struct mainbus_attach_args *ma = aux; 301 struct sparcvme_softc *sc = (struct sparcvme_softc *)self; 302 struct vmebus_attach_args vba; 303 304 if (self->dv_unit > 0) { 305 printf(" unsupported\n"); 306 return; 307 } 308 309 sc->sc_bustag = ma->ma_bustag; 310 sc->sc_dmatag = ma->ma_dmatag; 311 312 /* VME interrupt entry point */ 313 sc->sc_vmeintr = vmeintr4; 314 315 /*XXX*/ sparc_vme_chipset_tag.cookie = self; 316 /*XXX*/ sparc_vme_chipset_tag.vct_dmamap_create = sparc_vct4_dmamap_create; 317 /*XXX*/ sparc_vme_chipset_tag.vct_dmamap_destroy = sparc_vct_dmamap_destroy; 318 /*XXX*/ sparc_vme4_dma_tag._cookie = self; 319 320 #if 0 321 sparc_vme_bus_tag.parent = ma->ma_bustag; 322 vba.vba_bustag = &sparc_vme_bus_tag; 323 #endif 324 vba.va_vct = &sparc_vme_chipset_tag; 325 vba.va_bdt = &sparc_vme4_dma_tag; 326 vba.va_slaveconfig = 0; 327 328 /* Fall back to our own `range' construction */ 329 sc->sc_range = vmebus_translations; 330 sc->sc_nrange = 331 sizeof(vmebus_translations)/sizeof(vmebus_translations[0]); 332 333 vme_dvmamap = extent_create("vmedvma", VME4_DVMA_BASE, VME4_DVMA_END, 334 M_DEVBUF, 0, 0, EX_NOWAIT); 335 if (vme_dvmamap == NULL) 336 panic("vme: unable to allocate DVMA map"); 337 338 printf("\n"); 339 (void)config_found(self, &vba, 0); 340 341 #endif 342 return; 343 } 344 345 /* sun4m vmebus */ 346 void 347 vmeattach_iommu(parent, self, aux) 348 struct device *parent, *self; 349 void *aux; 350 { 351 #if defined(SUN4M) 352 struct sparcvme_softc *sc = (struct sparcvme_softc *)self; 353 struct iommu_attach_args *ia = aux; 354 struct vmebus_attach_args vba; 355 bus_space_handle_t bh; 356 int node; 357 int cline; 358 359 if (self->dv_unit > 0) { 360 printf(" unsupported\n"); 361 return; 362 } 363 364 sc->sc_bustag = ia->iom_bustag; 365 sc->sc_dmatag = ia->iom_dmatag; 366 367 /* VME interrupt entry point */ 368 sc->sc_vmeintr = vmeintr4m; 369 370 /*XXX*/ sparc_vme_chipset_tag.cookie = self; 371 /*XXX*/ sparc_vme_chipset_tag.vct_dmamap_create = sparc_vct_iommu_dmamap_create; 372 /*XXX*/ sparc_vme_chipset_tag.vct_dmamap_destroy = sparc_vct_dmamap_destroy; 373 /*XXX*/ sparc_vme_iommu_dma_tag._cookie = self; 374 sparc_vme_bus_tag.sparc_bus_barrier = sparc_vme_iommu_barrier; 375 376 #if 0 377 vba.vba_bustag = &sparc_vme_bus_tag; 378 #endif 379 vba.va_vct = &sparc_vme_chipset_tag; 380 vba.va_bdt = &sparc_vme_iommu_dma_tag; 381 vba.va_slaveconfig = 0; 382 383 node = ia->iom_node; 384 385 /* 386 * Map VME control space 387 */ 388 if (ia->iom_nreg < 2) { 389 printf("%s: only %d register sets\n", self->dv_xname, 390 ia->iom_nreg); 391 return; 392 } 393 394 if (bus_space_map(ia->iom_bustag, 395 (bus_addr_t) BUS_ADDR(ia->iom_reg[0].oa_space, 396 ia->iom_reg[0].oa_base), 397 (bus_size_t)ia->iom_reg[0].oa_size, 398 BUS_SPACE_MAP_LINEAR, 399 &bh) != 0) { 400 panic("%s: can't map vmebusreg", self->dv_xname); 401 } 402 sc->sc_reg = (struct vmebusreg *)bh; 403 404 if (bus_space_map(ia->iom_bustag, 405 (bus_addr_t) BUS_ADDR(ia->iom_reg[1].oa_space, 406 ia->iom_reg[1].oa_base), 407 (bus_size_t)ia->iom_reg[1].oa_size, 408 BUS_SPACE_MAP_LINEAR, 409 &bh) != 0) { 410 panic("%s: can't map vmebusvec", self->dv_xname); 411 } 412 sc->sc_vec = (struct vmebusvec *)bh; 413 414 /* 415 * Map VME IO cache tags and flush control. 416 */ 417 if (bus_space_map(ia->iom_bustag, 418 (bus_addr_t) BUS_ADDR( 419 ia->iom_reg[1].oa_space, 420 ia->iom_reg[1].oa_base + VME_IOC_TAGOFFSET), 421 VME_IOC_SIZE, 422 BUS_SPACE_MAP_LINEAR, 423 &bh) != 0) { 424 panic("%s: can't map IOC tags", self->dv_xname); 425 } 426 sc->sc_ioctags = (u_int32_t *)bh; 427 428 if (bus_space_map(ia->iom_bustag, 429 (bus_addr_t) BUS_ADDR( 430 ia->iom_reg[1].oa_space, 431 ia->iom_reg[1].oa_base + VME_IOC_FLUSHOFFSET), 432 VME_IOC_SIZE, 433 BUS_SPACE_MAP_LINEAR, 434 &bh) != 0) { 435 panic("%s: can't map IOC flush registers", self->dv_xname); 436 } 437 sc->sc_iocflush = (u_int32_t *)bh; 438 439 /*XXX*/ sparc_vme_bus_tag.cookie = sc->sc_reg; 440 441 /* 442 * Get "range" property. 443 */ 444 if (PROM_getprop(node, "ranges", sizeof(struct rom_range), 445 &sc->sc_nrange, (void **)&sc->sc_range) != 0) { 446 panic("%s: can't get ranges property", self->dv_xname); 447 } 448 449 sparcvme_sc = sc; 450 vmeerr_handler = sparc_vme_error; 451 452 /* 453 * Invalidate all IO-cache entries. 454 */ 455 for (cline = VME_IOC_SIZE/VME_IOC_LINESZ; cline > 0;) { 456 sc->sc_ioctags[--cline] = 0; 457 } 458 459 /* Enable IO-cache */ 460 sc->sc_reg->vmebus_cr |= VMEBUS_CR_C; 461 462 printf(": version 0x%x\n", 463 sc->sc_reg->vmebus_cr & VMEBUS_CR_IMPL); 464 465 (void)config_found(self, &vba, 0); 466 #endif 467 } 468 469 #if defined(SUN4M) 470 static int 471 sparc_vme_error() 472 { 473 struct sparcvme_softc *sc = sparcvme_sc; 474 u_int32_t afsr, afpa; 475 char bits[64]; 476 477 afsr = sc->sc_reg->vmebus_afsr; 478 afpa = sc->sc_reg->vmebus_afar; 479 printf("VME error:\n\tAFSR %s\n", 480 bitmask_snprintf(afsr, VMEBUS_AFSR_BITS, bits, sizeof(bits))); 481 printf("\taddress: 0x%x%x\n", afsr, afpa); 482 return (0); 483 } 484 #endif 485 486 int 487 vmebus_translate(sc, mod, addr, bap) 488 struct sparcvme_softc *sc; 489 vme_am_t mod; 490 vme_addr_t addr; 491 bus_addr_t *bap; 492 { 493 int i; 494 495 for (i = 0; i < sc->sc_nrange; i++) { 496 struct rom_range *rp = &sc->sc_range[i]; 497 498 if (rp->cspace != mod) 499 continue; 500 501 /* We've found the connection to the parent bus */ 502 *bap = BUS_ADDR(rp->pspace, rp->poffset + addr); 503 return (0); 504 } 505 return (ENOENT); 506 } 507 508 struct vmeprobe_myarg { 509 int (*cb) __P((void *, bus_space_tag_t, bus_space_handle_t)); 510 void *cbarg; 511 bus_space_tag_t tag; 512 int res; /* backwards */ 513 }; 514 515 static int vmeprobe_mycb __P((void *, void *)); 516 static int 517 vmeprobe_mycb(bh, arg) 518 void *bh, *arg; 519 { 520 struct vmeprobe_myarg *a = arg; 521 522 a->res = (*a->cb)(a->cbarg, a->tag, (bus_space_handle_t)bh); 523 return (!a->res); 524 } 525 526 int 527 sparc_vme_probe(cookie, addr, len, mod, datasize, callback, arg) 528 void *cookie; 529 vme_addr_t addr; 530 vme_size_t len; 531 vme_am_t mod; 532 vme_datasize_t datasize; 533 int (*callback) __P((void *, bus_space_tag_t, bus_space_handle_t)); 534 void *arg; 535 { 536 struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie; 537 bus_addr_t paddr; 538 bus_size_t size; 539 struct vmeprobe_myarg myarg; 540 int res, i; 541 542 if (vmebus_translate(sc, mod, addr, &paddr) != 0) 543 return (EINVAL); 544 545 size = (datasize == VME_D8 ? 1 : (datasize == VME_D16 ? 2 : 4)); 546 547 if (callback) { 548 myarg.cb = callback; 549 myarg.cbarg = arg; 550 myarg.tag = sc->sc_bustag; 551 myarg.res = 0; 552 res = bus_space_probe(sc->sc_bustag, paddr, size, 0, 553 0, vmeprobe_mycb, &myarg); 554 return (res ? 0 : (myarg.res ? myarg.res : EIO)); 555 } 556 557 for (i = 0; i < len / size; i++) { 558 myarg.res = 0; 559 res = bus_space_probe(sc->sc_bustag, paddr, size, 0, 560 0, 0, 0); 561 if (res == 0) 562 return (EIO); 563 paddr += size; 564 } 565 return (0); 566 } 567 568 int 569 sparc_vme_map(cookie, addr, size, mod, datasize, swap, tp, hp, rp) 570 void *cookie; 571 vme_addr_t addr; 572 vme_size_t size; 573 vme_am_t mod; 574 vme_datasize_t datasize; 575 vme_swap_t swap; 576 bus_space_tag_t *tp; 577 bus_space_handle_t *hp; 578 vme_mapresc_t *rp; 579 { 580 struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie; 581 bus_addr_t paddr; 582 int error; 583 584 error = vmebus_translate(sc, mod, addr, &paddr); 585 if (error != 0) 586 return (error); 587 588 *tp = sc->sc_bustag; 589 return (bus_space_map(sc->sc_bustag, paddr, size, 0, hp)); 590 } 591 592 int 593 sparc_vme_mmap_cookie(addr, mod, hp) 594 vme_addr_t addr; 595 vme_am_t mod; 596 bus_space_handle_t *hp; 597 { 598 struct sparcvme_softc *sc = sparcvme_sc; 599 bus_addr_t paddr; 600 int error; 601 602 error = vmebus_translate(sc, mod, addr, &paddr); 603 if (error != 0) 604 return (error); 605 606 return (bus_space_mmap(sc->sc_bustag, paddr, 0, 607 0/*prot is ignored*/, 0)); 608 } 609 610 #if defined(SUN4M) 611 void 612 sparc_vme_iommu_barrier(t, h, offset, size, flags) 613 bus_space_tag_t t; 614 bus_space_handle_t h; 615 bus_size_t offset; 616 bus_size_t size; 617 int flags; 618 { 619 struct vmebusreg *vbp = (struct vmebusreg *)t->cookie; 620 621 /* Read async fault status to flush write-buffers */ 622 (*(volatile int *)&vbp->vmebus_afsr); 623 } 624 #endif 625 626 627 628 /* 629 * VME Interrupt Priority Level to sparc Processor Interrupt Level. 630 */ 631 static int vme_ipl_to_pil[] = { 632 0, 633 2, 634 3, 635 5, 636 7, 637 9, 638 11, 639 13 640 }; 641 642 643 /* 644 * All VME device interrupts go through vmeintr(). This function reads 645 * the VME vector from the bus, then dispatches the device interrupt 646 * handler. All handlers for devices that map to the same Processor 647 * Interrupt Level (according to the table above) are on a linked list 648 * of `sparc_vme_intr_handle' structures. The head of which is passed 649 * down as the argument to `vmeintr(void *arg)'. 650 */ 651 struct sparc_vme_intr_handle { 652 struct intrhand ih; 653 struct sparc_vme_intr_handle *next; 654 int vec; /* VME interrupt vector */ 655 int pri; /* VME interrupt priority */ 656 struct sparcvme_softc *sc;/*XXX*/ 657 }; 658 659 #if defined(SUN4) 660 int 661 vmeintr4(arg) 662 void *arg; 663 { 664 struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg; 665 int level, vec; 666 int rv = 0; 667 668 level = (ihp->pri << 1) | 1; 669 670 vec = ldcontrolb((caddr_t)(AC_VMEINTVEC | level)); 671 672 if (vec == -1) { 673 #ifdef DEBUG 674 /* 675 * This seems to happen only with the i82586 based 676 * `ie1' boards. 677 */ 678 printf("vme: spurious interrupt at VME level %d\n", ihp->pri); 679 #endif 680 return (1); /* XXX - pretend we handled it, for now */ 681 } 682 683 for (; ihp; ihp = ihp->next) 684 if (ihp->vec == vec && ihp->ih.ih_fun) 685 rv |= (ihp->ih.ih_fun)(ihp->ih.ih_arg); 686 687 return (rv); 688 } 689 #endif 690 691 #if defined(SUN4M) 692 int 693 vmeintr4m(arg) 694 void *arg; 695 { 696 struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg; 697 int level, vec; 698 int rv = 0; 699 700 level = (ihp->pri << 1) | 1; 701 702 #if 0 703 int pending; 704 705 /* Flush VME <=> Sbus write buffers */ 706 (*(volatile int *)&ihp->sc->sc_reg->vmebus_afsr); 707 708 pending = *((int*)ICR_SI_PEND); 709 if ((pending & SINTR_VME(ihp->pri)) == 0) { 710 printf("vmeintr: non pending at pri %x(p 0x%x)\n", 711 ihp->pri, pending); 712 return (0); 713 } 714 #endif 715 #if 0 716 /* Why gives this a bus timeout sometimes? */ 717 vec = ihp->sc->sc_vec->vmebusvec[level]; 718 #else 719 /* so, arrange to catch the fault... */ 720 { 721 extern struct user *proc0paddr; 722 extern int fkbyte __P((caddr_t, struct pcb *)); 723 caddr_t addr = (caddr_t)&ihp->sc->sc_vec->vmebusvec[level]; 724 struct pcb *xpcb; 725 u_long saveonfault; 726 int s; 727 728 s = splhigh(); 729 if (curproc == NULL) 730 xpcb = (struct pcb *)proc0paddr; 731 else 732 xpcb = &curproc->p_addr->u_pcb; 733 734 saveonfault = (u_long)xpcb->pcb_onfault; 735 vec = fkbyte(addr, xpcb); 736 xpcb->pcb_onfault = (caddr_t)saveonfault; 737 738 splx(s); 739 } 740 #endif 741 742 if (vec == -1) { 743 #ifdef DEBUG 744 /* 745 * This seems to happen only with the i82586 based 746 * `ie1' boards. 747 */ 748 printf("vme: spurious interrupt at VME level %d\n", ihp->pri); 749 printf(" ICR_SI_PEND=0x%x; VME AFSR=0x%x; VME AFAR=0x%x\n", 750 *((int*)ICR_SI_PEND), 751 ihp->sc->sc_reg->vmebus_afsr, 752 ihp->sc->sc_reg->vmebus_afar); 753 #endif 754 return (1); /* XXX - pretend we handled it, for now */ 755 } 756 757 for (; ihp; ihp = ihp->next) 758 if (ihp->vec == vec && ihp->ih.ih_fun) 759 rv |= (ihp->ih.ih_fun)(ihp->ih.ih_arg); 760 761 return (rv); 762 } 763 #endif 764 765 int 766 sparc_vme_intr_map(cookie, level, vec, ihp) 767 void *cookie; 768 int level; 769 int vec; 770 vme_intr_handle_t *ihp; 771 { 772 struct sparc_vme_intr_handle *ih; 773 774 ih = (vme_intr_handle_t) 775 malloc(sizeof(struct sparc_vme_intr_handle), M_DEVBUF, M_NOWAIT); 776 ih->pri = level; 777 ih->vec = vec; 778 ih->sc = cookie;/*XXX*/ 779 *ihp = ih; 780 return (0); 781 } 782 783 const struct evcnt * 784 sparc_vme_intr_evcnt(cookie, vih) 785 void *cookie; 786 vme_intr_handle_t vih; 787 { 788 789 /* XXX for now, no evcnt parent reported */ 790 return NULL; 791 } 792 793 void * 794 sparc_vme_intr_establish(cookie, vih, pri, func, arg) 795 void *cookie; 796 vme_intr_handle_t vih; 797 int pri; 798 int (*func) __P((void *)); 799 void *arg; 800 { 801 struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie; 802 struct sparc_vme_intr_handle *svih = 803 (struct sparc_vme_intr_handle *)vih; 804 struct intrhand *ih; 805 int level; 806 807 /* XXX pri == svih->pri ??? */ 808 809 /* Translate VME priority to processor IPL */ 810 level = vme_ipl_to_pil[svih->pri]; 811 812 svih->ih.ih_fun = func; 813 svih->ih.ih_arg = arg; 814 svih->next = NULL; 815 816 /* ensure the interrupt subsystem will call us at this level */ 817 for (ih = intrhand[level]; ih != NULL; ih = ih->ih_next) 818 if (ih->ih_fun == sc->sc_vmeintr) 819 break; 820 821 if (ih == NULL) { 822 ih = (struct intrhand *) 823 malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT); 824 if (ih == NULL) 825 panic("vme_addirq"); 826 bzero(ih, sizeof *ih); 827 ih->ih_fun = sc->sc_vmeintr; 828 ih->ih_arg = vih; 829 intr_establish(level, ih); 830 } else { 831 svih->next = (vme_intr_handle_t)ih->ih_arg; 832 ih->ih_arg = vih; 833 } 834 return (NULL); 835 } 836 837 void 838 sparc_vme_unmap(cookie, resc) 839 void * cookie; 840 vme_mapresc_t resc; 841 { 842 /* Not implemented */ 843 panic("sparc_vme_unmap"); 844 } 845 846 void 847 sparc_vme_intr_disestablish(cookie, a) 848 void *cookie; 849 void *a; 850 { 851 /* Not implemented */ 852 panic("sparc_vme_intr_disestablish"); 853 } 854 855 856 857 /* 858 * VME DMA functions. 859 */ 860 861 static void 862 sparc_vct_dmamap_destroy(cookie, map) 863 void *cookie; 864 bus_dmamap_t map; 865 { 866 struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie; 867 bus_dmamap_destroy(sc->sc_dmatag, map); 868 } 869 870 #if defined(SUN4) 871 static int 872 sparc_vct4_dmamap_create(cookie, size, am, datasize, swap, nsegments, maxsegsz, 873 boundary, flags, dmamp) 874 void *cookie; 875 vme_size_t size; 876 vme_am_t am; 877 vme_datasize_t datasize; 878 vme_swap_t swap; 879 int nsegments; 880 vme_size_t maxsegsz; 881 vme_addr_t boundary; 882 int flags; 883 bus_dmamap_t *dmamp; 884 { 885 struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie; 886 887 /* Allocate a base map through parent bus ops */ 888 return (bus_dmamap_create(sc->sc_dmatag, size, nsegments, maxsegsz, 889 boundary, flags, dmamp)); 890 } 891 892 int 893 sparc_vme4_dmamap_load(t, map, buf, buflen, p, flags) 894 bus_dma_tag_t t; 895 bus_dmamap_t map; 896 void *buf; 897 bus_size_t buflen; 898 struct proc *p; 899 int flags; 900 { 901 bus_addr_t dva; 902 bus_size_t sgsize; 903 vaddr_t va, voff; 904 pmap_t pmap; 905 int pagesz = PAGE_SIZE; 906 int error; 907 908 cpuinfo.cache_flush(buf, buflen); /* XXX - move to bus_dma_sync */ 909 910 va = (vaddr_t)buf; 911 voff = va & (pagesz - 1); 912 va &= -pagesz; 913 914 /* 915 * Allocate an integral number of pages from DVMA space 916 * covering the passed buffer. 917 */ 918 sgsize = (buflen + voff + pagesz - 1) & -pagesz; 919 error = extent_alloc(vme_dvmamap, sgsize, pagesz, 920 map->_dm_boundary, 921 (flags & BUS_DMA_NOWAIT) == 0 922 ? EX_WAITOK 923 : EX_NOWAIT, 924 (u_long *)&dva); 925 if (error != 0) 926 return (error); 927 928 map->dm_mapsize = buflen; 929 map->dm_nsegs = 1; 930 /* Adjust DVMA address to VME view */ 931 map->dm_segs[0].ds_addr = dva + voff - VME4_DVMA_BASE; 932 map->dm_segs[0].ds_len = buflen; 933 map->dm_segs[0]._ds_sgsize = sgsize; 934 935 pmap = (p == NULL) ? pmap_kernel() : p->p_vmspace->vm_map.pmap; 936 937 for (; sgsize != 0; ) { 938 paddr_t pa; 939 /* 940 * Get the physical address for this page. 941 */ 942 (void) pmap_extract(pmap, va, &pa); 943 944 #ifdef notyet 945 if (have_iocache) 946 pa |= PG_IOC; 947 #endif 948 pmap_enter(pmap_kernel(), dva, 949 pa | PMAP_NC, 950 VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED); 951 952 dva += pagesz; 953 va += pagesz; 954 sgsize -= pagesz; 955 } 956 pmap_update(pmap_kernel()); 957 958 return (0); 959 } 960 961 void 962 sparc_vme4_dmamap_unload(t, map) 963 bus_dma_tag_t t; 964 bus_dmamap_t map; 965 { 966 bus_dma_segment_t *segs = map->dm_segs; 967 int nsegs = map->dm_nsegs; 968 bus_addr_t dva; 969 bus_size_t len; 970 int i, s, error; 971 972 for (i = 0; i < nsegs; i++) { 973 /* Go from VME to CPU view */ 974 dva = segs[i].ds_addr + VME4_DVMA_BASE; 975 dva &= -PAGE_SIZE; 976 len = segs[i]._ds_sgsize; 977 978 /* Remove double-mapping in DVMA space */ 979 pmap_remove(pmap_kernel(), dva, dva + len); 980 981 /* Release DVMA space */ 982 s = splhigh(); 983 error = extent_free(vme_dvmamap, dva, len, EX_NOWAIT); 984 splx(s); 985 if (error != 0) 986 printf("warning: %ld of DVMA space lost\n", len); 987 } 988 pmap_update(pmap_kernel()); 989 990 /* Mark the mappings as invalid. */ 991 map->dm_mapsize = 0; 992 map->dm_nsegs = 0; 993 } 994 995 void 996 sparc_vme4_dmamap_sync(t, map, offset, len, ops) 997 bus_dma_tag_t t; 998 bus_dmamap_t map; 999 bus_addr_t offset; 1000 bus_size_t len; 1001 int ops; 1002 { 1003 1004 /* 1005 * XXX Should perform cache flushes as necessary (e.g. 4/200 W/B). 1006 * Currently the cache is flushed in bus_dma_load()... 1007 */ 1008 } 1009 #endif /* SUN4 */ 1010 1011 #if defined(SUN4M) 1012 static int 1013 sparc_vme_iommu_dmamap_create (t, size, nsegments, maxsegsz, 1014 boundary, flags, dmamp) 1015 bus_dma_tag_t t; 1016 bus_size_t size; 1017 int nsegments; 1018 bus_size_t maxsegsz; 1019 bus_size_t boundary; 1020 int flags; 1021 bus_dmamap_t *dmamp; 1022 { 1023 1024 printf("sparc_vme_dmamap_create: please use `vme_dmamap_create'\n"); 1025 return (EINVAL); 1026 } 1027 1028 static int 1029 sparc_vct_iommu_dmamap_create(cookie, size, am, datasize, swap, nsegments, 1030 maxsegsz, boundary, flags, dmamp) 1031 void *cookie; 1032 vme_size_t size; 1033 vme_am_t am; 1034 vme_datasize_t datasize; 1035 vme_swap_t swap; 1036 int nsegments; 1037 vme_size_t maxsegsz; 1038 vme_addr_t boundary; 1039 int flags; 1040 bus_dmamap_t *dmamp; 1041 { 1042 struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie; 1043 bus_dmamap_t map; 1044 int error; 1045 1046 /* Allocate a base map through parent bus ops */ 1047 error = bus_dmamap_create(sc->sc_dmatag, size, nsegments, maxsegsz, 1048 boundary, flags, &map); 1049 if (error != 0) 1050 return (error); 1051 1052 /* 1053 * Each I/O cache line maps to a 8K section of VME DVMA space, so 1054 * we must ensure that DVMA alloctions are always 8K aligned. 1055 */ 1056 map->_dm_align = VME_IOC_PAGESZ; 1057 1058 /* Set map region based on Address Modifier */ 1059 switch ((am & VME_AM_ADRSIZEMASK)) { 1060 case VME_AM_A16: 1061 case VME_AM_A24: 1062 /* 1 MB of DVMA space */ 1063 map->_dm_ex_start = VME_IOMMU_DVMA_AM24_BASE; 1064 map->_dm_ex_end = VME_IOMMU_DVMA_AM24_END; 1065 break; 1066 case VME_AM_A32: 1067 /* 8 MB of DVMA space */ 1068 map->_dm_ex_start = VME_IOMMU_DVMA_AM32_BASE; 1069 map->_dm_ex_end = VME_IOMMU_DVMA_AM32_END; 1070 break; 1071 } 1072 1073 *dmamp = map; 1074 return (0); 1075 } 1076 1077 int 1078 sparc_vme_iommu_dmamap_load(t, map, buf, buflen, p, flags) 1079 bus_dma_tag_t t; 1080 bus_dmamap_t map; 1081 void *buf; 1082 bus_size_t buflen; 1083 struct proc *p; 1084 int flags; 1085 { 1086 struct sparcvme_softc *sc = (struct sparcvme_softc *)t->_cookie; 1087 volatile u_int32_t *ioctags; 1088 int error; 1089 1090 /* Round request to a multiple of the I/O cache size */ 1091 buflen = (buflen + VME_IOC_PAGESZ - 1) & -VME_IOC_PAGESZ; 1092 error = bus_dmamap_load(sc->sc_dmatag, map, buf, buflen, p, flags); 1093 if (error != 0) 1094 return (error); 1095 1096 /* Allocate I/O cache entries for this range */ 1097 ioctags = sc->sc_ioctags + VME_IOC_LINE(map->dm_segs[0].ds_addr); 1098 while (buflen > 0) { 1099 *ioctags = VME_IOC_IC | VME_IOC_W; 1100 ioctags += VME_IOC_LINESZ/sizeof(*ioctags); 1101 buflen -= VME_IOC_PAGESZ; 1102 } 1103 1104 /* 1105 * Adjust DVMA address to VME view. 1106 * Note: the DVMA base address is the same for all 1107 * VME address spaces. 1108 */ 1109 map->dm_segs[0].ds_addr -= VME_IOMMU_DVMA_BASE; 1110 return (0); 1111 } 1112 1113 1114 void 1115 sparc_vme_iommu_dmamap_unload(t, map) 1116 bus_dma_tag_t t; 1117 bus_dmamap_t map; 1118 { 1119 struct sparcvme_softc *sc = (struct sparcvme_softc *)t->_cookie; 1120 volatile u_int32_t *flushregs; 1121 int len; 1122 1123 /* Go from VME to CPU view */ 1124 map->dm_segs[0].ds_addr += VME_IOMMU_DVMA_BASE; 1125 1126 /* Flush VME I/O cache */ 1127 len = map->dm_segs[0]._ds_sgsize; 1128 flushregs = sc->sc_iocflush + VME_IOC_LINE(map->dm_segs[0].ds_addr); 1129 while (len > 0) { 1130 *flushregs = 0; 1131 flushregs += VME_IOC_LINESZ/sizeof(*flushregs); 1132 len -= VME_IOC_PAGESZ; 1133 } 1134 1135 /* 1136 * Start a read from `tag space' which will not complete until 1137 * all cache flushes have finished 1138 */ 1139 (*sc->sc_ioctags); 1140 1141 bus_dmamap_unload(sc->sc_dmatag, map); 1142 } 1143 1144 void 1145 sparc_vme_iommu_dmamap_sync(t, map, offset, len, ops) 1146 bus_dma_tag_t t; 1147 bus_dmamap_t map; 1148 bus_addr_t offset; 1149 bus_size_t len; 1150 int ops; 1151 { 1152 1153 /* 1154 * XXX Should perform cache flushes as necessary. 1155 */ 1156 } 1157 #endif /* SUN4M */ 1158 1159 int 1160 sparc_vme_dmamem_map(t, segs, nsegs, size, kvap, flags) 1161 bus_dma_tag_t t; 1162 bus_dma_segment_t *segs; 1163 int nsegs; 1164 size_t size; 1165 caddr_t *kvap; 1166 int flags; 1167 { 1168 struct sparcvme_softc *sc = (struct sparcvme_softc *)t->_cookie; 1169 1170 return (bus_dmamem_map(sc->sc_dmatag, segs, nsegs, size, kvap, flags)); 1171 } 1172