/*	$NetBSD: vme_machdep.c,v 1.50 2004/06/27 16:08:42 pk Exp $	*/

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vme_machdep.c,v 1.50 2004/06/27 16:08:42 pk Exp $");

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/errno.h>

#include <sys/proc.h>
#include <sys/user.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#define _SPARC_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <sparc/sparc/iommuvar.h>
#include <machine/autoconf.h>
#include <machine/oldmon.h>
#include <machine/cpu.h>
#include <machine/ctlreg.h>

#include <dev/vme/vmereg.h>
#include <dev/vme/vmevar.h>

#include <sparc/sparc/asm.h>
#include <sparc/sparc/vaddrs.h>
#include <sparc/sparc/cpuvar.h>
#include <sparc/dev/vmereg.h>

struct sparcvme_softc {
	struct device	sc_dev;		/* base device */
	bus_space_tag_t	sc_bustag;
	bus_dma_tag_t	sc_dmatag;
	struct vmebusreg *sc_reg;	/* VME control registers */
	struct vmebusvec *sc_vec;	/* VME interrupt vector */
	struct rom_range *sc_range;	/* ROM range property */
	int		sc_nrange;
	volatile u_int32_t *sc_ioctags;	/* VME IO-cache tag registers */
	volatile u_int32_t *sc_iocflush;/* VME IO-cache flush registers */
	int		(*sc_vmeintr) __P((void *));
};
struct sparcvme_softc *sparcvme_sc;	/*XXX*/

/* autoconfiguration driver */
static int	vmematch_iommu __P((struct device *, struct cfdata *, void *));
static void	vmeattach_iommu __P((struct device *, struct device *, void *));
static int	vmematch_mainbus __P((struct device *, struct cfdata *, void *));
static void	vmeattach_mainbus __P((struct device *, struct device *, void *));
#if defined(SUN4)
int		vmeintr4 __P((void *));
#endif
#if defined(SUN4M)
int		vmeintr4m __P((void *));
static int	sparc_vme_error __P((void));
#endif


static int	sparc_vme_probe __P((void *, vme_addr_t, vme_size_t,
		    vme_am_t, vme_datasize_t,
		    int (*) __P((void *, bus_space_tag_t, bus_space_handle_t)),
		    void *));
static int	sparc_vme_map __P((void *, vme_addr_t, vme_size_t, vme_am_t,
		    vme_datasize_t, vme_swap_t,
		    bus_space_tag_t *, bus_space_handle_t *,
		    vme_mapresc_t *));
static void	sparc_vme_unmap __P((void *, vme_mapresc_t));
static int	sparc_vme_intr_map __P((void *, int, int, vme_intr_handle_t *));
static const struct evcnt *sparc_vme_intr_evcnt __P((void *,
		    vme_intr_handle_t));
static void *	sparc_vme_intr_establish __P((void *, vme_intr_handle_t, int,
		    int (*) __P((void *)), void *));
static void	sparc_vme_intr_disestablish __P((void *, void *));

static int	vmebus_translate __P((struct sparcvme_softc *, vme_am_t,
		    vme_addr_t, bus_addr_t *));
#ifdef notyet
#if defined(SUN4M)
static void	sparc_vme_iommu_barrier __P((bus_space_tag_t, bus_space_handle_t,
		    bus_size_t, bus_size_t, int));

#endif /* SUN4M */
#endif
/*
 * DMA functions.
 */
#if defined(SUN4) || defined(SUN4M)
static void	sparc_vct_dmamap_destroy __P((void *, bus_dmamap_t));
#endif

#if defined(SUN4)
static int	sparc_vct4_dmamap_create __P((void *, vme_size_t, vme_am_t,
		    vme_datasize_t, vme_swap_t, int, vme_size_t, vme_addr_t,
		    int, bus_dmamap_t *));
static int	sparc_vme4_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
		    bus_size_t, struct proc *, int));
static void	sparc_vme4_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
static void	sparc_vme4_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
		    bus_addr_t, bus_size_t, int));
#endif /* SUN4 */

#if defined(SUN4M)
static int	sparc_vct_iommu_dmamap_create __P((void *, vme_size_t, vme_am_t,
		    vme_datasize_t, vme_swap_t, int, vme_size_t, vme_addr_t,
		    int, bus_dmamap_t *));
static int	sparc_vme_iommu_dmamap_create __P((bus_dma_tag_t, bus_size_t,
		    int, bus_size_t, bus_size_t, int, bus_dmamap_t *));

static int	sparc_vme_iommu_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t,
		    void *, bus_size_t, struct proc *, int));
static void	sparc_vme_iommu_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
static void	sparc_vme_iommu_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
		    bus_addr_t, bus_size_t, int));
#endif /* SUN4M */

#if defined(SUN4) || defined(SUN4M)
static int	sparc_vme_dmamem_map __P((bus_dma_tag_t, bus_dma_segment_t *,
		    int, size_t, caddr_t *, int));
#endif

#if 0
static void	sparc_vme_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t));
static void	sparc_vme_dmamem_unmap __P((bus_dma_tag_t, caddr_t, size_t));
static paddr_t	sparc_vme_dmamem_mmap __P((bus_dma_tag_t,
		    bus_dma_segment_t *, int, off_t, int, int));
#endif

int sparc_vme_mmap_cookie __P((vme_addr_t, vme_am_t, bus_space_handle_t *));

CFATTACH_DECL(vme_mainbus, sizeof(struct sparcvme_softc),
    vmematch_mainbus, vmeattach_mainbus, NULL, NULL);

CFATTACH_DECL(vme_iommu, sizeof(struct sparcvme_softc),
    vmematch_iommu, vmeattach_iommu, NULL, NULL);

int (*vmeerr_handler) __P((void));

#define VMEMOD_D32 0x40 /* ??? */

/* If the PROM does not provide the `ranges' property, we make up our own */
struct rom_range vmebus_translations[] = {
#define _DS (VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA)
	{ VME_AM_A16|_DS,		0, PMAP_VME16, 0xffff0000, 0 },
	{ VME_AM_A24|_DS,		0, PMAP_VME16, 0xff000000, 0 },
	{ VME_AM_A32|_DS,		0, PMAP_VME16, 0x00000000, 0 },
	{ VME_AM_A16|VMEMOD_D32|_DS,	0, PMAP_VME32, 0xffff0000, 0 },
	{ VME_AM_A24|VMEMOD_D32|_DS,	0, PMAP_VME32, 0xff000000, 0 },
	{ VME_AM_A32|VMEMOD_D32|_DS,	0, PMAP_VME32, 0x00000000, 0 }
#undef _DS
};

/*
 * The VME bus logic on sun4 machines maps DMA requests in the first MB
 * of VME space to the last MB of DVMA space.  `vme_dvmamap' is used
 * for DVMA space allocations.  The DMA addresses returned by
 * bus_dmamap_load*() must be relocated by -VME4_DVMA_BASE.
 */
struct extent *vme_dvmamap;
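
/*
 * Illustrative sketch, not referenced by the driver: because the segment
 * addresses produced by sparc_vme4_dmamap_load() below are already
 * relocated into VME space, a sun4 driver can program its device with
 * `ds_addr' directly; converting back to the CPU's view of the DVMA
 * double-map is a simple addition:
 *
 *	bus_addr_t vme_addr = map->dm_segs[0].ds_addr;
 *	vaddr_t    cpu_dva  = (vaddr_t)(vme_addr + VME4_DVMA_BASE);
 */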
/*
 * The VME hardware on the sun4m IOMMU maps the first 8MB of 32-bit
 * VME space to the last 8MB of DVMA space and the first 1MB of
 * 24-bit VME space to the first 1MB of the last 8MB of DVMA space
 * (thus 24-bit VME space overlaps the first 1MB of 32-bit space).
 * The following constants define subregions in the IOMMU DVMA map
 * for VME DVMA allocations.  The DMA addresses returned by
 * bus_dmamap_load*() must be relocated by -VME_IOMMU_DVMA_BASE.
 */
#define VME_IOMMU_DVMA_BASE		0xff800000
#define VME_IOMMU_DVMA_AM24_BASE	VME_IOMMU_DVMA_BASE
#define VME_IOMMU_DVMA_AM24_END		0xff900000
#define VME_IOMMU_DVMA_AM32_BASE	VME_IOMMU_DVMA_BASE
#define VME_IOMMU_DVMA_AM32_END		IOMMU_DVMA_END

struct vme_chipset_tag sparc_vme_chipset_tag = {
	NULL,
	sparc_vme_map,
	sparc_vme_unmap,
	sparc_vme_probe,
	sparc_vme_intr_map,
	sparc_vme_intr_evcnt,
	sparc_vme_intr_establish,
	sparc_vme_intr_disestablish,
	0, 0, 0 /* bus specific DMA stuff */
};


#if defined(SUN4)
struct sparc_bus_dma_tag sparc_vme4_dma_tag = {
	NULL,	/* cookie */
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	sparc_vme4_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	sparc_vme4_dmamap_unload,
	sparc_vme4_dmamap_sync,

	_bus_dmamem_alloc,
	_bus_dmamem_free,
	sparc_vme_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};
#endif

#if defined(SUN4M)
struct sparc_bus_dma_tag sparc_vme_iommu_dma_tag = {
	NULL,	/* cookie */
	sparc_vme_iommu_dmamap_create,
	_bus_dmamap_destroy,
	sparc_vme_iommu_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	sparc_vme_iommu_dmamap_unload,
	sparc_vme_iommu_dmamap_sync,

	_bus_dmamem_alloc,
	_bus_dmamem_free,
	sparc_vme_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};
#endif


int
vmematch_mainbus(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct mainbus_attach_args *ma = aux;

	if (!CPU_ISSUN4)
		return (0);

	return (strcmp("vme", ma->ma_name) == 0);
}

int
vmematch_iommu(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct iommu_attach_args *ia = aux;

	return (strcmp("vme", ia->iom_name) == 0);
}


void
vmeattach_mainbus(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#if defined(SUN4)
	struct mainbus_attach_args *ma = aux;
	struct sparcvme_softc *sc = (struct sparcvme_softc *)self;
	struct vmebus_attach_args vba;

	if (self->dv_unit > 0) {
		printf(" unsupported\n");
		return;
	}

	sc->sc_bustag = ma->ma_bustag;
	sc->sc_dmatag = ma->ma_dmatag;

	/* VME interrupt entry point */
	sc->sc_vmeintr = vmeintr4;

/*XXX*/	sparc_vme_chipset_tag.cookie = self;
/*XXX*/	sparc_vme_chipset_tag.vct_dmamap_create = sparc_vct4_dmamap_create;
/*XXX*/	sparc_vme_chipset_tag.vct_dmamap_destroy = sparc_vct_dmamap_destroy;
/*XXX*/	sparc_vme4_dma_tag._cookie = self;

	vba.va_vct = &sparc_vme_chipset_tag;
	vba.va_bdt = &sparc_vme4_dma_tag;
	vba.va_slaveconfig = 0;

	/* Fall back to our own `range' construction */
	sc->sc_range = vmebus_translations;
	sc->sc_nrange =
	    sizeof(vmebus_translations)/sizeof(vmebus_translations[0]);

	vme_dvmamap = extent_create("vmedvma", VME4_DVMA_BASE, VME4_DVMA_END,
				    M_DEVBUF, 0, 0, EX_NOWAIT);
	if (vme_dvmamap == NULL)
		panic("vme: unable to allocate DVMA map");

	printf("\n");
	(void)config_found(self, &vba, 0);

#endif
	return;
}
/* sun4m vmebus */
void
vmeattach_iommu(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#if defined(SUN4M)
	struct sparcvme_softc *sc = (struct sparcvme_softc *)self;
	struct iommu_attach_args *ia = aux;
	struct vmebus_attach_args vba;
	bus_space_handle_t bh;
	int node;
	int cline;

	if (self->dv_unit > 0) {
		printf(" unsupported\n");
		return;
	}

	sc->sc_bustag = ia->iom_bustag;
	sc->sc_dmatag = ia->iom_dmatag;

	/* VME interrupt entry point */
	sc->sc_vmeintr = vmeintr4m;

/*XXX*/	sparc_vme_chipset_tag.cookie = self;
/*XXX*/	sparc_vme_chipset_tag.vct_dmamap_create = sparc_vct_iommu_dmamap_create;
/*XXX*/	sparc_vme_chipset_tag.vct_dmamap_destroy = sparc_vct_dmamap_destroy;
/*XXX*/	sparc_vme_iommu_dma_tag._cookie = self;

	vba.va_vct = &sparc_vme_chipset_tag;
	vba.va_bdt = &sparc_vme_iommu_dma_tag;
	vba.va_slaveconfig = 0;

	node = ia->iom_node;

	/*
	 * Map VME control space
	 */
	if (ia->iom_nreg < 2) {
		printf("%s: only %d register sets\n", self->dv_xname,
		    ia->iom_nreg);
		return;
	}

	if (bus_space_map(ia->iom_bustag,
			  (bus_addr_t) BUS_ADDR(ia->iom_reg[0].oa_space,
						ia->iom_reg[0].oa_base),
			  (bus_size_t)ia->iom_reg[0].oa_size,
			  BUS_SPACE_MAP_LINEAR,
			  &bh) != 0) {
		panic("%s: can't map vmebusreg", self->dv_xname);
	}
	sc->sc_reg = (struct vmebusreg *)bh;

	if (bus_space_map(ia->iom_bustag,
			  (bus_addr_t) BUS_ADDR(ia->iom_reg[1].oa_space,
						ia->iom_reg[1].oa_base),
			  (bus_size_t)ia->iom_reg[1].oa_size,
			  BUS_SPACE_MAP_LINEAR,
			  &bh) != 0) {
		panic("%s: can't map vmebusvec", self->dv_xname);
	}
	sc->sc_vec = (struct vmebusvec *)bh;

	/*
	 * Map VME IO cache tags and flush control.
	 */
	if (bus_space_map(ia->iom_bustag,
			  (bus_addr_t) BUS_ADDR(
				ia->iom_reg[1].oa_space,
				ia->iom_reg[1].oa_base + VME_IOC_TAGOFFSET),
			  VME_IOC_SIZE,
			  BUS_SPACE_MAP_LINEAR,
			  &bh) != 0) {
		panic("%s: can't map IOC tags", self->dv_xname);
	}
	sc->sc_ioctags = (u_int32_t *)bh;

	if (bus_space_map(ia->iom_bustag,
			  (bus_addr_t) BUS_ADDR(
				ia->iom_reg[1].oa_space,
				ia->iom_reg[1].oa_base + VME_IOC_FLUSHOFFSET),
			  VME_IOC_SIZE,
			  BUS_SPACE_MAP_LINEAR,
			  &bh) != 0) {
		panic("%s: can't map IOC flush registers", self->dv_xname);
	}
	sc->sc_iocflush = (u_int32_t *)bh;

	/*
	 * Get the `ranges' property.
	 */
	if (prom_getprop(node, "ranges", sizeof(struct rom_range),
	    &sc->sc_nrange, &sc->sc_range) != 0) {
		panic("%s: can't get ranges property", self->dv_xname);
	}

	sparcvme_sc = sc;
	vmeerr_handler = sparc_vme_error;

	/*
	 * Invalidate all IO-cache entries.
	 */
	for (cline = VME_IOC_SIZE/VME_IOC_LINESZ; cline > 0;) {
		sc->sc_ioctags[--cline] = 0;
	}

	/* Enable IO-cache */
	sc->sc_reg->vmebus_cr |= VMEBUS_CR_C;

	printf(": version 0x%x\n",
	       sc->sc_reg->vmebus_cr & VMEBUS_CR_IMPL);

	(void)config_found(self, &vba, 0);
#endif /* SUN4M */
}
#if defined(SUN4M)
static int
sparc_vme_error()
{
	struct sparcvme_softc *sc = sparcvme_sc;
	u_int32_t afsr, afpa;
	char bits[64];

	afsr = sc->sc_reg->vmebus_afsr;
	afpa = sc->sc_reg->vmebus_afar;
	printf("VME error:\n\tAFSR %s\n",
	       bitmask_snprintf(afsr, VMEBUS_AFSR_BITS, bits, sizeof(bits)));
	printf("\taddress: 0x%x%x\n", afsr, afpa);
	return (0);
}
#endif

int
vmebus_translate(sc, mod, addr, bap)
	struct sparcvme_softc *sc;
	vme_am_t mod;
	vme_addr_t addr;
	bus_addr_t *bap;
{
	int i;

	for (i = 0; i < sc->sc_nrange; i++) {
		struct rom_range *rp = &sc->sc_range[i];

		if (rp->cspace != mod)
			continue;

		/* We've found the connection to the parent bus */
		*bap = BUS_ADDR(rp->pspace, rp->poffset + addr);
		return (0);
	}
	return (ENOENT);
}
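
/*
 * Worked example (assuming the fallback `vmebus_translations' table
 * above rather than a PROM-supplied one): an A16 supervisor-data
 * access at VME address 0x1000, i.e.
 *
 *	vme_am_t mod = VME_AM_A16 | VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA;
 *	vmebus_translate(sc, mod, 0x1000, &ba);
 *
 * matches the first table entry exactly and yields
 * ba = BUS_ADDR(PMAP_VME16, 0xffff0000 + 0x1000), i.e. the A16 window
 * occupies the top 64KB of the parent bus range.
 */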
struct vmeprobe_myarg {
	int (*cb) __P((void *, bus_space_tag_t, bus_space_handle_t));
	void *cbarg;
	bus_space_tag_t tag;
	int res; /* backwards */
};

static int vmeprobe_mycb __P((void *, void *));
static int
vmeprobe_mycb(bh, arg)
	void *bh, *arg;
{
	struct vmeprobe_myarg *a = arg;

	a->res = (*a->cb)(a->cbarg, a->tag, (bus_space_handle_t)bh);
	return (!a->res);
}

int
sparc_vme_probe(cookie, addr, len, mod, datasize, callback, arg)
	void *cookie;
	vme_addr_t addr;
	vme_size_t len;
	vme_am_t mod;
	vme_datasize_t datasize;
	int (*callback) __P((void *, bus_space_tag_t, bus_space_handle_t));
	void *arg;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
	bus_addr_t paddr;
	bus_size_t size;
	struct vmeprobe_myarg myarg;
	int res, i;

	if (vmebus_translate(sc, mod, addr, &paddr) != 0)
		return (EINVAL);

	size = (datasize == VME_D8 ? 1 : (datasize == VME_D16 ? 2 : 4));

	if (callback) {
		myarg.cb = callback;
		myarg.cbarg = arg;
		myarg.tag = sc->sc_bustag;
		myarg.res = 0;
		res = bus_space_probe(sc->sc_bustag, paddr, size, 0,
				      0, vmeprobe_mycb, &myarg);
		return (res ? 0 : (myarg.res ? myarg.res : EIO));
	}

	for (i = 0; i < len / size; i++) {
		myarg.res = 0;
		res = bus_space_probe(sc->sc_bustag, paddr, size, 0,
				      0, 0, 0);
		if (res == 0)
			return (EIO);
		paddr += size;
	}
	return (0);
}

int
sparc_vme_map(cookie, addr, size, mod, datasize, swap, tp, hp, rp)
	void *cookie;
	vme_addr_t addr;
	vme_size_t size;
	vme_am_t mod;
	vme_datasize_t datasize;
	vme_swap_t swap;
	bus_space_tag_t *tp;
	bus_space_handle_t *hp;
	vme_mapresc_t *rp;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
	bus_addr_t paddr;
	int error;

	error = vmebus_translate(sc, mod, addr, &paddr);
	if (error != 0)
		return (error);

	*tp = sc->sc_bustag;
	return (bus_space_map(sc->sc_bustag, paddr, size, 0, hp));
}

int
sparc_vme_mmap_cookie(addr, mod, hp)
	vme_addr_t addr;
	vme_am_t mod;
	bus_space_handle_t *hp;
{
	struct sparcvme_softc *sc = sparcvme_sc;
	bus_addr_t paddr;
	int error;

	error = vmebus_translate(sc, mod, addr, &paddr);
	if (error != 0)
		return (error);

	return (bus_space_mmap(sc->sc_bustag, paddr, 0,
		0/*prot is ignored*/, 0));
}

#ifdef notyet
#if defined(SUN4M)
void
sparc_vme_iommu_barrier(t, h, offset, size, flags)
	bus_space_tag_t t;
	bus_space_handle_t h;
	bus_size_t offset;
	bus_size_t size;
	int flags;
{
	struct vmebusreg *vbp = (struct vmebusreg *)t->cookie;

	/* Read async fault status to flush write-buffers */
	(*(volatile int *)&vbp->vmebus_afsr);
}
#endif /* SUN4M */
#endif



/*
 * VME Interrupt Priority Level to sparc Processor Interrupt Level.
 */
static int vme_ipl_to_pil[] = {
	0,
	2,
	3,
	5,
	7,
	9,
	11,
	13
};


/*
 * All VME device interrupts go through vmeintr().  This function reads
 * the VME vector from the bus, then dispatches the device interrupt
 * handler.  All handlers for devices that map to the same Processor
 * Interrupt Level (according to the table above) are on a linked list
 * of `sparc_vme_intr_handle' structures, the head of which is passed
 * down as the argument to `vmeintr(void *arg)'.
 */
struct sparc_vme_intr_handle {
	struct intrhand	ih;
	struct sparc_vme_intr_handle *next;
	int	vec;		/* VME interrupt vector */
	int	pri;		/* VME interrupt priority */
	struct sparcvme_softc *sc;	/*XXX*/
};
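
/*
 * Sketch of the resulting dispatch structure (illustrative only): two
 * handlers established at the same VME IPL are prepended to one list,
 *
 *	intrhand[pil]->ih_arg:  h2 -> h1 -> NULL
 *
 * and vmeintr4()/vmeintr4m() below walk that list, invoking each
 * handler whose `vec' matches the vector fetched from the bus.
 */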
#if defined(SUN4)
int
vmeintr4(arg)
	void *arg;
{
	struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
	int level, vec;
	int rv = 0;

	level = (ihp->pri << 1) | 1;

	vec = ldcontrolb((caddr_t)(AC_VMEINTVEC | level));

	if (vec == -1) {
#ifdef DEBUG
		/*
		 * This seems to happen only with the i82586 based
		 * `ie1' boards.
		 */
		printf("vme: spurious interrupt at VME level %d\n", ihp->pri);
#endif
		return (1); /* XXX - pretend we handled it, for now */
	}

	for (; ihp; ihp = ihp->next)
		if (ihp->vec == vec && ihp->ih.ih_fun) {
			splx(ihp->ih.ih_classipl);
			rv |= (ihp->ih.ih_fun)(ihp->ih.ih_arg);
		}

	return (rv);
}
#endif

#if defined(SUN4M)
int
vmeintr4m(arg)
	void *arg;
{
	struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
	int level, vec;
	int rv = 0;

	level = (ihp->pri << 1) | 1;

#if 0
	int pending;

	/* Flush VME <=> Sbus write buffers */
	(*(volatile int *)&ihp->sc->sc_reg->vmebus_afsr);

	pending = *((int*)ICR_SI_PEND);
	if ((pending & SINTR_VME(ihp->pri)) == 0) {
		printf("vmeintr: not pending at pri %x(p 0x%x)\n",
			ihp->pri, pending);
		return (0);
	}
#endif
#if 0
	/* Why does this sometimes cause a bus timeout? */
	vec = ihp->sc->sc_vec->vmebusvec[level];
#else
	/* so, arrange to catch the fault... */
	{
	extern struct user *proc0paddr;
	extern int fkbyte __P((caddr_t, struct pcb *));
	caddr_t addr = (caddr_t)&ihp->sc->sc_vec->vmebusvec[level];
	struct pcb *xpcb;
	u_long saveonfault;
	int s;

	s = splhigh();
	if (curlwp == NULL)
		xpcb = (struct pcb *)proc0paddr;
	else
		xpcb = &curlwp->l_addr->u_pcb;

	saveonfault = (u_long)xpcb->pcb_onfault;
	vec = fkbyte(addr, xpcb);
	xpcb->pcb_onfault = (caddr_t)saveonfault;

	splx(s);
	}
#endif

	if (vec == -1) {
#ifdef DEBUG
		/*
		 * This seems to happen only with the i82586 based
		 * `ie1' boards.
		 */
		printf("vme: spurious interrupt at VME level %d\n", ihp->pri);
		printf("    ICR_SI_PEND=0x%x; VME AFSR=0x%x; VME AFAR=0x%x\n",
			*((int*)ICR_SI_PEND),
			ihp->sc->sc_reg->vmebus_afsr,
			ihp->sc->sc_reg->vmebus_afar);
#endif
		return (1); /* XXX - pretend we handled it, for now */
	}

	for (; ihp; ihp = ihp->next)
		if (ihp->vec == vec && ihp->ih.ih_fun) {
			splx(ihp->ih.ih_classipl);
			rv |= (ihp->ih.ih_fun)(ihp->ih.ih_arg);
		}

	return (rv);
}
#endif

int
sparc_vme_intr_map(cookie, level, vec, ihp)
	void *cookie;
	int level;
	int vec;
	vme_intr_handle_t *ihp;
{
	struct sparc_vme_intr_handle *ih;

	ih = (vme_intr_handle_t)
	    malloc(sizeof(struct sparc_vme_intr_handle), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (ENOMEM);
	ih->pri = level;
	ih->vec = vec;
	ih->sc = cookie;	/*XXX*/
	*ihp = ih;
	return (0);
}

const struct evcnt *
sparc_vme_intr_evcnt(cookie, vih)
	void *cookie;
	vme_intr_handle_t vih;
{

	/* XXX for now, no evcnt parent reported */
	return NULL;
}
void *
sparc_vme_intr_establish(cookie, vih, level, func, arg)
	void *cookie;
	vme_intr_handle_t vih;
	int level;
	int (*func) __P((void *));
	void *arg;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
	struct sparc_vme_intr_handle *svih =
			(struct sparc_vme_intr_handle *)vih;
	struct intrhand *ih;
	int pil;

	/* Translate VME priority to processor IPL */
	pil = vme_ipl_to_pil[svih->pri];

	if (level < pil)
		panic("vme_intr_establish: class lvl (%d) < pil (%d)\n",
			level, pil);

	svih->ih.ih_fun = func;
	svih->ih.ih_arg = arg;
	svih->ih.ih_classipl = level;	/* note: used slightly differently
					   than in intr.c (no shift) */
	svih->next = NULL;

	/* ensure the interrupt subsystem will call us at this level */
	for (ih = intrhand[pil]; ih != NULL; ih = ih->ih_next)
		if (ih->ih_fun == sc->sc_vmeintr)
			break;

	if (ih == NULL) {
		ih = (struct intrhand *)
			malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
		if (ih == NULL)
			panic("vme_addirq");
		bzero(ih, sizeof *ih);
		ih->ih_fun = sc->sc_vmeintr;
		ih->ih_arg = vih;
		intr_establish(pil, 0, ih, NULL);
	} else {
		svih->next = (vme_intr_handle_t)ih->ih_arg;
		ih->ih_arg = vih;
	}
	return (NULL);
}

void
sparc_vme_unmap(cookie, resc)
	void * cookie;
	vme_mapresc_t resc;
{
	/* Not implemented */
	panic("sparc_vme_unmap");
}

void
sparc_vme_intr_disestablish(cookie, a)
	void *cookie;
	void *a;
{
	/* Not implemented */
	panic("sparc_vme_intr_disestablish");
}



/*
 * VME DMA functions.
 */

#if defined(SUN4) || defined(SUN4M)
static void
sparc_vct_dmamap_destroy(cookie, map)
	void *cookie;
	bus_dmamap_t map;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
	bus_dmamap_destroy(sc->sc_dmatag, map);
}
#endif

#if defined(SUN4)
static int
sparc_vct4_dmamap_create(cookie, size, am, datasize, swap, nsegments, maxsegsz,
			 boundary, flags, dmamp)
	void *cookie;
	vme_size_t size;
	vme_am_t am;
	vme_datasize_t datasize;
	vme_swap_t swap;
	int nsegments;
	vme_size_t maxsegsz;
	vme_addr_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;

	/* Allocate a base map through parent bus ops */
	return (bus_dmamap_create(sc->sc_dmatag, size, nsegments, maxsegsz,
				  boundary, flags, dmamp));
}

int
sparc_vme4_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	bus_addr_t dva;
	bus_size_t sgsize;
	u_long ldva;
	vaddr_t va, voff;
	pmap_t pmap;
	int pagesz = PAGE_SIZE;
	int error;

	cache_flush(buf, buflen); /* XXX - move to bus_dma_sync */

	va = (vaddr_t)buf;
	voff = va & (pagesz - 1);
	va &= -pagesz;

	/*
	 * Allocate an integral number of pages from DVMA space
	 * covering the passed buffer.
	 */
	sgsize = (buflen + voff + pagesz - 1) & -pagesz;
	error = extent_alloc(vme_dvmamap, sgsize, pagesz,
			     map->_dm_boundary,
			     (flags & BUS_DMA_NOWAIT) == 0
					? EX_WAITOK
					: EX_NOWAIT,
			     &ldva);
	if (error != 0)
		return (error);
	dva = (bus_addr_t)ldva;

	map->dm_mapsize = buflen;
	map->dm_nsegs = 1;
	/* Adjust DVMA address to VME view */
	map->dm_segs[0].ds_addr = dva + voff - VME4_DVMA_BASE;
	map->dm_segs[0].ds_len = buflen;
	map->dm_segs[0]._ds_sgsize = sgsize;

	pmap = (p == NULL) ? pmap_kernel() : p->p_vmspace->vm_map.pmap;

	for (; sgsize != 0; ) {
		paddr_t pa;
		/*
		 * Get the physical address for this page.
		 */
		(void) pmap_extract(pmap, va, &pa);

#ifdef notyet
		if (have_iocache)
			pa |= PG_IOC;
#endif
		pmap_enter(pmap_kernel(), dva,
			   pa | PMAP_NC,
			   VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);

		dva += pagesz;
		va += pagesz;
		sgsize -= pagesz;
	}
	pmap_update(pmap_kernel());

	return (0);
}
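
/*
 * Net effect of the above, with made-up numbers: loading a 0x1800-byte
 * buffer at kernel VA 0xf0012345 allocates a page-rounded chunk of DVMA
 * space, double-maps the buffer's physical pages uncached into that
 * chunk, and reports a single segment
 *
 *	ds_addr = dva + 0x345 - VME4_DVMA_BASE		(VME view)
 *	ds_len  = 0x1800
 *
 * sparc_vme4_dmamap_unload() below undoes both the double-mapping and
 * the extent allocation.
 */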
void
sparc_vme4_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	bus_dma_segment_t *segs = map->dm_segs;
	int nsegs = map->dm_nsegs;
	bus_addr_t dva;
	bus_size_t len;
	int i, s, error;

	for (i = 0; i < nsegs; i++) {
		/* Go from VME to CPU view */
		dva = segs[i].ds_addr + VME4_DVMA_BASE;
		dva &= -PAGE_SIZE;
		len = segs[i]._ds_sgsize;

		/* Remove double-mapping in DVMA space */
		pmap_remove(pmap_kernel(), dva, dva + len);

		/* Release DVMA space */
		s = splhigh();
		error = extent_free(vme_dvmamap, dva, len, EX_NOWAIT);
		splx(s);
		if (error != 0)
			printf("warning: %ld bytes of DVMA space lost\n", len);
	}
	pmap_update(pmap_kernel());

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

void
sparc_vme4_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{

	/*
	 * XXX Should perform cache flushes as necessary (e.g. 4/200 W/B).
	 * Currently the cache is flushed in bus_dma_load()...
	 */
}
#endif /* SUN4 */

#if defined(SUN4M)
static int
sparc_vme_iommu_dmamap_create(t, size, nsegments, maxsegsz,
			      boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{

	printf("sparc_vme_iommu_dmamap_create: please use `vme_dmamap_create'\n");
	return (EINVAL);
}

static int
sparc_vct_iommu_dmamap_create(cookie, size, am, datasize, swap, nsegments,
			      maxsegsz, boundary, flags, dmamp)
	void *cookie;
	vme_size_t size;
	vme_am_t am;
	vme_datasize_t datasize;
	vme_swap_t swap;
	int nsegments;
	vme_size_t maxsegsz;
	vme_addr_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
	bus_dmamap_t map;
	int error;

	/* Allocate a base map through parent bus ops */
	error = bus_dmamap_create(sc->sc_dmatag, size, nsegments, maxsegsz,
				  boundary, flags, &map);
	if (error != 0)
		return (error);

	/*
	 * Each I/O cache line maps an 8K section of VME DVMA space, so
	 * we must ensure that DVMA allocations are always 8K aligned.
	 */
	map->_dm_align = VME_IOC_PAGESZ;

	/* Set map region based on Address Modifier */
	switch ((am & VME_AM_ADRSIZEMASK)) {
	case VME_AM_A16:
	case VME_AM_A24:
		/* 1 MB of DVMA space */
		map->_dm_ex_start = VME_IOMMU_DVMA_AM24_BASE;
		map->_dm_ex_end = VME_IOMMU_DVMA_AM24_END;
		break;
	case VME_AM_A32:
		/* 8 MB of DVMA space */
		map->_dm_ex_start = VME_IOMMU_DVMA_AM32_BASE;
		map->_dm_ex_end = VME_IOMMU_DVMA_AM32_END;
		break;
	}

	*dmamp = map;
	return (0);
}
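
/*
 * Example: a map created with am = VME_AM_A24 draws its DVMA space from
 * the 1MB AM24 window [0xff800000, 0xff900000).  After
 * sparc_vme_iommu_dmamap_load() below subtracts VME_IOMMU_DVMA_BASE,
 * the device therefore sees a 24-bit-addressable range in [0, 0x100000).
 */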
int
sparc_vme_iommu_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)t->_cookie;
	volatile u_int32_t *ioctags;
	int error;

	/* Round request to a multiple of the I/O cache size */
	buflen = (buflen + VME_IOC_PAGESZ - 1) & -VME_IOC_PAGESZ;
	error = bus_dmamap_load(sc->sc_dmatag, map, buf, buflen, p, flags);
	if (error != 0)
		return (error);

	/* Allocate I/O cache entries for this range */
	ioctags = sc->sc_ioctags + VME_IOC_LINE(map->dm_segs[0].ds_addr);
	while (buflen > 0) {
		*ioctags = VME_IOC_IC | VME_IOC_W;
		ioctags += VME_IOC_LINESZ/sizeof(*ioctags);
		buflen -= VME_IOC_PAGESZ;
	}

	/*
	 * Adjust DVMA address to VME view.
	 * Note: the DVMA base address is the same for all
	 * VME address spaces.
	 */
	map->dm_segs[0].ds_addr -= VME_IOMMU_DVMA_BASE;
	return (0);
}


void
sparc_vme_iommu_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)t->_cookie;
	volatile u_int32_t *flushregs;
	int len;

	/* Go from VME to CPU view */
	map->dm_segs[0].ds_addr += VME_IOMMU_DVMA_BASE;

	/* Flush VME I/O cache */
	len = map->dm_segs[0]._ds_sgsize;
	flushregs = sc->sc_iocflush + VME_IOC_LINE(map->dm_segs[0].ds_addr);
	while (len > 0) {
		*flushregs = 0;
		flushregs += VME_IOC_LINESZ/sizeof(*flushregs);
		len -= VME_IOC_PAGESZ;
	}

	/*
	 * Start a read from `tag space' which will not complete until
	 * all cache flushes have finished.
	 */
	(*sc->sc_ioctags);

	bus_dmamap_unload(sc->sc_dmatag, map);
}

void
sparc_vme_iommu_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{

	/*
	 * XXX Should perform cache flushes as necessary.
	 */
}
#endif /* SUN4M */

#if defined(SUN4) || defined(SUN4M)
int
sparc_vme_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)t->_cookie;

	return (bus_dmamem_map(sc->sc_dmatag, segs, nsegs, size, kvap, flags));
}
#endif /* SUN4 || SUN4M */