/*	$NetBSD: vme_machdep.c,v 1.51 2004/12/13 02:14:13 chs Exp $	*/

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vme_machdep.c,v 1.51 2004/12/13 02:14:13 chs Exp $");

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/errno.h>

#include <sys/proc.h>
#include <sys/user.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#define _SPARC_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <sparc/sparc/iommuvar.h>
#include <machine/autoconf.h>
#include <machine/oldmon.h>
#include <machine/cpu.h>
#include <machine/ctlreg.h>

#include <dev/vme/vmereg.h>
#include <dev/vme/vmevar.h>

#include <sparc/sparc/asm.h>
#include <sparc/sparc/vaddrs.h>
#include <sparc/sparc/cpuvar.h>
#include <sparc/dev/vmereg.h>

struct sparcvme_softc {
	struct device	sc_dev;			/* base device */
	bus_space_tag_t	sc_bustag;
	bus_dma_tag_t	sc_dmatag;
	struct vmebusreg *sc_reg;		/* VME control registers */
	struct vmebusvec *sc_vec;		/* VME interrupt vector */
	struct rom_range *sc_range;		/* ROM range property */
	int		sc_nrange;
	volatile u_int32_t *sc_ioctags;		/* VME IO-cache tag registers */
	volatile u_int32_t *sc_iocflush;	/* VME IO-cache flush registers */
	int		(*sc_vmeintr) __P((void *));
};
struct sparcvme_softc *sparcvme_sc;	/* XXX */

/* autoconfiguration driver */
static int	vmematch_iommu __P((struct device *, struct cfdata *, void *));
static void	vmeattach_iommu __P((struct device *, struct device *,
		    void *));
static int	vmematch_mainbus __P((struct device *, struct cfdata *,
		    void *));
static void	vmeattach_mainbus __P((struct device *, struct device *,
		    void *));
#if defined(SUN4)
int	vmeintr4 __P((void *));
#endif
#if defined(SUN4M)
int	vmeintr4m __P((void *));
static int	sparc_vme_error __P((void));
#endif


static int	sparc_vme_probe __P((void *, vme_addr_t, vme_size_t,
		    vme_am_t, vme_datasize_t,
		    int (*) __P((void *, bus_space_tag_t, bus_space_handle_t)),
		    void *));
static int	sparc_vme_map __P((void *, vme_addr_t, vme_size_t, vme_am_t,
		    vme_datasize_t, vme_swap_t,
		    bus_space_tag_t *, bus_space_handle_t *, vme_mapresc_t *));
static void	sparc_vme_unmap __P((void *, vme_mapresc_t));
static int	sparc_vme_intr_map __P((void *, int, int, vme_intr_handle_t *));
static const struct evcnt *sparc_vme_intr_evcnt __P((void *,
		    vme_intr_handle_t));
static void *	sparc_vme_intr_establish __P((void *, vme_intr_handle_t, int,
		    int (*) __P((void *)), void *));
static void	sparc_vme_intr_disestablish __P((void *, void *));

static int	vmebus_translate __P((struct sparcvme_softc *, vme_am_t,
		    vme_addr_t, bus_addr_t *));
#ifdef notyet
#if defined(SUN4M)
static void	sparc_vme_iommu_barrier __P((bus_space_tag_t,
		    bus_space_handle_t, bus_size_t, bus_size_t, int));

#endif /* SUN4M */
#endif

/*
 * DMA functions.
 */
#if defined(SUN4) || defined(SUN4M)
static void	sparc_vct_dmamap_destroy __P((void *, bus_dmamap_t));
#endif

#if defined(SUN4)
static int	sparc_vct4_dmamap_create __P((void *, vme_size_t, vme_am_t,
		    vme_datasize_t, vme_swap_t, int, vme_size_t, vme_addr_t,
		    int, bus_dmamap_t *));
static int	sparc_vme4_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t,
		    void *, bus_size_t, struct proc *, int));
static void	sparc_vme4_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
static void	sparc_vme4_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
		    bus_addr_t, bus_size_t, int));
#endif /* SUN4 */

#if defined(SUN4M)
static int	sparc_vct_iommu_dmamap_create __P((void *, vme_size_t,
		    vme_am_t, vme_datasize_t, vme_swap_t, int, vme_size_t,
		    vme_addr_t, int, bus_dmamap_t *));
static int	sparc_vme_iommu_dmamap_create __P((bus_dma_tag_t, bus_size_t,
		    int, bus_size_t, bus_size_t, int, bus_dmamap_t *));

static int	sparc_vme_iommu_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t,
		    void *, bus_size_t, struct proc *, int));
static void	sparc_vme_iommu_dmamap_unload __P((bus_dma_tag_t,
		    bus_dmamap_t));
static void	sparc_vme_iommu_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
		    bus_addr_t, bus_size_t, int));
#endif /* SUN4M */

#if defined(SUN4) || defined(SUN4M)
static int	sparc_vme_dmamem_map __P((bus_dma_tag_t, bus_dma_segment_t *,
		    int, size_t, caddr_t *, int));
#endif

#if 0
static void	sparc_vme_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t));
static void	sparc_vme_dmamem_unmap __P((bus_dma_tag_t, caddr_t, size_t));
static paddr_t	sparc_vme_dmamem_mmap __P((bus_dma_tag_t,
		    bus_dma_segment_t *, int, off_t, int, int));
#endif

int	sparc_vme_mmap_cookie __P((vme_addr_t, vme_am_t,
	    bus_space_handle_t *));

CFATTACH_DECL(vme_mainbus, sizeof(struct sparcvme_softc),
    vmematch_mainbus, vmeattach_mainbus, NULL, NULL);

CFATTACH_DECL(vme_iommu, sizeof(struct sparcvme_softc),
    vmematch_iommu, vmeattach_iommu, NULL, NULL);

static int vme_attached;

int	(*vmeerr_handler) __P((void));

#define VMEMOD_D32	0x40	/* ??? */

/* If the PROM does not provide the `ranges' property, we make up our own */
struct rom_range vmebus_translations[] = {
#define _DS (VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA)
	{ VME_AM_A16|_DS,		0, PMAP_VME16, 0xffff0000, 0 },
	{ VME_AM_A24|_DS,		0, PMAP_VME16, 0xff000000, 0 },
	{ VME_AM_A32|_DS,		0, PMAP_VME16, 0x00000000, 0 },
	{ VME_AM_A16|VMEMOD_D32|_DS,	0, PMAP_VME32, 0xffff0000, 0 },
	{ VME_AM_A24|VMEMOD_D32|_DS,	0, PMAP_VME32, 0xff000000, 0 },
	{ VME_AM_A32|VMEMOD_D32|_DS,	0, PMAP_VME32, 0x00000000, 0 }
#undef _DS
};
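
/*
 * Worked example (illustrative only): with the fallback table above, an
 * A16 supervisor-data access to VME address 0x1000 matches the first
 * entry, so vmebus_translate() below computes
 *
 *	BUS_ADDR(PMAP_VME16, 0xffff0000 + 0x1000)
 *
 * i.e. offset 0xffff1000 in 16-bit VME space.  The `poffset' column thus
 * places A16 space in the top 64KB and A24 space in the top 16MB of the
 * 4GB range, matching the sun4 hardware layout.
 */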

/*
 * The VME bus logic on sun4 machines maps DMA requests in the first MB
 * of VME space to the last MB of DVMA space.  `vme_dvmamap' is used
 * for DVMA space allocations.  The DMA addresses returned by
 * bus_dmamap_load*() must be relocated by -VME4_DVMA_BASE.
 */
struct extent *vme_dvmamap;
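
/*
 * Relocation example (a sketch; VME4_DVMA_BASE is defined elsewhere in
 * this port): a buffer double-mapped at DVMA address
 * VME4_DVMA_BASE + 0x4000 is reported to the driver as DMA address
 * 0x4000, which a VME master then presents in the first MB of VME space
 * and the bus logic folds back into the last MB of DVMA space.
 */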

/*
 * The VME hardware on the sun4m IOMMU maps the first 8MB of 32-bit
 * VME space to the last 8MB of DVMA space and the first 1MB of
 * 24-bit VME space to the first 1MB of the last 8MB of DVMA space
 * (thus 24-bit VME space overlaps the first 1MB of 32-bit space).
 * The following constants define subregions in the IOMMU DVMA map
 * for VME DVMA allocations.  The DMA addresses returned by
 * bus_dmamap_load*() must be relocated by -VME_IOMMU_DVMA_BASE.
 */
#define VME_IOMMU_DVMA_BASE		0xff800000
#define VME_IOMMU_DVMA_AM24_BASE	VME_IOMMU_DVMA_BASE
#define VME_IOMMU_DVMA_AM24_END		0xff900000
#define VME_IOMMU_DVMA_AM32_BASE	VME_IOMMU_DVMA_BASE
#define VME_IOMMU_DVMA_AM32_END		IOMMU_DVMA_END
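
/*
 * Resulting DVMA layout (sketch):
 *
 *	AM24 masters:	VME_IOMMU_DVMA_BASE .. 0xff900000	(1MB)
 *	AM32 masters:	VME_IOMMU_DVMA_BASE .. IOMMU_DVMA_END	(8MB)
 *
 * Constraining A16/A24 maps to the first 1MB guarantees that, after the
 * -VME_IOMMU_DVMA_BASE relocation, every DMA address fits in the 24
 * address bits such masters can drive.
 */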

struct vme_chipset_tag sparc_vme_chipset_tag = {
	NULL,
	sparc_vme_map,
	sparc_vme_unmap,
	sparc_vme_probe,
	sparc_vme_intr_map,
	sparc_vme_intr_evcnt,
	sparc_vme_intr_establish,
	sparc_vme_intr_disestablish,
	0, 0, 0	/* bus-specific DMA stuff */
};


#if defined(SUN4)
struct sparc_bus_dma_tag sparc_vme4_dma_tag = {
	NULL,	/* cookie */
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	sparc_vme4_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	sparc_vme4_dmamap_unload,
	sparc_vme4_dmamap_sync,

	_bus_dmamem_alloc,
	_bus_dmamem_free,
	sparc_vme_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};
#endif

#if defined(SUN4M)
struct sparc_bus_dma_tag sparc_vme_iommu_dma_tag = {
	NULL,	/* cookie */
	sparc_vme_iommu_dmamap_create,
	_bus_dmamap_destroy,
	sparc_vme_iommu_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	sparc_vme_iommu_dmamap_unload,
	sparc_vme_iommu_dmamap_sync,

	_bus_dmamem_alloc,
	_bus_dmamem_free,
	sparc_vme_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};
#endif


int
vmematch_mainbus(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct mainbus_attach_args *ma = aux;

	if (!CPU_ISSUN4 || vme_attached)
		return (0);

	return (strcmp("vme", ma->ma_name) == 0);
}

int
vmematch_iommu(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct iommu_attach_args *ia = aux;

	if (vme_attached)
		return (0);

	return (strcmp("vme", ia->iom_name) == 0);
}


void
vmeattach_mainbus(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#if defined(SUN4)
	struct mainbus_attach_args *ma = aux;
	struct sparcvme_softc *sc = (struct sparcvme_softc *)self;
	struct vmebus_attach_args vba;

	vme_attached = 1;

	sc->sc_bustag = ma->ma_bustag;
	sc->sc_dmatag = ma->ma_dmatag;

	/* VME interrupt entry point */
	sc->sc_vmeintr = vmeintr4;

/*XXX*/	sparc_vme_chipset_tag.cookie = self;
/*XXX*/	sparc_vme_chipset_tag.vct_dmamap_create = sparc_vct4_dmamap_create;
/*XXX*/	sparc_vme_chipset_tag.vct_dmamap_destroy = sparc_vct_dmamap_destroy;
/*XXX*/	sparc_vme4_dma_tag._cookie = self;

	vba.va_vct = &sparc_vme_chipset_tag;
	vba.va_bdt = &sparc_vme4_dma_tag;
	vba.va_slaveconfig = 0;

	/* Fall back to our own `ranges' construction */
	sc->sc_range = vmebus_translations;
	sc->sc_nrange =
	    sizeof(vmebus_translations)/sizeof(vmebus_translations[0]);

	vme_dvmamap = extent_create("vmedvma", VME4_DVMA_BASE, VME4_DVMA_END,
	    M_DEVBUF, 0, 0, EX_NOWAIT);
	if (vme_dvmamap == NULL)
		panic("vme: unable to allocate DVMA map");

	printf("\n");
	(void)config_found(self, &vba, 0);

#endif
	return;
}

/* sun4m vmebus */
void
vmeattach_iommu(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#if defined(SUN4M)
	struct sparcvme_softc *sc = (struct sparcvme_softc *)self;
	struct iommu_attach_args *ia = aux;
	struct vmebus_attach_args vba;
	bus_space_handle_t bh;
	int node;
	int cline;

	sc->sc_bustag = ia->iom_bustag;
	sc->sc_dmatag = ia->iom_dmatag;

	/* VME interrupt entry point */
	sc->sc_vmeintr = vmeintr4m;

/*XXX*/	sparc_vme_chipset_tag.cookie = self;
/*XXX*/	sparc_vme_chipset_tag.vct_dmamap_create = sparc_vct_iommu_dmamap_create;
/*XXX*/	sparc_vme_chipset_tag.vct_dmamap_destroy = sparc_vct_dmamap_destroy;
/*XXX*/	sparc_vme_iommu_dma_tag._cookie = self;

	vba.va_vct = &sparc_vme_chipset_tag;
	vba.va_bdt = &sparc_vme_iommu_dma_tag;
	vba.va_slaveconfig = 0;

	node = ia->iom_node;

	/*
	 * Map VME control space
	 */
	if (ia->iom_nreg < 2) {
		printf("%s: only %d register sets\n", self->dv_xname,
		    ia->iom_nreg);
		return;
	}

	if (bus_space_map(ia->iom_bustag,
	    (bus_addr_t)BUS_ADDR(ia->iom_reg[0].oa_space,
				 ia->iom_reg[0].oa_base),
	    (bus_size_t)ia->iom_reg[0].oa_size,
	    BUS_SPACE_MAP_LINEAR,
	    &bh) != 0) {
		panic("%s: can't map vmebusreg", self->dv_xname);
	}
	sc->sc_reg = (struct vmebusreg *)bh;

	if (bus_space_map(ia->iom_bustag,
	    (bus_addr_t)BUS_ADDR(ia->iom_reg[1].oa_space,
				 ia->iom_reg[1].oa_base),
	    (bus_size_t)ia->iom_reg[1].oa_size,
	    BUS_SPACE_MAP_LINEAR,
	    &bh) != 0) {
		panic("%s: can't map vmebusvec", self->dv_xname);
	}
	sc->sc_vec = (struct vmebusvec *)bh;

	/*
	 * Map VME IO cache tags and flush control.
	 */
	if (bus_space_map(ia->iom_bustag,
	    (bus_addr_t)BUS_ADDR(ia->iom_reg[1].oa_space,
				 ia->iom_reg[1].oa_base + VME_IOC_TAGOFFSET),
	    VME_IOC_SIZE,
	    BUS_SPACE_MAP_LINEAR,
	    &bh) != 0) {
		panic("%s: can't map IOC tags", self->dv_xname);
	}
	sc->sc_ioctags = (u_int32_t *)bh;

	if (bus_space_map(ia->iom_bustag,
	    (bus_addr_t)BUS_ADDR(ia->iom_reg[1].oa_space,
				 ia->iom_reg[1].oa_base + VME_IOC_FLUSHOFFSET),
	    VME_IOC_SIZE,
	    BUS_SPACE_MAP_LINEAR,
	    &bh) != 0) {
		panic("%s: can't map IOC flush registers", self->dv_xname);
	}
	sc->sc_iocflush = (u_int32_t *)bh;

	/*
	 * Get "ranges" property.
	 */
	if (prom_getprop(node, "ranges", sizeof(struct rom_range),
	    &sc->sc_nrange, &sc->sc_range) != 0) {
		panic("%s: can't get ranges property", self->dv_xname);
	}

	sparcvme_sc = sc;
	vmeerr_handler = sparc_vme_error;

	/*
	 * Invalidate all IO-cache entries.
	 */
	for (cline = VME_IOC_SIZE/VME_IOC_LINESZ; cline > 0;) {
		sc->sc_ioctags[--cline] = 0;
	}

	/* Enable IO-cache */
	sc->sc_reg->vmebus_cr |= VMEBUS_CR_C;

	printf(": version 0x%x\n",
	    sc->sc_reg->vmebus_cr & VMEBUS_CR_IMPL);

	(void)config_found(self, &vba, 0);
#endif /* SUN4M */
}

#if defined(SUN4M)
static int
sparc_vme_error()
{
	struct sparcvme_softc *sc = sparcvme_sc;
	u_int32_t afsr, afpa;
	char bits[64];

	afsr = sc->sc_reg->vmebus_afsr;
	afpa = sc->sc_reg->vmebus_afar;
	printf("VME error:\n\tAFSR %s\n",
	    bitmask_snprintf(afsr, VMEBUS_AFSR_BITS, bits, sizeof(bits)));
	printf("\taddress: 0x%x%x\n", afsr, afpa);
	return (0);
}
#endif

int
vmebus_translate(sc, mod, addr, bap)
	struct sparcvme_softc *sc;
	vme_am_t mod;
	vme_addr_t addr;
	bus_addr_t *bap;
{
	int i;

	for (i = 0; i < sc->sc_nrange; i++) {
		struct rom_range *rp = &sc->sc_range[i];

		if (rp->cspace != mod)
			continue;

		/* We've found the connection to the parent bus */
		*bap = BUS_ADDR(rp->pspace, rp->poffset + addr);
		return (0);
	}
	return (ENOENT);
}

struct vmeprobe_myarg {
	int (*cb) __P((void *, bus_space_tag_t, bus_space_handle_t));
	void *cbarg;
	bus_space_tag_t tag;
	int res;	/* backwards */
};

static int vmeprobe_mycb __P((void *, void *));

static int
vmeprobe_mycb(bh, arg)
	void *bh, *arg;
{
	struct vmeprobe_myarg *a = arg;

	a->res = (*a->cb)(a->cbarg, a->tag, (bus_space_handle_t)bh);
	return (!a->res);
}

int
sparc_vme_probe(cookie, addr, len, mod, datasize, callback, arg)
	void *cookie;
	vme_addr_t addr;
	vme_size_t len;
	vme_am_t mod;
	vme_datasize_t datasize;
	int (*callback) __P((void *, bus_space_tag_t, bus_space_handle_t));
	void *arg;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
	bus_addr_t paddr;
	bus_size_t size;
	struct vmeprobe_myarg myarg;
	int res, i;

	if (vmebus_translate(sc, mod, addr, &paddr) != 0)
		return (EINVAL);

	size = (datasize == VME_D8 ? 1 : (datasize == VME_D16 ? 2 : 4));

	if (callback) {
		myarg.cb = callback;
		myarg.cbarg = arg;
		myarg.tag = sc->sc_bustag;
		myarg.res = 0;
		res = bus_space_probe(sc->sc_bustag, paddr, size, 0,
		    0, vmeprobe_mycb, &myarg);
		return (res ? 0 : (myarg.res ? myarg.res : EIO));
	}

	for (i = 0; i < len / size; i++) {
		myarg.res = 0;
		res = bus_space_probe(sc->sc_bustag, paddr, size, 0,
		    0, 0, 0);
		if (res == 0)
			return (EIO);
		paddr += size;
	}
	return (0);
}
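
/*
 * A consumer normally reaches the probe routine above through the
 * machine-independent VME layer, roughly as follows (a sketch; see
 * <dev/vme/vmevar.h> for the exact interface; `vmeaddr' and `am' are
 * hypothetical driver values):
 *
 *	if (vme_probe(vc, vmeaddr, 2, am, VME_D16, NULL, NULL) != 0)
 *		return (0);	... nothing answered at that address
 *
 * With no callback, every datasize-sized cell in the range is peeked
 * via bus_space_probe(); the first inaccessible cell yields EIO.
 */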

int
sparc_vme_map(cookie, addr, size, mod, datasize, swap, tp, hp, rp)
	void *cookie;
	vme_addr_t addr;
	vme_size_t size;
	vme_am_t mod;
	vme_datasize_t datasize;
	vme_swap_t swap;
	bus_space_tag_t *tp;
	bus_space_handle_t *hp;
	vme_mapresc_t *rp;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
	bus_addr_t paddr;
	int error;

	error = vmebus_translate(sc, mod, addr, &paddr);
	if (error != 0)
		return (error);

	*tp = sc->sc_bustag;
	return (bus_space_map(sc->sc_bustag, paddr, size, 0, hp));
}

int
sparc_vme_mmap_cookie(addr, mod, hp)
	vme_addr_t addr;
	vme_am_t mod;
	bus_space_handle_t *hp;
{
	struct sparcvme_softc *sc = sparcvme_sc;
	bus_addr_t paddr;
	int error;

	error = vmebus_translate(sc, mod, addr, &paddr);
	if (error != 0)
		return (error);

	return (bus_space_mmap(sc->sc_bustag, paddr, 0,
	    0/*prot is ignored*/, 0));
}

#ifdef notyet
#if defined(SUN4M)
void
sparc_vme_iommu_barrier(t, h, offset, size, flags)
	bus_space_tag_t t;
	bus_space_handle_t h;
	bus_size_t offset;
	bus_size_t size;
	int flags;
{
	struct vmebusreg *vbp = (struct vmebusreg *)t->cookie;

	/* Read async fault status to flush write-buffers */
	(*(volatile int *)&vbp->vmebus_afsr);
}
#endif /* SUN4M */
#endif



/*
 * VME Interrupt Priority Level to sparc Processor Interrupt Level.
 */
static int vme_ipl_to_pil[] = {
	0,
	2,
	3,
	5,
	7,
	9,
	11,
	13
};


/*
 * All VME device interrupts go through vmeintr().  This function reads
 * the VME vector from the bus, then dispatches the device interrupt
 * handler.  All handlers for devices that map to the same Processor
 * Interrupt Level (according to the table above) are on a linked list
 * of `sparc_vme_intr_handle' structures, the head of which is passed
 * down as the argument to `vmeintr(void *arg)'.
 */
struct sparc_vme_intr_handle {
	struct intrhand ih;
	struct sparc_vme_intr_handle *next;
	int	vec;		/* VME interrupt vector */
	int	pri;		/* VME interrupt priority */
	struct sparcvme_softc *sc;	/* XXX */
};
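
/*
 * Dispatch example: a device interrupting at VME IPL 3 is entered on
 * processor level vme_ipl_to_pil[3] == 5.  The handlers below then form
 * `level' = (3 << 1) | 1 == 7, use it to fetch the interrupt vector from
 * the bus, and walk this list until a handle with a matching `vec' (and
 * a registered ih_fun) is found.
 */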

#if defined(SUN4)
int
vmeintr4(arg)
	void *arg;
{
	struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
	int level, vec;
	int rv = 0;

	level = (ihp->pri << 1) | 1;

	vec = ldcontrolb((caddr_t)(AC_VMEINTVEC | level));

	if (vec == -1) {
#ifdef DEBUG
		/*
		 * This seems to happen only with the i82586-based
		 * `ie1' boards.
		 */
		printf("vme: spurious interrupt at VME level %d\n", ihp->pri);
#endif
		return (1); /* XXX - pretend we handled it, for now */
	}

	for (; ihp; ihp = ihp->next)
		if (ihp->vec == vec && ihp->ih.ih_fun) {
			splx(ihp->ih.ih_classipl);
			rv |= (ihp->ih.ih_fun)(ihp->ih.ih_arg);
		}

	return (rv);
}
#endif

#if defined(SUN4M)
int
vmeintr4m(arg)
	void *arg;
{
	struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
	int level, vec;
	int rv = 0;

	level = (ihp->pri << 1) | 1;

#if 0
	int pending;

	/* Flush VME <=> Sbus write buffers */
	(*(volatile int *)&ihp->sc->sc_reg->vmebus_afsr);

	pending = *((int *)ICR_SI_PEND);
	if ((pending & SINTR_VME(ihp->pri)) == 0) {
		printf("vmeintr: not pending at pri %x(p 0x%x)\n",
		    ihp->pri, pending);
		return (0);
	}
#endif
#if 0
	/* Why does this sometimes cause a bus timeout? */
	vec = ihp->sc->sc_vec->vmebusvec[level];
#else
	/* so, arrange to catch the fault... */
	{
		extern struct user *proc0paddr;
		extern int fkbyte __P((caddr_t, struct pcb *));
		caddr_t addr = (caddr_t)&ihp->sc->sc_vec->vmebusvec[level];
		struct pcb *xpcb;
		u_long saveonfault;
		int s;

		s = splhigh();
		if (curlwp == NULL)
			xpcb = (struct pcb *)proc0paddr;
		else
			xpcb = &curlwp->l_addr->u_pcb;

		saveonfault = (u_long)xpcb->pcb_onfault;
		vec = fkbyte(addr, xpcb);
		xpcb->pcb_onfault = (caddr_t)saveonfault;

		splx(s);
	}
#endif

	if (vec == -1) {
#ifdef DEBUG
		/*
		 * This seems to happen only with the i82586-based
		 * `ie1' boards.
		 */
		printf("vme: spurious interrupt at VME level %d\n", ihp->pri);
		printf(" ICR_SI_PEND=0x%x; VME AFSR=0x%x; VME AFAR=0x%x\n",
		    *((int *)ICR_SI_PEND),
		    ihp->sc->sc_reg->vmebus_afsr,
		    ihp->sc->sc_reg->vmebus_afar);
#endif
		return (1); /* XXX - pretend we handled it, for now */
	}

	for (; ihp; ihp = ihp->next)
		if (ihp->vec == vec && ihp->ih.ih_fun) {
			splx(ihp->ih.ih_classipl);
			rv |= (ihp->ih.ih_fun)(ihp->ih.ih_arg);
		}

	return (rv);
}
#endif

int
sparc_vme_intr_map(cookie, level, vec, ihp)
	void *cookie;
	int level;
	int vec;
	vme_intr_handle_t *ihp;
{
	struct sparc_vme_intr_handle *ih;

	ih = (vme_intr_handle_t)
	    malloc(sizeof(struct sparc_vme_intr_handle), M_DEVBUF, M_NOWAIT);
	ih->pri = level;
	ih->vec = vec;
	ih->sc = cookie;	/* XXX */
	*ihp = ih;
	return (0);
}

const struct evcnt *
sparc_vme_intr_evcnt(cookie, vih)
	void *cookie;
	vme_intr_handle_t vih;
{

	/* XXX for now, no evcnt parent reported */
	return (NULL);
}

void *
sparc_vme_intr_establish(cookie, vih, level, func, arg)
	void *cookie;
	vme_intr_handle_t vih;
	int level;
	int (*func) __P((void *));
	void *arg;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
	struct sparc_vme_intr_handle *svih =
	    (struct sparc_vme_intr_handle *)vih;
	struct intrhand *ih;
	int pil;

	/* Translate VME priority to processor IPL */
	pil = vme_ipl_to_pil[svih->pri];

	if (level < pil)
		panic("vme_intr_establish: class lvl (%d) < pil (%d)\n",
		    level, pil);

	svih->ih.ih_fun = func;
	svih->ih.ih_arg = arg;
	svih->ih.ih_classipl = level;	/* note: used slightly differently
					   than in intr.c (no shift) */
	svih->next = NULL;

	/* Ensure the interrupt subsystem will call us at this level */
	for (ih = intrhand[pil]; ih != NULL; ih = ih->ih_next)
		if (ih->ih_fun == sc->sc_vmeintr)
			break;

	if (ih == NULL) {
		ih = (struct intrhand *)
		    malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
		if (ih == NULL)
			panic("vme_addirq");
		bzero(ih, sizeof *ih);
		ih->ih_fun = sc->sc_vmeintr;
		ih->ih_arg = vih;
		intr_establish(pil, 0, ih, NULL);
	} else {
		svih->next = (vme_intr_handle_t)ih->ih_arg;
		ih->ih_arg = vih;
	}
	return (NULL);
}
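
/*
 * Establishing a handler from a driver goes through the MI layer, e.g.
 * (a sketch; `level', `vector', `xxintr' and `xxsoftc' are hypothetical
 * driver values):
 *
 *	vme_intr_handle_t ih;
 *
 *	vme_intr_map(vc, level, vector, &ih);
 *	vme_intr_establish(vc, ih, ipl, xxintr, xxsoftc);
 *
 * The first handler registered on a given processor level also hooks
 * sc->sc_vmeintr into the system interrupt list; later handlers are
 * simply chained onto the existing head.
 */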

void
sparc_vme_unmap(cookie, resc)
	void *cookie;
	vme_mapresc_t resc;
{

	/* Not implemented */
	panic("sparc_vme_unmap");
}

void
sparc_vme_intr_disestablish(cookie, a)
	void *cookie;
	void *a;
{

	/* Not implemented */
	panic("sparc_vme_intr_disestablish");
}



/*
 * VME DMA functions.
 */

#if defined(SUN4) || defined(SUN4M)
static void
sparc_vct_dmamap_destroy(cookie, map)
	void *cookie;
	bus_dmamap_t map;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;

	bus_dmamap_destroy(sc->sc_dmatag, map);
}
#endif

#if defined(SUN4)
static int
sparc_vct4_dmamap_create(cookie, size, am, datasize, swap, nsegments,
			 maxsegsz, boundary, flags, dmamp)
	void *cookie;
	vme_size_t size;
	vme_am_t am;
	vme_datasize_t datasize;
	vme_swap_t swap;
	int nsegments;
	vme_size_t maxsegsz;
	vme_addr_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;

	/* Allocate a base map through parent bus ops */
	return (bus_dmamap_create(sc->sc_dmatag, size, nsegments, maxsegsz,
	    boundary, flags, dmamp));
}

int
sparc_vme4_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	bus_addr_t dva;
	bus_size_t sgsize;
	u_long ldva;
	vaddr_t va, voff;
	pmap_t pmap;
	int pagesz = PAGE_SIZE;
	int error;

	cache_flush(buf, buflen);	/* XXX - move to bus_dma_sync */

	va = (vaddr_t)buf;
	voff = va & (pagesz - 1);
	va &= -pagesz;

	/*
	 * Allocate an integral number of pages from DVMA space
	 * covering the passed buffer.
	 */
	sgsize = (buflen + voff + pagesz - 1) & -pagesz;
	error = extent_alloc(vme_dvmamap, sgsize, pagesz,
	    map->_dm_boundary,
	    (flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT,
	    &ldva);
	if (error != 0)
		return (error);
	dva = (bus_addr_t)ldva;

	map->dm_mapsize = buflen;
	map->dm_nsegs = 1;
	/* Adjust DVMA address to VME view */
	map->dm_segs[0].ds_addr = dva + voff - VME4_DVMA_BASE;
	map->dm_segs[0].ds_len = buflen;
	map->dm_segs[0]._ds_sgsize = sgsize;

	pmap = (p == NULL) ? pmap_kernel() : p->p_vmspace->vm_map.pmap;

	for (; sgsize != 0; ) {
		paddr_t pa;

		/*
		 * Get the physical address for this page.
		 */
		(void) pmap_extract(pmap, va, &pa);

#ifdef notyet
		if (have_iocache)
			pa |= PG_IOC;
#endif
		pmap_enter(pmap_kernel(), dva,
		    pa | PMAP_NC,
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);

		dva += pagesz;
		va += pagesz;
		sgsize -= pagesz;
	}
	pmap_update(pmap_kernel());

	return (0);
}
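
/*
 * Sizing example for the routine above: with 4KB pages, a buffer at
 * va 0x1234 of length 0x2000 gives voff == 0x234 and
 * sgsize == (0x2000 + 0x234 + 0xfff) & -0x1000 == 0x3000, i.e. the DVMA
 * double-map spans the three pages the buffer touches, and the device
 * is handed the page-aligned DVMA address plus `voff', relocated by
 * -VME4_DVMA_BASE.
 */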

void
sparc_vme4_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	bus_dma_segment_t *segs = map->dm_segs;
	int nsegs = map->dm_nsegs;
	bus_addr_t dva;
	bus_size_t len;
	int i, s, error;

	for (i = 0; i < nsegs; i++) {
		/* Go from VME to CPU view */
		dva = segs[i].ds_addr + VME4_DVMA_BASE;
		dva &= -PAGE_SIZE;
		len = segs[i]._ds_sgsize;

		/* Remove double-mapping in DVMA space */
		pmap_remove(pmap_kernel(), dva, dva + len);

		/* Release DVMA space */
		s = splhigh();
		error = extent_free(vme_dvmamap, dva, len, EX_NOWAIT);
		splx(s);
		if (error != 0)
			printf("warning: %ld bytes of DVMA space lost\n", len);
	}
	pmap_update(pmap_kernel());

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

void
sparc_vme4_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{

	/*
	 * XXX Should perform cache flushes as necessary (e.g. 4/200 W/B).
	 * Currently the cache is flushed in bus_dmamap_load()...
	 */
}
#endif /* SUN4 */

#if defined(SUN4M)
static int
sparc_vme_iommu_dmamap_create(t, size, nsegments, maxsegsz,
			      boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{

	printf("sparc_vme_dmamap_create: please use `vme_dmamap_create'\n");
	return (EINVAL);
}

static int
sparc_vct_iommu_dmamap_create(cookie, size, am, datasize, swap, nsegments,
			      maxsegsz, boundary, flags, dmamp)
	void *cookie;
	vme_size_t size;
	vme_am_t am;
	vme_datasize_t datasize;
	vme_swap_t swap;
	int nsegments;
	vme_size_t maxsegsz;
	vme_addr_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
	bus_dmamap_t map;
	int error;

	/* Allocate a base map through parent bus ops */
	error = bus_dmamap_create(sc->sc_dmatag, size, nsegments, maxsegsz,
	    boundary, flags, &map);
	if (error != 0)
		return (error);

	/*
	 * Each I/O cache line maps to an 8K section of VME DVMA space,
	 * so we must ensure that DVMA allocations are always 8K aligned.
	 */
	map->_dm_align = VME_IOC_PAGESZ;

	/* Set map region based on Address Modifier */
	switch ((am & VME_AM_ADRSIZEMASK)) {
	case VME_AM_A16:
	case VME_AM_A24:
		/* 1 MB of DVMA space */
		map->_dm_ex_start = VME_IOMMU_DVMA_AM24_BASE;
		map->_dm_ex_end = VME_IOMMU_DVMA_AM24_END;
		break;
	case VME_AM_A32:
		/* 8 MB of DVMA space */
		map->_dm_ex_start = VME_IOMMU_DVMA_AM32_BASE;
		map->_dm_ex_end = VME_IOMMU_DVMA_AM32_END;
		break;
	}

	*dmamp = map;
	return (0);
}
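
/*
 * The 8K alignment set above mirrors the I/O cache geometry: each cache
 * line covers one VME_IOC_PAGESZ (8K) chunk of DVMA space, so carving
 * allocations out in whole 8K units keeps two unrelated maps from ever
 * sharing (and thus flushing) the same cache line.
 */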

int
sparc_vme_iommu_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)t->_cookie;
	volatile u_int32_t *ioctags;
	int error;

	/* Round request to a multiple of the I/O cache size */
	buflen = (buflen + VME_IOC_PAGESZ - 1) & -VME_IOC_PAGESZ;
	error = bus_dmamap_load(sc->sc_dmatag, map, buf, buflen, p, flags);
	if (error != 0)
		return (error);

	/* Allocate I/O cache entries for this range */
	ioctags = sc->sc_ioctags + VME_IOC_LINE(map->dm_segs[0].ds_addr);
	while (buflen > 0) {
		*ioctags = VME_IOC_IC | VME_IOC_W;
		ioctags += VME_IOC_LINESZ/sizeof(*ioctags);
		buflen -= VME_IOC_PAGESZ;
	}

	/*
	 * Adjust DVMA address to VME view.
	 * Note: the DVMA base address is the same for all
	 * VME address spaces.
	 */
	map->dm_segs[0].ds_addr -= VME_IOMMU_DVMA_BASE;
	return (0);
}


void
sparc_vme_iommu_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)t->_cookie;
	volatile u_int32_t *flushregs;
	int len;

	/* Go from VME to CPU view */
	map->dm_segs[0].ds_addr += VME_IOMMU_DVMA_BASE;

	/* Flush VME I/O cache */
	len = map->dm_segs[0]._ds_sgsize;
	flushregs = sc->sc_iocflush + VME_IOC_LINE(map->dm_segs[0].ds_addr);
	while (len > 0) {
		*flushregs = 0;
		flushregs += VME_IOC_LINESZ/sizeof(*flushregs);
		len -= VME_IOC_PAGESZ;
	}

	/*
	 * Start a read from `tag space', which will not complete until
	 * all cache flushes have finished.
	 */
	(*sc->sc_ioctags);

	bus_dmamap_unload(sc->sc_dmatag, map);
}

void
sparc_vme_iommu_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{

	/*
	 * XXX Should perform cache flushes as necessary.
	 */
}
#endif /* SUN4M */

#if defined(SUN4) || defined(SUN4M)
int
sparc_vme_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)t->_cookie;

	return (bus_dmamem_map(sc->sc_dmatag, segs, nsegs, size, kvap, flags));
}
#endif /* SUN4 || SUN4M */
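
/*
 * Typical DMA round trip through the tags above, as seen from a
 * hypothetical VME device driver (a sketch; error handling omitted;
 * `vc' and `dmat' are the chipset and DMA tags delivered in struct
 * vmebus_attach_args at attach time):
 *
 *	bus_dmamap_t map;
 *
 *	vme_dmamap_create(vc, size, am, datasize, swap,
 *	    1, size, 0, BUS_DMA_NOWAIT, &map);
 *	bus_dmamap_load(dmat, map, buf, size, NULL, BUS_DMA_NOWAIT);
 *	... start the transfer using map->dm_segs[0].ds_addr ...
 *	bus_dmamap_sync(dmat, map, 0, size, BUS_DMA_POSTREAD);
 *	bus_dmamap_unload(dmat, map);
 *	vme_dmamap_destroy(vc, map);
 */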