/*	$NetBSD: vme_machdep.c,v 1.53 2005/11/16 00:49:03 uwe Exp $ */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vme_machdep.c,v 1.53 2005/11/16 00:49:03 uwe Exp $");

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/errno.h>

#include <sys/proc.h>
#include <sys/user.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#define _SPARC_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <sparc/sparc/iommuvar.h>
#include <machine/autoconf.h>
#include <machine/oldmon.h>
#include <machine/cpu.h>
#include <machine/ctlreg.h>

#include <dev/vme/vmereg.h>
#include <dev/vme/vmevar.h>

#include <sparc/sparc/asm.h>
#include <sparc/sparc/vaddrs.h>
#include <sparc/sparc/cpuvar.h>
#include <sparc/dev/vmereg.h>

struct sparcvme_softc {
	struct device	 sc_dev;	/* base device */
	bus_space_tag_t	 sc_bustag;
	bus_dma_tag_t	 sc_dmatag;
	struct vmebusreg *sc_reg;	/* VME control registers */
	struct vmebusvec *sc_vec;	/* VME interrupt vector */
	struct rom_range *sc_range;	/* ROM range property */
	int		 sc_nrange;
	volatile uint32_t *sc_ioctags;	/* VME IO-cache tag registers */
	volatile uint32_t *sc_iocflush;	/* VME IO-cache flush registers */
	int		 (*sc_vmeintr)(void *);
};
struct sparcvme_softc *sparcvme_sc;	/*XXX*/
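
/*
 * There is only one VME bus instance per machine; it is recorded in
 * `sparcvme_sc' above so that sparc_vme_mmap_cookie() and the
 * sparc_vme_error() hook, which are entered without a chipset cookie,
 * can still find the softc.
 */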

/* autoconfiguration driver */
static int	vmematch_iommu(struct device *, struct cfdata *, void *);
static void	vmeattach_iommu(struct device *, struct device *, void *);
static int	vmematch_mainbus(struct device *, struct cfdata *, void *);
static void	vmeattach_mainbus(struct device *, struct device *, void *);
#if defined(SUN4)
int		vmeintr4(void *);
#endif
#if defined(SUN4M)
int		vmeintr4m(void *);
static int	sparc_vme_error(void);
#endif


static int	sparc_vme_probe(void *, vme_addr_t, vme_size_t,
		    vme_am_t, vme_datasize_t,
		    int (*)(void *, bus_space_tag_t, bus_space_handle_t),
		    void *);
static int	sparc_vme_map(void *, vme_addr_t, vme_size_t, vme_am_t,
		    vme_datasize_t, vme_swap_t,
		    bus_space_tag_t *, bus_space_handle_t *,
		    vme_mapresc_t *);
static void	sparc_vme_unmap(void *, vme_mapresc_t);
static int	sparc_vme_intr_map(void *, int, int, vme_intr_handle_t *);
static const struct evcnt *sparc_vme_intr_evcnt(void *, vme_intr_handle_t);
static void *	sparc_vme_intr_establish(void *, vme_intr_handle_t, int,
		    int (*)(void *), void *);
static void	sparc_vme_intr_disestablish(void *, void *);

static int	vmebus_translate(struct sparcvme_softc *, vme_am_t,
		    vme_addr_t, bus_addr_t *);
#ifdef notyet
#if defined(SUN4M)
static void	sparc_vme_iommu_barrier(bus_space_tag_t, bus_space_handle_t,
		    bus_size_t, bus_size_t, int);
#endif /* SUN4M */
#endif

/*
 * DMA functions.
 */
#if defined(SUN4) || defined(SUN4M)
static void	sparc_vct_dmamap_destroy(void *, bus_dmamap_t);
#endif

#if defined(SUN4)
static int	sparc_vct4_dmamap_create(void *, vme_size_t, vme_am_t,
		    vme_datasize_t, vme_swap_t, int, vme_size_t, vme_addr_t,
		    int, bus_dmamap_t *);
static int	sparc_vme4_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
		    bus_size_t, struct proc *, int);
static void	sparc_vme4_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
static void	sparc_vme4_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
		    bus_addr_t, bus_size_t, int);
#endif /* SUN4 */

#if defined(SUN4M)
static int	sparc_vct_iommu_dmamap_create(void *, vme_size_t, vme_am_t,
		    vme_datasize_t, vme_swap_t, int, vme_size_t, vme_addr_t,
		    int, bus_dmamap_t *);
static int	sparc_vme_iommu_dmamap_create(bus_dma_tag_t, bus_size_t,
		    int, bus_size_t, bus_size_t, int, bus_dmamap_t *);

static int	sparc_vme_iommu_dmamap_load(bus_dma_tag_t, bus_dmamap_t,
		    void *, bus_size_t, struct proc *, int);
static void	sparc_vme_iommu_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
static void	sparc_vme_iommu_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
		    bus_addr_t, bus_size_t, int);
#endif /* SUN4M */

#if defined(SUN4) || defined(SUN4M)
static int	sparc_vme_dmamem_map(bus_dma_tag_t, bus_dma_segment_t *,
		    int, size_t, caddr_t *, int);
#endif

#if 0
static void	sparc_vme_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
static void	sparc_vme_dmamem_unmap(bus_dma_tag_t, caddr_t, size_t);
static paddr_t	sparc_vme_dmamem_mmap(bus_dma_tag_t,
		    bus_dma_segment_t *, int, off_t, int, int);
#endif

int	sparc_vme_mmap_cookie(vme_addr_t, vme_am_t, bus_space_handle_t *);

CFATTACH_DECL(vme_mainbus, sizeof(struct sparcvme_softc),
    vmematch_mainbus, vmeattach_mainbus, NULL, NULL);

CFATTACH_DECL(vme_iommu, sizeof(struct sparcvme_softc),
    vmematch_iommu, vmeattach_iommu, NULL, NULL);

static int vme_attached;

int (*vmeerr_handler)(void);

#define VMEMOD_D32 0x40 /* ??? */

/* If the PROM does not provide the `ranges' property, we make up our own */
struct rom_range vmebus_translations[] = {
#define _DS (VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA)
	{ VME_AM_A16|_DS,		0, PMAP_VME16, 0xffff0000, 0 },
	{ VME_AM_A24|_DS,		0, PMAP_VME16, 0xff000000, 0 },
	{ VME_AM_A32|_DS,		0, PMAP_VME16, 0x00000000, 0 },
	{ VME_AM_A16|VMEMOD_D32|_DS,	0, PMAP_VME32, 0xffff0000, 0 },
	{ VME_AM_A24|VMEMOD_D32|_DS,	0, PMAP_VME32, 0xff000000, 0 },
	{ VME_AM_A32|VMEMOD_D32|_DS,	0, PMAP_VME32, 0x00000000, 0 }
#undef _DS
};

/*
 * The VME bus logic on sun4 machines maps DMA requests in the first MB
 * of VME space to the last MB of DVMA space.  `vme_dvmamap' is used
 * for DVMA space allocations.  The DMA addresses returned by
 * bus_dmamap_load*() must be relocated by -VME4_DVMA_BASE.
 */
struct extent *vme_dvmamap;

/*
 * The VME hardware on the sun4m IOMMU maps the first 8MB of 32-bit
 * VME space to the last 8MB of DVMA space and the first 1MB of
 * 24-bit VME space to the first 1MB of the last 8MB of DVMA space
 * (thus 24-bit VME space overlaps the first 1MB of 32-bit space).
 * The following constants define subregions in the IOMMU DVMA map
 * for VME DVMA allocations.  The DMA addresses returned by
 * bus_dmamap_load*() must be relocated by -VME_IOMMU_DVMA_BASE.
 */
#define VME_IOMMU_DVMA_BASE		0xff800000
#define VME_IOMMU_DVMA_AM24_BASE	VME_IOMMU_DVMA_BASE
#define VME_IOMMU_DVMA_AM24_END		0xff900000
#define VME_IOMMU_DVMA_AM32_BASE	VME_IOMMU_DVMA_BASE
#define VME_IOMMU_DVMA_AM32_END		IOMMU_DVMA_END
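
/*
 * For example: a buffer loaded at DVMA address 0xff804000 (within the
 * last 8MB of DVMA space) is handed to a VME A32 master as bus address
 * 0xff804000 - VME_IOMMU_DVMA_BASE = 0x00004000; the VME hardware maps
 * an access to that address straight back to DVMA address 0xff804000.
 */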

struct vme_chipset_tag sparc_vme_chipset_tag = {
	NULL,
	sparc_vme_map,
	sparc_vme_unmap,
	sparc_vme_probe,
	sparc_vme_intr_map,
	sparc_vme_intr_evcnt,
	sparc_vme_intr_establish,
	sparc_vme_intr_disestablish,
	0, 0, 0 /* bus specific DMA stuff */
};


#if defined(SUN4)
struct sparc_bus_dma_tag sparc_vme4_dma_tag = {
	NULL,	/* cookie */
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	sparc_vme4_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	sparc_vme4_dmamap_unload,
	sparc_vme4_dmamap_sync,

	_bus_dmamem_alloc,
	_bus_dmamem_free,
	sparc_vme_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};
#endif

#if defined(SUN4M)
struct sparc_bus_dma_tag sparc_vme_iommu_dma_tag = {
	NULL,	/* cookie */
	sparc_vme_iommu_dmamap_create,
	_bus_dmamap_destroy,
	sparc_vme_iommu_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	sparc_vme_iommu_dmamap_unload,
	sparc_vme_iommu_dmamap_sync,

	_bus_dmamem_alloc,
	_bus_dmamem_free,
	sparc_vme_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};
#endif


static int
vmematch_mainbus(struct device *parent, struct cfdata *cf, void *aux)
{
	struct mainbus_attach_args *ma = aux;

	if (!CPU_ISSUN4 || vme_attached)
		return (0);

	return (strcmp("vme", ma->ma_name) == 0);
}

static int
vmematch_iommu(struct device *parent, struct cfdata *cf, void *aux)
{
	struct iommu_attach_args *ia = aux;

	if (vme_attached)
		return (0);

	return (strcmp("vme", ia->iom_name) == 0);
}


static void
vmeattach_mainbus(struct device *parent, struct device *self, void *aux)
{
#if defined(SUN4)
	struct mainbus_attach_args *ma = aux;
	struct sparcvme_softc *sc = (struct sparcvme_softc *)self;
	struct vmebus_attach_args vba;

	vme_attached = 1;

	sc->sc_bustag = ma->ma_bustag;
	sc->sc_dmatag = ma->ma_dmatag;

	/* VME interrupt entry point */
	sc->sc_vmeintr = vmeintr4;

/*XXX*/	sparc_vme_chipset_tag.cookie = self;
/*XXX*/	sparc_vme_chipset_tag.vct_dmamap_create = sparc_vct4_dmamap_create;
/*XXX*/	sparc_vme_chipset_tag.vct_dmamap_destroy = sparc_vct_dmamap_destroy;
/*XXX*/	sparc_vme4_dma_tag._cookie = self;

	vba.va_vct = &sparc_vme_chipset_tag;
	vba.va_bdt = &sparc_vme4_dma_tag;
	vba.va_slaveconfig = 0;

	/* Fall back to our own `range' construction */
	sc->sc_range = vmebus_translations;
	sc->sc_nrange =
	    sizeof(vmebus_translations)/sizeof(vmebus_translations[0]);

	vme_dvmamap = extent_create("vmedvma", VME4_DVMA_BASE, VME4_DVMA_END,
				    M_DEVBUF, 0, 0, EX_NOWAIT);
	if (vme_dvmamap == NULL)
		panic("vme: unable to allocate DVMA map");

	printf("\n");
	(void)config_found(self, &vba, 0);

#endif /* SUN4 */
	return;
}

/* sun4m vmebus */
static void
vmeattach_iommu(struct device *parent, struct device *self, void *aux)
{
#if defined(SUN4M)
	struct sparcvme_softc *sc = (struct sparcvme_softc *)self;
	struct iommu_attach_args *ia = aux;
	struct vmebus_attach_args vba;
	bus_space_handle_t bh;
	int node;
	int cline;

	sc->sc_bustag = ia->iom_bustag;
	sc->sc_dmatag = ia->iom_dmatag;

	/* VME interrupt entry point */
	sc->sc_vmeintr = vmeintr4m;

/*XXX*/	sparc_vme_chipset_tag.cookie = self;
/*XXX*/	sparc_vme_chipset_tag.vct_dmamap_create = sparc_vct_iommu_dmamap_create;
/*XXX*/	sparc_vme_chipset_tag.vct_dmamap_destroy = sparc_vct_dmamap_destroy;
/*XXX*/	sparc_vme_iommu_dma_tag._cookie = self;

	vba.va_vct = &sparc_vme_chipset_tag;
	vba.va_bdt = &sparc_vme_iommu_dma_tag;
	vba.va_slaveconfig = 0;

	node = ia->iom_node;

	/*
	 * Map VME control space
	 */
	if (ia->iom_nreg < 2) {
		printf("%s: only %d register sets\n", self->dv_xname,
		    ia->iom_nreg);
		return;
	}

	if (bus_space_map(ia->iom_bustag,
			  (bus_addr_t) BUS_ADDR(ia->iom_reg[0].oa_space,
						ia->iom_reg[0].oa_base),
			  (bus_size_t)ia->iom_reg[0].oa_size,
			  BUS_SPACE_MAP_LINEAR,
			  &bh) != 0) {
		panic("%s: can't map vmebusreg", self->dv_xname);
	}
	sc->sc_reg = (struct vmebusreg *)bh;

	if (bus_space_map(ia->iom_bustag,
			  (bus_addr_t) BUS_ADDR(ia->iom_reg[1].oa_space,
						ia->iom_reg[1].oa_base),
			  (bus_size_t)ia->iom_reg[1].oa_size,
			  BUS_SPACE_MAP_LINEAR,
			  &bh) != 0) {
		panic("%s: can't map vmebusvec", self->dv_xname);
	}
	sc->sc_vec = (struct vmebusvec *)bh;

	/*
	 * Map VME IO cache tags and flush control.
	 */
	if (bus_space_map(ia->iom_bustag,
			  (bus_addr_t) BUS_ADDR(
				ia->iom_reg[1].oa_space,
				ia->iom_reg[1].oa_base + VME_IOC_TAGOFFSET),
			  VME_IOC_SIZE,
			  BUS_SPACE_MAP_LINEAR,
			  &bh) != 0) {
		panic("%s: can't map IOC tags", self->dv_xname);
	}
	sc->sc_ioctags = (uint32_t *)bh;

	if (bus_space_map(ia->iom_bustag,
			  (bus_addr_t) BUS_ADDR(
				ia->iom_reg[1].oa_space,
				ia->iom_reg[1].oa_base + VME_IOC_FLUSHOFFSET),
			  VME_IOC_SIZE,
			  BUS_SPACE_MAP_LINEAR,
			  &bh) != 0) {
		panic("%s: can't map IOC flush registers", self->dv_xname);
	}
	sc->sc_iocflush = (uint32_t *)bh;

	/*
	 * Get "ranges" property.
	 */
	if (prom_getprop(node, "ranges", sizeof(struct rom_range),
			 &sc->sc_nrange, &sc->sc_range) != 0) {
		panic("%s: can't get ranges property", self->dv_xname);
	}

	sparcvme_sc = sc;
	vmeerr_handler = sparc_vme_error;

	/*
	 * Invalidate all IO-cache entries.
	 */
	for (cline = VME_IOC_SIZE/VME_IOC_LINESZ; cline > 0;) {
		sc->sc_ioctags[--cline] = 0;
	}

	/* Enable IO-cache */
	sc->sc_reg->vmebus_cr |= VMEBUS_CR_C;

	printf(": version 0x%x\n",
	       sc->sc_reg->vmebus_cr & VMEBUS_CR_IMPL);

	(void)config_found(self, &vba, 0);
#endif /* SUN4M */
}

#if defined(SUN4M)
static int
sparc_vme_error(void)
{
	struct sparcvme_softc *sc = sparcvme_sc;
	uint32_t afsr, afpa;
	char bits[64];

	afsr = sc->sc_reg->vmebus_afsr;
	afpa = sc->sc_reg->vmebus_afar;
	printf("VME error:\n\tAFSR %s\n",
	       bitmask_snprintf(afsr, VMEBUS_AFSR_BITS, bits, sizeof(bits)));
	printf("\taddress: 0x%x%x\n", afsr, afpa);
	return (0);
}
#endif

static int
vmebus_translate(struct sparcvme_softc *sc, vme_am_t mod, vme_addr_t addr,
		 bus_addr_t *bap)
{
	int i;

	for (i = 0; i < sc->sc_nrange; i++) {
		struct rom_range *rp = &sc->sc_range[i];

		if (rp->cspace != mod)
			continue;

		/* We've found the connection to the parent bus */
		*bap = BUS_ADDR(rp->pspace, rp->poffset + addr);
		return (0);
	}
	return (ENOENT);
}
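
/*
 * For example, using the fallback `vmebus_translations' table above, an
 * A16 supervisory data access at VME address 0x1000 translates to the
 * parent bus address BUS_ADDR(PMAP_VME16, 0xffff0000 + 0x1000), i.e.
 * offset 0xffff1000 in the 16-bit VME space.
 */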

struct vmeprobe_myarg {
	int (*cb)(void *, bus_space_tag_t, bus_space_handle_t);
	void *cbarg;
	bus_space_tag_t tag;
	int res; /* backwards */
};

static int vmeprobe_mycb(void *, void *);

static int
vmeprobe_mycb(void *bh, void *arg)
{
	struct vmeprobe_myarg *a = arg;

	a->res = (*a->cb)(a->cbarg, a->tag, (bus_space_handle_t)bh);
	return (!a->res);
}

static int
sparc_vme_probe(void *cookie, vme_addr_t addr, vme_size_t len, vme_am_t mod,
		vme_datasize_t datasize,
		int (*callback)(void *, bus_space_tag_t, bus_space_handle_t),
		void *arg)
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
	bus_addr_t paddr;
	bus_size_t size;
	struct vmeprobe_myarg myarg;
	int res, i;

	if (vmebus_translate(sc, mod, addr, &paddr) != 0)
		return (EINVAL);

	size = (datasize == VME_D8 ? 1 : (datasize == VME_D16 ? 2 : 4));

	if (callback) {
		myarg.cb = callback;
		myarg.cbarg = arg;
		myarg.tag = sc->sc_bustag;
		myarg.res = 0;
		res = bus_space_probe(sc->sc_bustag, paddr, size, 0,
				      0, vmeprobe_mycb, &myarg);
		return (res ? 0 : (myarg.res ? myarg.res : EIO));
	}

	for (i = 0; i < len / size; i++) {
		myarg.res = 0;
		res = bus_space_probe(sc->sc_bustag, paddr, size, 0,
				      0, 0, 0);
		if (res == 0)
			return (EIO);
		paddr += size;
	}
	return (0);
}

static int
sparc_vme_map(void *cookie, vme_addr_t addr, vme_size_t size, vme_am_t mod,
	      vme_datasize_t datasize, vme_swap_t swap,
	      bus_space_tag_t *tp, bus_space_handle_t *hp, vme_mapresc_t *rp)
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
	bus_addr_t paddr;
	int error;

	error = vmebus_translate(sc, mod, addr, &paddr);
	if (error != 0)
		return (error);

	*tp = sc->sc_bustag;
	return (bus_space_map(sc->sc_bustag, paddr, size, 0, hp));
}

int
sparc_vme_mmap_cookie(vme_addr_t addr, vme_am_t mod, bus_space_handle_t *hp)
{
	struct sparcvme_softc *sc = sparcvme_sc;
	bus_addr_t paddr;
	int error;

	error = vmebus_translate(sc, mod, addr, &paddr);
	if (error != 0)
		return (error);

	return (bus_space_mmap(sc->sc_bustag, paddr, 0,
			       0/*prot is ignored*/, 0));
}

#ifdef notyet
#if defined(SUN4M)
static void
sparc_vme_iommu_barrier(bus_space_tag_t t, bus_space_handle_t h,
			bus_size_t offset, bus_size_t size,
			int flags)
{
	struct vmebusreg *vbp = (struct vmebusreg *)t->cookie;

	/* Read async fault status to flush write-buffers */
	(*(volatile int *)&vbp->vmebus_afsr);
}
#endif /* SUN4M */
#endif



/*
 * VME Interrupt Priority Level to sparc Processor Interrupt Level.
 */
static int vme_ipl_to_pil[] = {
	0,
	2,
	3,
	5,
	7,
	9,
	11,
	13
};


/*
 * All VME device interrupts go through vmeintr().  This function reads
 * the VME vector from the bus, then dispatches the device interrupt
 * handler.  All handlers for devices that map to the same Processor
 * Interrupt Level (according to the table above) are on a linked list
 * of `sparc_vme_intr_handle' structures, the head of which is passed
 * down as the argument to `vmeintr(void *arg)'.
 */
struct sparc_vme_intr_handle {
	struct intrhand ih;
	struct sparc_vme_intr_handle *next;
	int	vec;		/* VME interrupt vector */
	int	pri;		/* VME interrupt priority */
	struct sparcvme_softc *sc;	/*XXX*/
};

#if defined(SUN4)
int
vmeintr4(void *arg)
{
	struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
	int level, vec;
	int rv = 0;

	level = (ihp->pri << 1) | 1;

	vec = ldcontrolb((caddr_t)(AC_VMEINTVEC | level));

	if (vec == -1) {
#ifdef DEBUG
		/*
		 * This seems to happen only with the i82586 based
		 * `ie1' boards.
		 */
		printf("vme: spurious interrupt at VME level %d\n", ihp->pri);
#endif
		return (1); /* XXX - pretend we handled it, for now */
	}

	for (; ihp; ihp = ihp->next)
		if (ihp->vec == vec && ihp->ih.ih_fun) {
			splx(ihp->ih.ih_classipl);
			rv |= (ihp->ih.ih_fun)(ihp->ih.ih_arg);
		}

	return (rv);
}
#endif

#if defined(SUN4M)
int
vmeintr4m(void *arg)
{
	struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
	int level, vec;
	int rv = 0;

	level = (ihp->pri << 1) | 1;

#if 0
	int pending;

	/* Flush VME <=> Sbus write buffers */
	(*(volatile int *)&ihp->sc->sc_reg->vmebus_afsr);

	pending = *((int*)ICR_SI_PEND);
	if ((pending & SINTR_VME(ihp->pri)) == 0) {
		printf("vmeintr: non pending at pri %x(p 0x%x)\n",
			ihp->pri, pending);
		return (0);
	}
#endif
#if 0
	/* Why gives this a bus timeout sometimes? */
	vec = ihp->sc->sc_vec->vmebusvec[level];
#else
	/* so, arrange to catch the fault... */
	{
		extern struct user *proc0paddr;
		extern int fkbyte(volatile char *, struct pcb *);
		volatile char *addr = &ihp->sc->sc_vec->vmebusvec[level];
		struct pcb *xpcb;
		u_long saveonfault;
		int s;

		s = splhigh();
		if (curlwp == NULL)
			xpcb = (struct pcb *)proc0paddr;
		else
			xpcb = &curlwp->l_addr->u_pcb;

		saveonfault = (u_long)xpcb->pcb_onfault;
		vec = fkbyte(addr, xpcb);
		xpcb->pcb_onfault = (caddr_t)saveonfault;

		splx(s);
	}
#endif

	if (vec == -1) {
#ifdef DEBUG
		/*
		 * This seems to happen only with the i82586 based
		 * `ie1' boards.
		 */
		printf("vme: spurious interrupt at VME level %d\n", ihp->pri);
		printf(" ICR_SI_PEND=0x%x; VME AFSR=0x%x; VME AFAR=0x%x\n",
			*((int*)ICR_SI_PEND),
			ihp->sc->sc_reg->vmebus_afsr,
			ihp->sc->sc_reg->vmebus_afar);
#endif
		return (1); /* XXX - pretend we handled it, for now */
	}

	for (; ihp; ihp = ihp->next)
		if (ihp->vec == vec && ihp->ih.ih_fun) {
			splx(ihp->ih.ih_classipl);
			rv |= (ihp->ih.ih_fun)(ihp->ih.ih_arg);
		}

	return (rv);
}
#endif /* SUN4M */
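
/*
 * Sketch of how a driver typically reaches the map/establish routines
 * below through the MI <dev/vme/vmevar.h> wrappers; `vct', `xxintr'
 * and `xxsc' are hypothetical driver-side names:
 *
 *	vme_intr_handle_t ih;
 *
 *	vme_intr_map(vct, level, vector, &ih);
 *	vme_intr_establish(vct, ih, IPL_BIO, xxintr, xxsc);
 */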

static int
sparc_vme_intr_map(void *cookie, int level, int vec,
		   vme_intr_handle_t *ihp)
{
	struct sparc_vme_intr_handle *ih;

	ih = (vme_intr_handle_t)
	    malloc(sizeof(struct sparc_vme_intr_handle), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (ENOMEM);
	ih->pri = level;
	ih->vec = vec;
	ih->sc = cookie;	/*XXX*/
	*ihp = ih;
	return (0);
}

static const struct evcnt *
sparc_vme_intr_evcnt(void *cookie, vme_intr_handle_t vih)
{

	/* XXX for now, no evcnt parent reported */
	return NULL;
}

static void *
sparc_vme_intr_establish(void *cookie, vme_intr_handle_t vih, int level,
			 int (*func)(void *), void *arg)
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
	struct sparc_vme_intr_handle *svih =
			(struct sparc_vme_intr_handle *)vih;
	struct intrhand *ih;
	int pil;

	/* Translate VME priority to processor IPL */
	pil = vme_ipl_to_pil[svih->pri];

	if (level < pil)
		panic("vme_intr_establish: class lvl (%d) < pil (%d)\n",
			level, pil);

	svih->ih.ih_fun = func;
	svih->ih.ih_arg = arg;
	svih->ih.ih_classipl = level;	/* note: used slightly differently
					   than in intr.c (no shift) */
	svih->next = NULL;

	/* ensure the interrupt subsystem will call us at this level */
	for (ih = intrhand[pil]; ih != NULL; ih = ih->ih_next)
		if (ih->ih_fun == sc->sc_vmeintr)
			break;

	if (ih == NULL) {
		ih = (struct intrhand *)
			malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
		if (ih == NULL)
			panic("vme_addirq");
		bzero(ih, sizeof *ih);
		ih->ih_fun = sc->sc_vmeintr;
		ih->ih_arg = vih;
		intr_establish(pil, 0, ih, NULL);
	} else {
		svih->next = (vme_intr_handle_t)ih->ih_arg;
		ih->ih_arg = vih;
	}
	return (NULL);
}

static void
sparc_vme_unmap(void *cookie, vme_mapresc_t resc)
{

	/* Not implemented */
	panic("sparc_vme_unmap");
}

static void
sparc_vme_intr_disestablish(void *cookie, void *a)
{

	/* Not implemented */
	panic("sparc_vme_intr_disestablish");
}



/*
 * VME DMA functions.
 */

#if defined(SUN4) || defined(SUN4M)
static void
sparc_vct_dmamap_destroy(void *cookie, bus_dmamap_t map)
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;

	bus_dmamap_destroy(sc->sc_dmatag, map);
}
#endif

#if defined(SUN4)
static int
sparc_vct4_dmamap_create(void *cookie, vme_size_t size, vme_am_t am,
			 vme_datasize_t datasize, vme_swap_t swap,
			 int nsegments, vme_size_t maxsegsz,
			 vme_addr_t boundary, int flags,
			 bus_dmamap_t *dmamp)
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;

	/* Allocate a base map through parent bus ops */
	return (bus_dmamap_create(sc->sc_dmatag, size, nsegments, maxsegsz,
				  boundary, flags, dmamp));
}

static int
sparc_vme4_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map,
		       void *buf, bus_size_t buflen,
		       struct proc *p, int flags)
{
	bus_addr_t dva;
	bus_size_t sgsize;
	u_long ldva;
	vaddr_t va, voff;
	pmap_t pmap;
	int pagesz = PAGE_SIZE;
	int error;

	cache_flush(buf, buflen); /* XXX - move to bus_dma_sync */

	va = (vaddr_t)buf;
	voff = va & (pagesz - 1);
	va &= -pagesz;

	/*
	 * Allocate an integral number of pages from DVMA space
	 * covering the passed buffer.
	 */
	sgsize = (buflen + voff + pagesz - 1) & -pagesz;
	error = extent_alloc(vme_dvmamap, sgsize, pagesz,
			     map->_dm_boundary,
			     (flags & BUS_DMA_NOWAIT) == 0
					? EX_WAITOK
					: EX_NOWAIT,
			     &ldva);
	if (error != 0)
		return (error);
	dva = (bus_addr_t)ldva;

	map->dm_mapsize = buflen;
	map->dm_nsegs = 1;
	/* Adjust DVMA address to VME view */
	map->dm_segs[0].ds_addr = dva + voff - VME4_DVMA_BASE;
	map->dm_segs[0].ds_len = buflen;
	map->dm_segs[0]._ds_sgsize = sgsize;

	pmap = (p == NULL) ? pmap_kernel() : p->p_vmspace->vm_map.pmap;

	for (; sgsize != 0; ) {
		paddr_t pa;
		/*
		 * Get the physical address for this page.
		 */
		(void) pmap_extract(pmap, va, &pa);

#ifdef notyet
		if (have_iocache)
			pa |= PG_IOC;
#endif
		pmap_enter(pmap_kernel(), dva,
			   pa | PMAP_NC,
			   VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);

		dva += pagesz;
		va += pagesz;
		sgsize -= pagesz;
	}
	pmap_update(pmap_kernel());

	return (0);
}

static void
sparc_vme4_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	bus_dma_segment_t *segs = map->dm_segs;
	int nsegs = map->dm_nsegs;
	bus_addr_t dva;
	bus_size_t len;
	int i, s, error;

	for (i = 0; i < nsegs; i++) {
		/* Go from VME to CPU view */
		dva = segs[i].ds_addr + VME4_DVMA_BASE;
		dva &= -PAGE_SIZE;
		len = segs[i]._ds_sgsize;

		/* Remove double-mapping in DVMA space */
		pmap_remove(pmap_kernel(), dva, dva + len);

		/* Release DVMA space */
		s = splhigh();
		error = extent_free(vme_dvmamap, dva, len, EX_NOWAIT);
		splx(s);
		if (error != 0)
			printf("warning: %ld bytes of DVMA space lost\n",
			    (long)len);
	}
	pmap_update(pmap_kernel());

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

static void
sparc_vme4_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
		       bus_addr_t offset, bus_size_t len, int ops)
{

	/*
	 * XXX Should perform cache flushes as necessary (e.g. 4/200 W/B).
	 *     Currently the cache is flushed in bus_dma_load()...
	 */
}
#endif /* SUN4 */

#if defined(SUN4M)
static int
sparc_vme_iommu_dmamap_create(bus_dma_tag_t t, bus_size_t size,
			      int nsegments, bus_size_t maxsegsz,
			      bus_size_t boundary, int flags,
			      bus_dmamap_t *dmamp)
{

	printf("sparc_vme_iommu_dmamap_create: please use `vme_dmamap_create'\n");
	return (EINVAL);
}

static int
sparc_vct_iommu_dmamap_create(void *cookie, vme_size_t size, vme_am_t am,
			      vme_datasize_t datasize, vme_swap_t swap,
			      int nsegments, vme_size_t maxsegsz,
			      vme_addr_t boundary, int flags,
			      bus_dmamap_t *dmamp)
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
	bus_dmamap_t map;
	int error;

	/* Allocate a base map through parent bus ops */
	error = bus_dmamap_create(sc->sc_dmatag, size, nsegments, maxsegsz,
				  boundary, flags, &map);
	if (error != 0)
		return (error);

	/*
	 * Each I/O cache line maps to an 8K section of VME DVMA space,
	 * so we must ensure that DVMA allocations are always 8K aligned.
	 */
	map->_dm_align = VME_IOC_PAGESZ;

	/* Set map region based on Address Modifier */
	switch ((am & VME_AM_ADRSIZEMASK)) {
	case VME_AM_A16:
	case VME_AM_A24:
		/* 1 MB of DVMA space */
		map->_dm_ex_start = VME_IOMMU_DVMA_AM24_BASE;
		map->_dm_ex_end = VME_IOMMU_DVMA_AM24_END;
		break;
	case VME_AM_A32:
		/* 8 MB of DVMA space */
		map->_dm_ex_start = VME_IOMMU_DVMA_AM32_BASE;
		map->_dm_ex_end = VME_IOMMU_DVMA_AM32_END;
		break;
	default:
		/* Unsupported address space width */
		bus_dmamap_destroy(sc->sc_dmatag, map);
		return (EINVAL);
	}

	*dmamp = map;
	return (0);
}

static int
sparc_vme_iommu_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map,
			    void *buf, bus_size_t buflen,
			    struct proc *p, int flags)
{
	struct sparcvme_softc	*sc = (struct sparcvme_softc *)t->_cookie;
	volatile uint32_t	*ioctags;
	int			error;

	/* Round request to a multiple of the I/O cache size */
	buflen = (buflen + VME_IOC_PAGESZ - 1) & -VME_IOC_PAGESZ;
	error = bus_dmamap_load(sc->sc_dmatag, map, buf, buflen, p, flags);
	if (error != 0)
		return (error);

	/* Allocate I/O cache entries for this range */
	ioctags = sc->sc_ioctags + VME_IOC_LINE(map->dm_segs[0].ds_addr);
	while (buflen > 0) {
		*ioctags = VME_IOC_IC | VME_IOC_W;
		ioctags += VME_IOC_LINESZ/sizeof(*ioctags);
		buflen -= VME_IOC_PAGESZ;
	}

	/*
	 * Adjust DVMA address to VME view.
	 * Note: the DVMA base address is the same for all
	 * VME address spaces.
	 */
	map->dm_segs[0].ds_addr -= VME_IOMMU_DVMA_BASE;
	return (0);
}


static void
sparc_vme_iommu_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct sparcvme_softc	*sc = (struct sparcvme_softc *)t->_cookie;
	volatile uint32_t	*flushregs;
	int			len;

	/* Go from VME to CPU view */
	map->dm_segs[0].ds_addr += VME_IOMMU_DVMA_BASE;

	/* Flush VME I/O cache */
	len = map->dm_segs[0]._ds_sgsize;
	flushregs = sc->sc_iocflush + VME_IOC_LINE(map->dm_segs[0].ds_addr);
	while (len > 0) {
		*flushregs = 0;
		flushregs += VME_IOC_LINESZ/sizeof(*flushregs);
		len -= VME_IOC_PAGESZ;
	}

	/*
	 * Start a read from `tag space' which will not complete until
	 * all cache flushes have finished.
	 */
	(*sc->sc_ioctags);

	bus_dmamap_unload(sc->sc_dmatag, map);
}

static void
sparc_vme_iommu_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
			    bus_addr_t offset, bus_size_t len, int ops)
{

	/*
	 * XXX Should perform cache flushes as necessary.
	 */
}
#endif /* SUN4M */

#if defined(SUN4) || defined(SUN4M)
static int
sparc_vme_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
		     size_t size, caddr_t *kvap, int flags)
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)t->_cookie;

	return (bus_dmamem_map(sc->sc_dmatag, segs, nsegs, size, kvap, flags));
}
#endif /* SUN4 || SUN4M */
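
/*
 * Sketch of the DMA call sequence a VME device driver would use with
 * the tags exported above (through the MI <dev/vme/vmevar.h> layer);
 * the driver-side names are hypothetical and error handling is omitted:
 *
 *	bus_dmamap_t map;
 *
 *	vme_dmamap_create(vct, size, am, datasize, swap,
 *	    nsegs, maxsegsz, boundary, BUS_DMA_NOWAIT, &map);
 *	bus_dmamap_load(dmat, map, buf, size, NULL, BUS_DMA_NOWAIT);
 *	... program the device with map->dm_segs[0].ds_addr ...
 *	bus_dmamap_unload(dmat, map);
 *	vme_dmamap_destroy(vct, map);
 */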