1 /* $NetBSD: vme_machdep.c,v 1.56 2007/10/17 19:57:12 garbled Exp $ */ 2 3 /*- 4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Paul Kranenburg. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the NetBSD 21 * Foundation, Inc. and its contributors. 22 * 4. Neither the name of The NetBSD Foundation nor the names of its 23 * contributors may be used to endorse or promote products derived 24 * from this software without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 29 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 36 * POSSIBILITY OF SUCH DAMAGE. 37 */ 38 39 #include <sys/cdefs.h> 40 __KERNEL_RCSID(0, "$NetBSD: vme_machdep.c,v 1.56 2007/10/17 19:57:12 garbled Exp $"); 41 42 #include <sys/param.h> 43 #include <sys/extent.h> 44 #include <sys/systm.h> 45 #include <sys/device.h> 46 #include <sys/malloc.h> 47 #include <sys/errno.h> 48 49 #include <sys/proc.h> 50 #include <sys/user.h> 51 #include <sys/syslog.h> 52 53 #include <uvm/uvm_extern.h> 54 55 #define _SPARC_BUS_DMA_PRIVATE 56 #include <machine/bus.h> 57 #include <sparc/sparc/iommuvar.h> 58 #include <machine/autoconf.h> 59 #include <machine/oldmon.h> 60 #include <machine/cpu.h> 61 #include <machine/ctlreg.h> 62 63 #include <dev/vme/vmereg.h> 64 #include <dev/vme/vmevar.h> 65 66 #include <sparc/sparc/asm.h> 67 #include <sparc/sparc/vaddrs.h> 68 #include <sparc/sparc/cpuvar.h> 69 #include <sparc/dev/vmereg.h> 70 71 struct sparcvme_softc { 72 struct device sc_dev; /* base device */ 73 bus_space_tag_t sc_bustag; 74 bus_dma_tag_t sc_dmatag; 75 struct vmebusreg *sc_reg; /* VME control registers */ 76 struct vmebusvec *sc_vec; /* VME interrupt vector */ 77 struct rom_range *sc_range; /* ROM range property */ 78 int sc_nrange; 79 volatile uint32_t *sc_ioctags; /* VME IO-cache tag registers */ 80 volatile uint32_t *sc_iocflush;/* VME IO-cache flush registers */ 81 int (*sc_vmeintr)(void *); 82 }; 83 struct sparcvme_softc *sparcvme_sc;/*XXX*/ 84 85 /* autoconfiguration driver */ 86 static int vmematch_iommu(struct device *, struct 
cfdata *, void *); 87 static void vmeattach_iommu(struct device *, struct device *, void *); 88 static int vmematch_mainbus(struct device *, struct cfdata *, void *); 89 static void vmeattach_mainbus(struct device *, struct device *, void *); 90 #if defined(SUN4) 91 int vmeintr4(void *); 92 #endif 93 #if defined(SUN4M) 94 int vmeintr4m(void *); 95 static int sparc_vme_error(void); 96 #endif 97 98 99 static int sparc_vme_probe(void *, vme_addr_t, vme_size_t, 100 vme_am_t, vme_datasize_t, 101 int (*)(void *, 102 bus_space_tag_t, bus_space_handle_t), 103 void *); 104 static int sparc_vme_map(void *, vme_addr_t, vme_size_t, vme_am_t, 105 vme_datasize_t, vme_swap_t, 106 bus_space_tag_t *, bus_space_handle_t *, 107 vme_mapresc_t *); 108 static void sparc_vme_unmap(void *, vme_mapresc_t); 109 static int sparc_vme_intr_map(void *, int, int, vme_intr_handle_t *); 110 static const struct evcnt *sparc_vme_intr_evcnt(void *, vme_intr_handle_t); 111 static void * sparc_vme_intr_establish(void *, vme_intr_handle_t, int, 112 int (*)(void *), void *); 113 static void sparc_vme_intr_disestablish(void *, void *); 114 115 static int vmebus_translate(struct sparcvme_softc *, vme_am_t, 116 vme_addr_t, bus_addr_t *); 117 #ifdef notyet 118 #if defined(SUN4M) 119 static void sparc_vme_iommu_barrier(bus_space_tag_t, bus_space_handle_t, 120 bus_size_t, bus_size_t, int); 121 122 #endif /* SUN4M */ 123 #endif 124 125 /* 126 * DMA functions. 
127 */ 128 #if defined(SUN4) || defined(SUN4M) 129 static void sparc_vct_dmamap_destroy(void *, bus_dmamap_t); 130 #endif 131 132 #if defined(SUN4) 133 static int sparc_vct4_dmamap_create(void *, vme_size_t, vme_am_t, 134 vme_datasize_t, vme_swap_t, int, vme_size_t, vme_addr_t, 135 int, bus_dmamap_t *); 136 static int sparc_vme4_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *, 137 bus_size_t, struct proc *, int); 138 static void sparc_vme4_dmamap_unload(bus_dma_tag_t, bus_dmamap_t); 139 static void sparc_vme4_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, 140 bus_addr_t, bus_size_t, int); 141 #endif /* SUN4 */ 142 143 #if defined(SUN4M) 144 static int sparc_vct_iommu_dmamap_create(void *, vme_size_t, vme_am_t, 145 vme_datasize_t, vme_swap_t, int, vme_size_t, vme_addr_t, 146 int, bus_dmamap_t *); 147 static int sparc_vme_iommu_dmamap_create(bus_dma_tag_t, bus_size_t, 148 int, bus_size_t, bus_size_t, int, bus_dmamap_t *); 149 150 static int sparc_vme_iommu_dmamap_load(bus_dma_tag_t, bus_dmamap_t, 151 void *, bus_size_t, struct proc *, int); 152 static void sparc_vme_iommu_dmamap_unload(bus_dma_tag_t, bus_dmamap_t); 153 static void sparc_vme_iommu_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, 154 bus_addr_t, bus_size_t, int); 155 #endif /* SUN4M */ 156 157 #if defined(SUN4) || defined(SUN4M) 158 static int sparc_vme_dmamem_map(bus_dma_tag_t, bus_dma_segment_t *, 159 int, size_t, void **, int); 160 #endif 161 162 #if 0 163 static void sparc_vme_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t); 164 static void sparc_vme_dmamem_unmap(bus_dma_tag_t, void *, size_t); 165 static paddr_t sparc_vme_dmamem_mmap(bus_dma_tag_t, 166 bus_dma_segment_t *, int, off_t, int, int); 167 #endif 168 169 int sparc_vme_mmap_cookie(vme_addr_t, vme_am_t, bus_space_handle_t *); 170 171 CFATTACH_DECL(vme_mainbus, sizeof(struct sparcvme_softc), 172 vmematch_mainbus, vmeattach_mainbus, NULL, NULL); 173 174 CFATTACH_DECL(vme_iommu, sizeof(struct sparcvme_softc), 175 vmematch_iommu, vmeattach_iommu, NULL, NULL); 

/* Set once a VME bus instance has attached; prevents a second attach. */
static int vme_attached;

/* Hook called from trap code on an async VME fault (set at attach time). */
int (*vmeerr_handler)(void);

/* Address-modifier bit marking a 32-bit data access; origin unclear (XXX). */
#define VMEMOD_D32 0x40 /* ??? */

/*
 * If the PROM does not provide the `ranges' property, we make up our own.
 * Fields per struct rom_range as used by vmebus_translate():
 * cspace (VME AM), coffset, pspace (parent PMAP space), poffset, rsize.
 */
struct rom_range vmebus_translations[] = {
#define _DS (VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA)
	/* 16-bit data accesses */
	{ VME_AM_A16|_DS, 0, PMAP_VME16, 0xffff0000, 0 },
	{ VME_AM_A24|_DS, 0, PMAP_VME16, 0xff000000, 0 },
	{ VME_AM_A32|_DS, 0, PMAP_VME16, 0x00000000, 0 },
	/* 32-bit data accesses */
	{ VME_AM_A16|VMEMOD_D32|_DS, 0, PMAP_VME32, 0xffff0000, 0 },
	{ VME_AM_A24|VMEMOD_D32|_DS, 0, PMAP_VME32, 0xff000000, 0 },
	{ VME_AM_A32|VMEMOD_D32|_DS, 0, PMAP_VME32, 0x00000000, 0 }
#undef _DS
};

/*
 * The VME bus logic on sun4 machines maps DMA requests in the first MB
 * of VME space to the last MB of DVMA space.  `vme_dvmamap' is used
 * for DVMA space allocations.  The DMA addresses returned by
 * bus_dmamap_load*() must be relocated by -VME4_DVMA_BASE.
 */
struct extent *vme_dvmamap;

/*
 * The VME hardware on the sun4m IOMMU maps the first 8MB of 32-bit
 * VME space to the last 8MB of DVMA space and the first 1MB of
 * 24-bit VME space to the first 1MB of the last 8MB of DVMA space
 * (thus 24-bit VME space overlaps the first 1MB of 32-bit space).
 * The following constants define subregions in the IOMMU DVMA map
 * for VME DVMA allocations.  The DMA addresses returned by
 * bus_dmamap_load*() must be relocated by -VME_IOMMU_DVMA_BASE.
 */
#define VME_IOMMU_DVMA_BASE		0xff800000
#define VME_IOMMU_DVMA_AM24_BASE	VME_IOMMU_DVMA_BASE
#define VME_IOMMU_DVMA_AM24_END		0xff900000
#define VME_IOMMU_DVMA_AM32_BASE	VME_IOMMU_DVMA_BASE
#define VME_IOMMU_DVMA_AM32_END		IOMMU_DVMA_END

/*
 * Machine-independent VME chipset tag.  The DMA-map entries are patched
 * in at attach time (see the XXX assignments in vmeattach_*).
 */
struct vme_chipset_tag sparc_vme_chipset_tag = {
	NULL,
	sparc_vme_map,
	sparc_vme_unmap,
	sparc_vme_probe,
	sparc_vme_intr_map,
	sparc_vme_intr_evcnt,
	sparc_vme_intr_establish,
	sparc_vme_intr_disestablish,
	0, 0, 0 /* bus specific DMA stuff */
};


#if defined(SUN4)
/* sun4 DMA tag: VME-specific load/unload/sync, generic everything else. */
struct sparc_bus_dma_tag sparc_vme4_dma_tag = {
	NULL,	/* cookie */
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	sparc_vme4_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	sparc_vme4_dmamap_unload,
	sparc_vme4_dmamap_sync,

	_bus_dmamem_alloc,
	_bus_dmamem_free,
	sparc_vme_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};
#endif

#if defined(SUN4M)
/* sun4m DMA tag: routes DVMA through the IOMMU and the VME IO-cache. */
struct sparc_bus_dma_tag sparc_vme_iommu_dma_tag = {
	NULL,	/* cookie */
	sparc_vme_iommu_dmamap_create,
	_bus_dmamap_destroy,
	sparc_vme_iommu_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	sparc_vme_iommu_dmamap_unload,
	sparc_vme_iommu_dmamap_sync,

	_bus_dmamem_alloc,
	_bus_dmamem_free,
	sparc_vme_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};
#endif


/*
 * Match the sun4 VME bus on mainbus: name must be "vme", machine must
 * be a sun4, and no VME bus may have attached already.
 */
static int
vmematch_mainbus(struct device *parent, struct cfdata *cf, void *aux)
{
	struct mainbus_attach_args *ma = aux;

	if (!CPU_ISSUN4 || vme_attached)
		return (0);

	return (strcmp("vme", ma->ma_name) == 0);
}

/*
 * Match the sun4m VME bus below the IOMMU: name must be "vme" and no
 * VME bus may have attached already.
 */
static int
vmematch_iommu(struct device *parent, struct cfdata *cf, void *aux)
{
	struct iommu_attach_args *ia = aux;

	if (vme_attached)
		return 0;

	return (strcmp("vme", ia->iom_name) == 0);
}


/*
 * Attach the sun4 VME bus (direct child of mainbus): install the sun4
 * interrupt entry point and DMA-map hooks, set up a fallback `ranges'
 * table, create the DVMA allocation extent, and attach child devices.
 */
static void
vmeattach_mainbus(struct device *parent, struct device *self, void *aux)
{
#if defined(SUN4)
	struct mainbus_attach_args *ma = aux;
	struct sparcvme_softc *sc = (struct sparcvme_softc *)self;
	struct vmebus_attach_args vba;

	vme_attached = 1;

	sc->sc_bustag = ma->ma_bustag;
	sc->sc_dmatag = ma->ma_dmatag;

	/* VME interrupt entry point */
	sc->sc_vmeintr = vmeintr4;

	/* Patch the shared chipset/DMA tags for the sun4 flavour (XXX). */
/*XXX*/	sparc_vme_chipset_tag.cookie = self;
/*XXX*/	sparc_vme_chipset_tag.vct_dmamap_create = sparc_vct4_dmamap_create;
/*XXX*/	sparc_vme_chipset_tag.vct_dmamap_destroy = sparc_vct_dmamap_destroy;
/*XXX*/	sparc_vme4_dma_tag._cookie = self;

	vba.va_vct = &sparc_vme_chipset_tag;
	vba.va_bdt = &sparc_vme4_dma_tag;
	vba.va_slaveconfig = 0;

	/* Fall back to our own `range' construction */
	sc->sc_range = vmebus_translations;
	sc->sc_nrange =
		sizeof(vmebus_translations)/sizeof(vmebus_translations[0]);

	/* DVMA extent used by sparc_vme4_dmamap_load()/unload(). */
	vme_dvmamap = extent_create("vmedvma", VME4_DVMA_BASE, VME4_DVMA_END,
				    M_DEVBUF, 0, 0, EX_NOWAIT);
	if (vme_dvmamap == NULL)
		panic("vme: unable to allocate DVMA map");

	printf("\n");
	(void)config_found(self, &vba, 0);

#endif /* SUN4 */
	return;
}

/* sun4m vmebus */
/*
 * Attach the sun4m VME bus (child of the IOMMU): map the VME control
 * registers, interrupt vector registers and IO-cache tag/flush regions,
 * fetch the PROM `ranges' property, install the error handler, then
 * invalidate and enable the IO-cache before attaching children.
 */
static void
vmeattach_iommu(struct device *parent, struct device *self, void *aux)
{
#if defined(SUN4M)
	struct sparcvme_softc *sc = (struct sparcvme_softc *)self;
	struct iommu_attach_args *ia = aux;
	struct vmebus_attach_args vba;
	bus_space_handle_t bh;
	int node;
	int cline;

	sc->sc_bustag = ia->iom_bustag;
	sc->sc_dmatag = ia->iom_dmatag;

	/* VME interrupt entry point */
	sc->sc_vmeintr = vmeintr4m;

	/* Patch the shared chipset/DMA tags for the sun4m flavour (XXX). */
/*XXX*/	sparc_vme_chipset_tag.cookie = self;
/*XXX*/	sparc_vme_chipset_tag.vct_dmamap_create = sparc_vct_iommu_dmamap_create;
/*XXX*/	sparc_vme_chipset_tag.vct_dmamap_destroy = sparc_vct_dmamap_destroy;
/*XXX*/	sparc_vme_iommu_dma_tag._cookie = self;

	vba.va_vct = &sparc_vme_chipset_tag;
	vba.va_bdt = &sparc_vme_iommu_dma_tag;
	vba.va_slaveconfig = 0;

	node = ia->iom_node;

	/*
	 * Map VME control space; we need both register sets
	 * (bus registers and interrupt vector registers).
	 */
	if (ia->iom_nreg < 2) {
		printf("%s: only %d register sets\n", self->dv_xname,
			ia->iom_nreg);
		return;
	}

	if (bus_space_map(ia->iom_bustag,
			  (bus_addr_t) BUS_ADDR(ia->iom_reg[0].oa_space,
						ia->iom_reg[0].oa_base),
			  (bus_size_t)ia->iom_reg[0].oa_size,
			  BUS_SPACE_MAP_LINEAR,
			  &bh) != 0) {
		panic("%s: can't map vmebusreg", self->dv_xname);
	}
	sc->sc_reg = (struct vmebusreg *)bh;

	if (bus_space_map(ia->iom_bustag,
			  (bus_addr_t) BUS_ADDR(ia->iom_reg[1].oa_space,
						ia->iom_reg[1].oa_base),
			  (bus_size_t)ia->iom_reg[1].oa_size,
			  BUS_SPACE_MAP_LINEAR,
			  &bh) != 0) {
		panic("%s: can't map vmebusvec", self->dv_xname);
	}
	sc->sc_vec = (struct vmebusvec *)bh;

	/*
	 * Map VME IO cache tags and flush control.
	 * Both live at fixed offsets from register set 1.
	 */
	if (bus_space_map(ia->iom_bustag,
			  (bus_addr_t) BUS_ADDR(
				ia->iom_reg[1].oa_space,
				ia->iom_reg[1].oa_base + VME_IOC_TAGOFFSET),
			  VME_IOC_SIZE,
			  BUS_SPACE_MAP_LINEAR,
			  &bh) != 0) {
		panic("%s: can't map IOC tags", self->dv_xname);
	}
	sc->sc_ioctags = (uint32_t *)bh;

	if (bus_space_map(ia->iom_bustag,
			  (bus_addr_t) BUS_ADDR(
				ia->iom_reg[1].oa_space,
				ia->iom_reg[1].oa_base + VME_IOC_FLUSHOFFSET),
			  VME_IOC_SIZE,
			  BUS_SPACE_MAP_LINEAR,
			  &bh) != 0) {
		panic("%s: can't map IOC flush registers", self->dv_xname);
	}
	sc->sc_iocflush = (uint32_t *)bh;

	/*
	 * Get "range" property.
	 */
	if (prom_getprop(node, "ranges", sizeof(struct rom_range),
			 &sc->sc_nrange, &sc->sc_range) != 0) {
		panic("%s: can't get ranges property", self->dv_xname);
	}

	sparcvme_sc = sc;
	vmeerr_handler = sparc_vme_error;

	/*
	 * Invalidate all IO-cache entries.
433 */ 434 for (cline = VME_IOC_SIZE/VME_IOC_LINESZ; cline > 0;) { 435 sc->sc_ioctags[--cline] = 0; 436 } 437 438 /* Enable IO-cache */ 439 sc->sc_reg->vmebus_cr |= VMEBUS_CR_C; 440 441 printf(": version 0x%x\n", 442 sc->sc_reg->vmebus_cr & VMEBUS_CR_IMPL); 443 444 (void)config_found(self, &vba, 0); 445 #endif /* SUN4M */ 446 } 447 448 #if defined(SUN4M) 449 static int 450 sparc_vme_error(void) 451 { 452 struct sparcvme_softc *sc = sparcvme_sc; 453 uint32_t afsr, afpa; 454 char bits[64]; 455 456 afsr = sc->sc_reg->vmebus_afsr; 457 afpa = sc->sc_reg->vmebus_afar; 458 printf("VME error:\n\tAFSR %s\n", 459 bitmask_snprintf(afsr, VMEBUS_AFSR_BITS, bits, sizeof(bits))); 460 printf("\taddress: 0x%x%x\n", afsr, afpa); 461 return (0); 462 } 463 #endif 464 465 static int 466 vmebus_translate(struct sparcvme_softc *sc, vme_am_t mod, vme_addr_t addr, 467 bus_addr_t *bap) 468 { 469 int i; 470 471 for (i = 0; i < sc->sc_nrange; i++) { 472 struct rom_range *rp = &sc->sc_range[i]; 473 474 if (rp->cspace != mod) 475 continue; 476 477 /* We've found the connection to the parent bus */ 478 *bap = BUS_ADDR(rp->pspace, rp->poffset + addr); 479 return (0); 480 } 481 return (ENOENT); 482 } 483 484 struct vmeprobe_myarg { 485 int (*cb)(void *, bus_space_tag_t, bus_space_handle_t); 486 void *cbarg; 487 bus_space_tag_t tag; 488 int res; /* backwards */ 489 }; 490 491 static int vmeprobe_mycb(void *, void *); 492 493 static int 494 vmeprobe_mycb(void *bh, void *arg) 495 { 496 struct vmeprobe_myarg *a = arg; 497 498 a->res = (*a->cb)(a->cbarg, a->tag, (bus_space_handle_t)bh); 499 return (!a->res); 500 } 501 502 static int 503 sparc_vme_probe(void *cookie, vme_addr_t addr, vme_size_t len, vme_am_t mod, 504 vme_datasize_t datasize, 505 int (*callback)(void *, bus_space_tag_t, bus_space_handle_t), 506 void *arg) 507 { 508 struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie; 509 bus_addr_t paddr; 510 bus_size_t size; 511 struct vmeprobe_myarg myarg; 512 int res, i; 513 514 if 
(vmebus_translate(sc, mod, addr, &paddr) != 0) 515 return (EINVAL); 516 517 size = (datasize == VME_D8 ? 1 : (datasize == VME_D16 ? 2 : 4)); 518 519 if (callback) { 520 myarg.cb = callback; 521 myarg.cbarg = arg; 522 myarg.tag = sc->sc_bustag; 523 myarg.res = 0; 524 res = bus_space_probe(sc->sc_bustag, paddr, size, 0, 525 0, vmeprobe_mycb, &myarg); 526 return (res ? 0 : (myarg.res ? myarg.res : EIO)); 527 } 528 529 for (i = 0; i < len / size; i++) { 530 myarg.res = 0; 531 res = bus_space_probe(sc->sc_bustag, paddr, size, 0, 532 0, 0, 0); 533 if (res == 0) 534 return (EIO); 535 paddr += size; 536 } 537 return (0); 538 } 539 540 static int 541 sparc_vme_map(void *cookie, vme_addr_t addr, vme_size_t size, vme_am_t mod, 542 vme_datasize_t datasize, vme_swap_t swap, 543 bus_space_tag_t *tp, bus_space_handle_t *hp, vme_mapresc_t *rp) 544 { 545 struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie; 546 bus_addr_t paddr; 547 int error; 548 549 error = vmebus_translate(sc, mod, addr, &paddr); 550 if (error != 0) 551 return (error); 552 553 *tp = sc->sc_bustag; 554 return (bus_space_map(sc->sc_bustag, paddr, size, 0, hp)); 555 } 556 557 int 558 sparc_vme_mmap_cookie(vme_addr_t addr, vme_am_t mod, bus_space_handle_t *hp) 559 { 560 struct sparcvme_softc *sc = sparcvme_sc; 561 bus_addr_t paddr; 562 int error; 563 564 error = vmebus_translate(sc, mod, addr, &paddr); 565 if (error != 0) 566 return (error); 567 568 return (bus_space_mmap(sc->sc_bustag, paddr, 0, 569 0/*prot is ignored*/, 0)); 570 } 571 572 #ifdef notyet 573 #if defined(SUN4M) 574 static void 575 sparc_vme_iommu_barrier(bus_space_tag_t t, bus_space_handle_t h, 576 bus_size_t offset, bus_size_t size. 577 int flags) 578 { 579 struct vmebusreg *vbp = (struct vmebusreg *)t->cookie; 580 581 /* Read async fault status to flush write-buffers */ 582 (*(volatile int *)&vbp->vmebus_afsr); 583 } 584 #endif /* SUN4M */ 585 #endif 586 587 588 589 /* 590 * VME Interrupt Priority Level to sparc Processor Interrupt Level. 
 */
static int vme_ipl_to_pil[] = {
	0,
	2,
	3,
	5,
	7,
	9,
	11,
	13
};


/*
 * All VME device interrupts go through vmeintr().  This function reads
 * the VME vector from the bus, then dispatches the device interrupt
 * handler.  All handlers for devices that map to the same Processor
 * Interrupt Level (according to the table above) are on a linked list
 * of `sparc_vme_intr_handle' structures.  The head of which is passed
 * down as the argument to `vmeintr(void *arg)'.
 */
struct sparc_vme_intr_handle {
	struct intrhand ih;
	struct sparc_vme_intr_handle *next;
	int	vec;		/* VME interrupt vector */
	int	pri;		/* VME interrupt priority */
	struct sparcvme_softc *sc;/*XXX*/
};

#if defined(SUN4)
/*
 * sun4 VME interrupt dispatcher: fetch the vector via a control-space
 * byte load, then run every handler on the chain whose vector matches.
 * Returns non-zero if any handler claimed the interrupt (or on a
 * spurious vector, which is claimed to avoid "stray interrupt" noise).
 */
int
vmeintr4(void *arg)
{
	struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
	int level, vec;
	int rv = 0;

	/* IACK address: (priority << 1) | 1 -- odd byte selects the vector */
	level = (ihp->pri << 1) | 1;

	vec = ldcontrolb((void *)(AC_VMEINTVEC | level));

	if (vec == -1) {
#ifdef DEBUG
		/*
		 * This seems to happen only with the i82586 based
		 * `ie1' boards.
		 */
		printf("vme: spurious interrupt at VME level %d\n", ihp->pri);
#endif
		return (1); /* XXX - pretend we handled it, for now */
	}

	for (; ihp; ihp = ihp->next)
		if (ihp->vec == vec && ihp->ih.ih_fun) {
			/* Raise to the handler's class IPL before calling */
			splx(ihp->ih.ih_classipl);
			rv |= (ihp->ih.ih_fun)(ihp->ih.ih_arg);
		}

	return (rv);
}
#endif

#if defined(SUN4M)
/*
 * sun4m VME interrupt dispatcher.  The vector byte is read through
 * fkbyte() with a pcb_onfault trap catcher, because a plain read of
 * the vector register can bus-fault (see the disabled variant below).
 */
int
vmeintr4m(void *arg)
{
	struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
	int level, vec;
	int rv = 0;

	level = (ihp->pri << 1) | 1;

#if 0
	int pending;

	/* Flush VME <=> Sbus write buffers */
	(*(volatile int *)&ihp->sc->sc_reg->vmebus_afsr);

	pending = *((int*)ICR_SI_PEND);
	if ((pending & SINTR_VME(ihp->pri)) == 0) {
		printf("vmeintr: non pending at pri %x(p 0x%x)\n",
			ihp->pri, pending);
		return (0);
	}
#endif
#if 0
	/* Why gives this a bus timeout sometimes? */
	vec = ihp->sc->sc_vec->vmebusvec[level];
#else
	/* so, arrange to catch the fault... */
	{
	extern int fkbyte(volatile char *, struct pcb *);
	volatile char *addr = &ihp->sc->sc_vec->vmebusvec[level];
	struct pcb *xpcb;
	u_long saveonfault;
	int s;

	s = splhigh();

	/* Save/restore pcb_onfault around the fault-catching read */
	xpcb = &curlwp->l_addr->u_pcb;
	saveonfault = (u_long)xpcb->pcb_onfault;
	vec = fkbyte(addr, xpcb);
	xpcb->pcb_onfault = (void *)saveonfault;

	splx(s);
	}
#endif

	if (vec == -1) {
#ifdef DEBUG
		/*
		 * This seems to happen only with the i82586 based
		 * `ie1' boards.
704 */ 705 printf("vme: spurious interrupt at VME level %d\n", ihp->pri); 706 printf(" ICR_SI_PEND=0x%x; VME AFSR=0x%x; VME AFAR=0x%x\n", 707 *((int*)ICR_SI_PEND), 708 ihp->sc->sc_reg->vmebus_afsr, 709 ihp->sc->sc_reg->vmebus_afar); 710 #endif 711 return (1); /* XXX - pretend we handled it, for now */ 712 } 713 714 for (; ihp; ihp = ihp->next) 715 if (ihp->vec == vec && ihp->ih.ih_fun) { 716 splx(ihp->ih.ih_classipl); 717 rv |= (ihp->ih.ih_fun)(ihp->ih.ih_arg); 718 } 719 720 return (rv); 721 } 722 #endif /* SUN4M */ 723 724 static int 725 sparc_vme_intr_map(void *cookie, int level, int vec, 726 vme_intr_handle_t *ihp) 727 { 728 struct sparc_vme_intr_handle *ih; 729 730 ih = (vme_intr_handle_t) 731 malloc(sizeof(struct sparc_vme_intr_handle), M_DEVBUF, M_NOWAIT); 732 ih->pri = level; 733 ih->vec = vec; 734 ih->sc = cookie;/*XXX*/ 735 *ihp = ih; 736 return (0); 737 } 738 739 static const struct evcnt * 740 sparc_vme_intr_evcnt(void *cookie, vme_intr_handle_t vih) 741 { 742 743 /* XXX for now, no evcnt parent reported */ 744 return NULL; 745 } 746 747 static void * 748 sparc_vme_intr_establish(void *cookie, vme_intr_handle_t vih, int level, 749 int (*func)(void *), void *arg) 750 { 751 struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie; 752 struct sparc_vme_intr_handle *svih = 753 (struct sparc_vme_intr_handle *)vih; 754 struct intrhand *ih; 755 int pil; 756 757 /* Translate VME priority to processor IPL */ 758 pil = vme_ipl_to_pil[svih->pri]; 759 760 if (level < pil) 761 panic("vme_intr_establish: class lvl (%d) < pil (%d)\n", 762 level, pil); 763 764 svih->ih.ih_fun = func; 765 svih->ih.ih_arg = arg; 766 svih->ih.ih_classipl = level; /* note: used slightly differently 767 than in intr.c (no shift) */ 768 svih->next = NULL; 769 770 /* ensure the interrupt subsystem will call us at this level */ 771 for (ih = intrhand[pil]; ih != NULL; ih = ih->ih_next) 772 if (ih->ih_fun == sc->sc_vmeintr) 773 break; 774 775 if (ih == NULL) { 776 ih = (struct intrhand *) 777 
malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT); 778 if (ih == NULL) 779 panic("vme_addirq"); 780 bzero(ih, sizeof *ih); 781 ih->ih_fun = sc->sc_vmeintr; 782 ih->ih_arg = vih; 783 intr_establish(pil, 0, ih, NULL); 784 } else { 785 svih->next = (vme_intr_handle_t)ih->ih_arg; 786 ih->ih_arg = vih; 787 } 788 return (NULL); 789 } 790 791 static void 792 sparc_vme_unmap(void *cookie, vme_mapresc_t resc) 793 { 794 795 /* Not implemented */ 796 panic("sparc_vme_unmap"); 797 } 798 799 static void 800 sparc_vme_intr_disestablish(void *cookie, void *a) 801 { 802 803 /* Not implemented */ 804 panic("sparc_vme_intr_disestablish"); 805 } 806 807 808 809 /* 810 * VME DMA functions. 811 */ 812 813 #if defined(SUN4) || defined(SUN4M) 814 static void 815 sparc_vct_dmamap_destroy(void *cookie, bus_dmamap_t map) 816 { 817 struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie; 818 819 bus_dmamap_destroy(sc->sc_dmatag, map); 820 } 821 #endif 822 823 #if defined(SUN4) 824 static int 825 sparc_vct4_dmamap_create(void *cookie, vme_size_t size, vme_am_t am, 826 vme_datasize_t datasize, vme_swap_t swap, 827 int nsegments, vme_size_t maxsegsz, 828 vme_addr_t boundary, int flags, 829 bus_dmamap_t *dmamp) 830 { 831 struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie; 832 833 /* Allocate a base map through parent bus ops */ 834 return (bus_dmamap_create(sc->sc_dmatag, size, nsegments, maxsegsz, 835 boundary, flags, dmamp)); 836 } 837 838 static int 839 sparc_vme4_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, 840 void *buf, bus_size_t buflen, 841 struct proc *p, int flags) 842 { 843 bus_addr_t dva; 844 bus_size_t sgsize; 845 u_long ldva; 846 vaddr_t va, voff; 847 pmap_t pmap; 848 int pagesz = PAGE_SIZE; 849 int error; 850 851 cache_flush(buf, buflen); /* XXX - move to bus_dma_sync */ 852 853 va = (vaddr_t)buf; 854 voff = va & (pagesz - 1); 855 va &= -pagesz; 856 857 /* 858 * Allocate an integral number of pages from DVMA space 859 * covering the passed buffer. 
	 */
	sgsize = (buflen + voff + pagesz - 1) & -pagesz;
	error = extent_alloc(vme_dvmamap, sgsize, pagesz,
			     map->_dm_boundary,
			     (flags & BUS_DMA_NOWAIT) == 0
					? EX_WAITOK
					: EX_NOWAIT,
			     &ldva);
	if (error != 0)
		return (error);
	dva = (bus_addr_t)ldva;

	map->dm_mapsize = buflen;
	map->dm_nsegs = 1;
	/* Adjust DVMA address to VME view */
	map->dm_segs[0].ds_addr = dva + voff - VME4_DVMA_BASE;
	map->dm_segs[0].ds_len = buflen;
	map->dm_segs[0]._ds_sgsize = sgsize;

	pmap = (p == NULL) ? pmap_kernel() : p->p_vmspace->vm_map.pmap;

	/* Double-map each page of the buffer into DVMA space, uncached */
	for (; sgsize != 0; ) {
		paddr_t pa;
		/*
		 * Get the physical address for this page.
		 */
		(void) pmap_extract(pmap, va, &pa);

#ifdef notyet
		if (have_iocache)
			pa |= PG_IOC;
#endif
		pmap_enter(pmap_kernel(), dva,
			   pa | PMAP_NC,
			   VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);

		dva += pagesz;
		va += pagesz;
		sgsize -= pagesz;
	}
	pmap_update(pmap_kernel());

	return (0);
}

/*
 * sun4 VME DMA map unload: undo the DVMA double-mapping made at load
 * time and return the pages to the `vme_dvmamap' extent.
 */
static void
sparc_vme4_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	bus_dma_segment_t *segs = map->dm_segs;
	int nsegs = map->dm_nsegs;
	bus_addr_t dva;
	bus_size_t len;
	int i, s, error;

	for (i = 0; i < nsegs; i++) {
		/* Go from VME to CPU view */
		dva = segs[i].ds_addr + VME4_DVMA_BASE;
		dva &= -PAGE_SIZE;
		len = segs[i]._ds_sgsize;

		/* Remove double-mapping in DVMA space */
		pmap_remove(pmap_kernel(), dva, dva + len);

		/* Release DVMA space */
		s = splhigh();
		error = extent_free(vme_dvmamap, dva, len, EX_NOWAIT);
		splx(s);
		if (error != 0)
			printf("warning: %ld of DVMA space lost\n", len);
	}
	pmap_update(pmap_kernel());

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * sun4 VME DMA sync: no-op for now; see XXX below.
 */
static void
sparc_vme4_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
		       bus_addr_t offset, bus_size_t len, int ops)
{

	/*
	 * XXX Should perform cache flushes as necessary (e.g. 4/200 W/B).
	 * Currently the cache is flushed in bus_dma_load()...
	 */
}
#endif /* SUN4 */

#if defined(SUN4M)
/*
 * bus_dma-level create is not supported on the sun4m VME bus; maps
 * must be created through the chipset tag (vme_dmamap_create), which
 * carries the address-modifier information needed to pick a DVMA region.
 */
static int
sparc_vme_iommu_dmamap_create(bus_dma_tag_t t, bus_size_t size,
			      int nsegments, bus_size_t maxsegsz,
			      bus_size_t boundary, int flags,
			      bus_dmamap_t *dmamp)
{

	printf("sparc_vme_dmamap_create: please use `vme_dmamap_create'\n");
	return (EINVAL);
}

/*
 * sun4m VME DMA map creation: build a base map through the parent bus,
 * then constrain its DVMA allocations to the IO-cache line size and to
 * the DVMA subregion implied by the address modifier (1MB for A16/A24,
 * 8MB for A32).
 */
static int
sparc_vct_iommu_dmamap_create(void *cookie, vme_size_t size, vme_am_t am,
			      vme_datasize_t datasize, vme_swap_t swap,
			      int nsegments, vme_size_t maxsegsz,
			      vme_addr_t boundary, int flags,
			      bus_dmamap_t *dmamp)
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
	bus_dmamap_t map;
	int error;

	/* Allocate a base map through parent bus ops */
	error = bus_dmamap_create(sc->sc_dmatag, size, nsegments, maxsegsz,
				  boundary, flags, &map);
	if (error != 0)
		return (error);

	/*
	 * Each I/O cache line maps to a 8K section of VME DVMA space, so
	 * we must ensure that DVMA alloctions are always 8K aligned.
	 */
	map->_dm_align = VME_IOC_PAGESZ;

	/* Set map region based on Address Modifier */
	switch ((am & VME_AM_ADRSIZEMASK)) {
	case VME_AM_A16:
	case VME_AM_A24:
		/* 1 MB of DVMA space */
		map->_dm_ex_start = VME_IOMMU_DVMA_AM24_BASE;
		map->_dm_ex_end = VME_IOMMU_DVMA_AM24_END;
		break;
	case VME_AM_A32:
		/* 8 MB of DVMA space */
		map->_dm_ex_start = VME_IOMMU_DVMA_AM32_BASE;
		map->_dm_ex_end = VME_IOMMU_DVMA_AM32_END;
		break;
	}

	*dmamp = map;
	return (0);
}

/*
 * sun4m VME DMA map load: load through the parent (IOMMU) tag, mark the
 * covering IO-cache lines write-enabled, and convert the resulting DVMA
 * address to the VME view by subtracting VME_IOMMU_DVMA_BASE.
 */
static int
sparc_vme_iommu_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map,
			    void *buf, bus_size_t buflen,
			    struct proc *p, int flags)
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)t->_cookie;
	volatile uint32_t *ioctags;
	int error;

	/* Round request to a multiple of the I/O cache size */
	buflen = (buflen + VME_IOC_PAGESZ - 1) & -VME_IOC_PAGESZ;
	error = bus_dmamap_load(sc->sc_dmatag, map, buf, buflen, p, flags);
	if (error != 0)
		return (error);

	/* Allocate I/O cache entries for this range */
	ioctags = sc->sc_ioctags + VME_IOC_LINE(map->dm_segs[0].ds_addr);
	while (buflen > 0) {
		*ioctags = VME_IOC_IC | VME_IOC_W;
		ioctags += VME_IOC_LINESZ/sizeof(*ioctags);
		buflen -= VME_IOC_PAGESZ;
	}

	/*
	 * Adjust DVMA address to VME view.
	 * Note: the DVMA base address is the same for all
	 * VME address spaces.
	 */
	map->dm_segs[0].ds_addr -= VME_IOMMU_DVMA_BASE;
	return (0);
}


/*
 * sun4m VME DMA map unload: flush the IO-cache lines covering the
 * segment, wait for the flushes to drain, then unload through the
 * parent (IOMMU) tag.
 */
static void
sparc_vme_iommu_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)t->_cookie;
	volatile uint32_t *flushregs;
	int len;

	/* Go from VME to CPU view */
	map->dm_segs[0].ds_addr += VME_IOMMU_DVMA_BASE;

	/* Flush VME I/O cache */
	len = map->dm_segs[0]._ds_sgsize;
	flushregs = sc->sc_iocflush + VME_IOC_LINE(map->dm_segs[0].ds_addr);
	while (len > 0) {
		*flushregs = 0;
		flushregs += VME_IOC_LINESZ/sizeof(*flushregs);
		len -= VME_IOC_PAGESZ;
	}

	/*
	 * Start a read from `tag space' which will not complete until
	 * all cache flushes have finished
	 */
	(*sc->sc_ioctags);

	bus_dmamap_unload(sc->sc_dmatag, map);
}

/*
 * sun4m VME DMA sync: no-op for now; see XXX below.
 */
static void
sparc_vme_iommu_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
			    bus_addr_t offset, bus_size_t len, int ops)
{

	/*
	 * XXX Should perform cache flushes as necessary.
	 */
}
#endif /* SUN4M */

#if defined(SUN4) || defined(SUN4M)
/*
 * Map DMA-safe memory into kernel virtual space via the parent bus tag.
 */
static int
sparc_vme_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
		     size_t size, void **kvap, int flags)
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)t->_cookie;

	return (bus_dmamem_map(sc->sc_dmatag, segs, nsegs, size, kvap, flags));
}
#endif /* SUN4 || SUN4M */