/*	$NetBSD: vme_machdep.c,v 1.45 2003/01/18 06:44:59 thorpej Exp $	*/

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/extent.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/errno.h>

#include <sys/proc.h>
#include <sys/user.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#define _SPARC_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <sparc/sparc/iommuvar.h>
#include <machine/autoconf.h>
#include <machine/oldmon.h>
#include <machine/cpu.h>
#include <machine/ctlreg.h>

#include <dev/vme/vmereg.h>
#include <dev/vme/vmevar.h>

#include <sparc/sparc/asm.h>
#include <sparc/sparc/vaddrs.h>
#include <sparc/sparc/cpuvar.h>
#include <sparc/dev/vmereg.h>

struct sparcvme_softc {
	struct device	 sc_dev;	/* base device */
	bus_space_tag_t	 sc_bustag;
	bus_dma_tag_t	 sc_dmatag;
	struct vmebusreg *sc_reg;	/* VME control registers */
	struct vmebusvec *sc_vec;	/* VME interrupt vector */
	struct rom_range *sc_range;	/* ROM range property */
	int		 sc_nrange;
	volatile u_int32_t *sc_ioctags;	/* VME IO-cache tag registers */
	volatile u_int32_t *sc_iocflush;/* VME IO-cache flush registers */
	int		 (*sc_vmeintr) __P((void *));
};
struct sparcvme_softc *sparcvme_sc;/*XXX*/

/* autoconfiguration driver */
static int	vmematch_iommu __P((struct device *, struct cfdata *, void *));
static void	vmeattach_iommu __P((struct device *, struct device *, void *));
static int	vmematch_mainbus __P((struct device *, struct cfdata *, void *));
static void	vmeattach_mainbus __P((struct device *, struct device *, void *));
#if defined(SUN4)
int		vmeintr4 __P((void *));
#endif
#if defined(SUN4M)
int		vmeintr4m __P((void *));
static int	sparc_vme_error __P((void));
#endif


static int	sparc_vme_probe __P((void *, vme_addr_t, vme_size_t,
		    vme_am_t, vme_datasize_t,
		    int (*) __P((void *, bus_space_tag_t, bus_space_handle_t)),
		    void *));
static int	sparc_vme_map __P((void *, vme_addr_t, vme_size_t, vme_am_t,
		    vme_datasize_t, vme_swap_t,
		    bus_space_tag_t *, bus_space_handle_t *,
		    vme_mapresc_t *));
static void	sparc_vme_unmap __P((void *, vme_mapresc_t));
static int	sparc_vme_intr_map __P((void *, int, int, vme_intr_handle_t *));
static const struct evcnt *sparc_vme_intr_evcnt __P((void *,
		    vme_intr_handle_t));
static void *	sparc_vme_intr_establish __P((void *, vme_intr_handle_t, int,
		    int (*) __P((void *)), void *));
static void	sparc_vme_intr_disestablish __P((void *, void *));

static int	vmebus_translate __P((struct sparcvme_softc *, vme_am_t,
		    vme_addr_t, bus_addr_t *));
#if defined(SUN4M)
static void	sparc_vme_iommu_barrier __P((bus_space_tag_t,
		    bus_space_handle_t, bus_size_t, bus_size_t, int));

#endif

/*
 * DMA functions.
 */
static void	sparc_vct_dmamap_destroy __P((void *, bus_dmamap_t));

#if defined(SUN4)
static int	sparc_vct4_dmamap_create __P((void *, vme_size_t, vme_am_t,
		    vme_datasize_t, vme_swap_t, int, vme_size_t, vme_addr_t,
		    int, bus_dmamap_t *));
static int	sparc_vme4_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
		    bus_size_t, struct proc *, int));
static void	sparc_vme4_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
static void	sparc_vme4_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
		    bus_addr_t, bus_size_t, int));
#endif

#if defined(SUN4M)
static int	sparc_vct_iommu_dmamap_create __P((void *, vme_size_t, vme_am_t,
		    vme_datasize_t, vme_swap_t, int, vme_size_t, vme_addr_t,
		    int, bus_dmamap_t *));
static int	sparc_vme_iommu_dmamap_create __P((bus_dma_tag_t, bus_size_t,
		    int, bus_size_t, bus_size_t, int, bus_dmamap_t *));

static int	sparc_vme_iommu_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t,
		    void *, bus_size_t, struct proc *, int));
static void	sparc_vme_iommu_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
static void	sparc_vme_iommu_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
		    bus_addr_t, bus_size_t, int));
#endif

static int	sparc_vme_dmamem_map __P((bus_dma_tag_t, bus_dma_segment_t *,
		    int, size_t, caddr_t *, int));
#if 0
static void	sparc_vme_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t));
static void	sparc_vme_dmamem_unmap __P((bus_dma_tag_t, caddr_t, size_t));
static paddr_t	sparc_vme_dmamem_mmap __P((bus_dma_tag_t,
		    bus_dma_segment_t *, int, off_t, int, int));
#endif

int sparc_vme_mmap_cookie __P((vme_addr_t, vme_am_t, bus_space_handle_t *));

CFATTACH_DECL(vme_mainbus, sizeof(struct sparcvme_softc),
    vmematch_mainbus, vmeattach_mainbus, NULL, NULL);

CFATTACH_DECL(vme_iommu, sizeof(struct sparcvme_softc),
    vmematch_iommu, vmeattach_iommu, NULL, NULL);

int	(*vmeerr_handler) __P((void));

#define VMEMOD_D32 0x40 /* ??? */

/* If the PROM does not provide the `ranges' property, we make up our own */
struct rom_range vmebus_translations[] = {
#define _DS (VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA)
	{ VME_AM_A16|_DS, 0, PMAP_VME16, 0xffff0000, 0 },
	{ VME_AM_A24|_DS, 0, PMAP_VME16, 0xff000000, 0 },
	{ VME_AM_A32|_DS, 0, PMAP_VME16, 0x00000000, 0 },
	{ VME_AM_A16|VMEMOD_D32|_DS, 0, PMAP_VME32, 0xffff0000, 0 },
	{ VME_AM_A24|VMEMOD_D32|_DS, 0, PMAP_VME32, 0xff000000, 0 },
	{ VME_AM_A32|VMEMOD_D32|_DS, 0, PMAP_VME32, 0x00000000, 0 }
#undef _DS
};

/*
 * The VME bus logic on sun4 machines maps DMA requests in the first MB
 * of VME space to the last MB of DVMA space.  `vme_dvmamap' is used
 * for DVMA space allocations.  The DMA addresses returned by
 * bus_dmamap_load*() must be relocated by -VME4_DVMA_BASE.
 */
struct extent *vme_dvmamap;
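
/*
 * Illustrative sketch, not compiled: the sun4 address relocation
 * described above.  A buffer double-mapped at DVMA address `dva'
 * (within [VME4_DVMA_BASE, VME4_DVMA_END]) appears to a VME master
 * at the offset into that last MB, i.e. at dva - VME4_DVMA_BASE.
 */
#if 0
static bus_addr_t
vme4_dvma_to_vme(bus_addr_t dva)	/* hypothetical helper */
{

	/* e.g. dva == VME4_DVMA_BASE + 0x1000 yields VME address 0x1000 */
	return (dva - VME4_DVMA_BASE);
}
#endif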
/*
 * The VME hardware on the sun4m IOMMU maps the first 8MB of 32-bit
 * VME space to the last 8MB of DVMA space and the first 1MB of
 * 24-bit VME space to the first 1MB of the last 8MB of DVMA space
 * (thus 24-bit VME space overlaps the first 1MB of 32-bit space).
 * The following constants define subregions in the IOMMU DVMA map
 * for VME DVMA allocations.  The DMA addresses returned by
 * bus_dmamap_load*() must be relocated by -VME_IOMMU_DVMA_BASE.
 */
#define VME_IOMMU_DVMA_BASE		0xff800000
#define VME_IOMMU_DVMA_AM24_BASE	VME_IOMMU_DVMA_BASE
#define VME_IOMMU_DVMA_AM24_END		0xff900000
#define VME_IOMMU_DVMA_AM32_BASE	VME_IOMMU_DVMA_BASE
#define VME_IOMMU_DVMA_AM32_END		IOMMU_DVMA_END
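
/*
 * Worked example for the windows defined above: an A24 allocation
 * comes from the 1MB window [0xff800000, 0xff900000), an A32
 * allocation from the full 8MB window starting at the same base.
 * Either way the VME-visible address is the DVMA address minus
 * VME_IOMMU_DVMA_BASE, e.g. DVMA 0xff802000 is seen by a VME master
 * at address 0x2000 in both the 24-bit and the 32-bit space.
 */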
struct sparc_bus_space_tag sparc_vme_bus_tag = {
	NULL, /* cookie */
	NULL, /* parent bus tag */
	NULL, /* ranges */
	0,    /* nranges */
	NULL, /* bus_map */
	NULL, /* bus_unmap */
	NULL, /* bus_subregion */
	NULL, /* barrier */
	NULL, /* mmap */
	NULL, /* intr_establish */
#if __FULL_SPARC_BUS_SPACE
	NULL, /* read_1 */
	NULL, /* read_2 */
	NULL, /* read_4 */
	NULL, /* read_8 */
	NULL, /* write_1 */
	NULL, /* write_2 */
	NULL, /* write_4 */
	NULL  /* write_8 */
#endif
};

struct vme_chipset_tag sparc_vme_chipset_tag = {
	NULL,
	sparc_vme_map,
	sparc_vme_unmap,
	sparc_vme_probe,
	sparc_vme_intr_map,
	sparc_vme_intr_evcnt,
	sparc_vme_intr_establish,
	sparc_vme_intr_disestablish,
	0, 0, 0 /* bus specific DMA stuff */
};


#if defined(SUN4)
struct sparc_bus_dma_tag sparc_vme4_dma_tag = {
	NULL,	/* cookie */
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	sparc_vme4_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	sparc_vme4_dmamap_unload,
	sparc_vme4_dmamap_sync,

	_bus_dmamem_alloc,
	_bus_dmamem_free,
	sparc_vme_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};
#endif

#if defined(SUN4M)
struct sparc_bus_dma_tag sparc_vme_iommu_dma_tag = {
	NULL,	/* cookie */
	sparc_vme_iommu_dmamap_create,
	_bus_dmamap_destroy,
	sparc_vme_iommu_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	sparc_vme_iommu_dmamap_unload,
	sparc_vme_iommu_dmamap_sync,

	_bus_dmamem_alloc,
	_bus_dmamem_free,
	sparc_vme_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};
#endif


int
vmematch_mainbus(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct mainbus_attach_args *ma = aux;

	if (!CPU_ISSUN4)
		return (0);

	return (strcmp("vme", ma->ma_name) == 0);
}

int
vmematch_iommu(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct iommu_attach_args *ia = aux;

	return (strcmp("vme", ia->iom_name) == 0);
}


void
vmeattach_mainbus(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#if defined(SUN4)
	struct mainbus_attach_args *ma = aux;
	struct sparcvme_softc *sc = (struct sparcvme_softc *)self;
	struct vmebus_attach_args vba;

	if (self->dv_unit > 0) {
		printf(" unsupported\n");
		return;
	}

	sc->sc_bustag = ma->ma_bustag;
	sc->sc_dmatag = ma->ma_dmatag;

	/* VME interrupt entry point */
	sc->sc_vmeintr = vmeintr4;

/*XXX*/	sparc_vme_chipset_tag.cookie = self;
/*XXX*/	sparc_vme_chipset_tag.vct_dmamap_create = sparc_vct4_dmamap_create;
/*XXX*/	sparc_vme_chipset_tag.vct_dmamap_destroy = sparc_vct_dmamap_destroy;
/*XXX*/	sparc_vme4_dma_tag._cookie = self;

#if 0
	sparc_vme_bus_tag.parent = ma->ma_bustag;
	vba.vba_bustag = &sparc_vme_bus_tag;
#endif
	vba.va_vct = &sparc_vme_chipset_tag;
	vba.va_bdt = &sparc_vme4_dma_tag;
	vba.va_slaveconfig = 0;

	/* Fall back to our own `range' construction */
	sc->sc_range = vmebus_translations;
	sc->sc_nrange =
	    sizeof(vmebus_translations)/sizeof(vmebus_translations[0]);

	vme_dvmamap = extent_create("vmedvma", VME4_DVMA_BASE, VME4_DVMA_END,
				    M_DEVBUF, 0, 0, EX_NOWAIT);
	if (vme_dvmamap == NULL)
		panic("vme: unable to allocate DVMA map");

	printf("\n");
	(void)config_found(self, &vba, 0);

#endif
	return;
}
/* sun4m vmebus */
void
vmeattach_iommu(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#if defined(SUN4M)
	struct sparcvme_softc *sc = (struct sparcvme_softc *)self;
	struct iommu_attach_args *ia = aux;
	struct vmebus_attach_args vba;
	bus_space_handle_t bh;
	int node;
	int cline;

	if (self->dv_unit > 0) {
		printf(" unsupported\n");
		return;
	}

	sc->sc_bustag = ia->iom_bustag;
	sc->sc_dmatag = ia->iom_dmatag;

	/* VME interrupt entry point */
	sc->sc_vmeintr = vmeintr4m;

/*XXX*/	sparc_vme_chipset_tag.cookie = self;
/*XXX*/	sparc_vme_chipset_tag.vct_dmamap_create = sparc_vct_iommu_dmamap_create;
/*XXX*/	sparc_vme_chipset_tag.vct_dmamap_destroy = sparc_vct_dmamap_destroy;
/*XXX*/	sparc_vme_iommu_dma_tag._cookie = self;
	sparc_vme_bus_tag.sparc_bus_barrier = sparc_vme_iommu_barrier;

#if 0
	vba.vba_bustag = &sparc_vme_bus_tag;
#endif
	vba.va_vct = &sparc_vme_chipset_tag;
	vba.va_bdt = &sparc_vme_iommu_dma_tag;
	vba.va_slaveconfig = 0;

	node = ia->iom_node;

	/*
	 * Map VME control space
	 */
	if (ia->iom_nreg < 2) {
		printf("%s: only %d register sets\n", self->dv_xname,
		    ia->iom_nreg);
		return;
	}

	if (bus_space_map(ia->iom_bustag,
			  (bus_addr_t) BUS_ADDR(ia->iom_reg[0].oa_space,
						ia->iom_reg[0].oa_base),
			  (bus_size_t)ia->iom_reg[0].oa_size,
			  BUS_SPACE_MAP_LINEAR,
			  &bh) != 0) {
		panic("%s: can't map vmebusreg", self->dv_xname);
	}
	sc->sc_reg = (struct vmebusreg *)bh;

	if (bus_space_map(ia->iom_bustag,
			  (bus_addr_t) BUS_ADDR(ia->iom_reg[1].oa_space,
						ia->iom_reg[1].oa_base),
			  (bus_size_t)ia->iom_reg[1].oa_size,
			  BUS_SPACE_MAP_LINEAR,
			  &bh) != 0) {
		panic("%s: can't map vmebusvec", self->dv_xname);
	}
	sc->sc_vec = (struct vmebusvec *)bh;

	/*
	 * Map VME IO cache tags and flush control.
	 */
	if (bus_space_map(ia->iom_bustag,
			  (bus_addr_t) BUS_ADDR(
				ia->iom_reg[1].oa_space,
				ia->iom_reg[1].oa_base + VME_IOC_TAGOFFSET),
			  VME_IOC_SIZE,
			  BUS_SPACE_MAP_LINEAR,
			  &bh) != 0) {
		panic("%s: can't map IOC tags", self->dv_xname);
	}
	sc->sc_ioctags = (u_int32_t *)bh;

	if (bus_space_map(ia->iom_bustag,
			  (bus_addr_t) BUS_ADDR(
				ia->iom_reg[1].oa_space,
				ia->iom_reg[1].oa_base + VME_IOC_FLUSHOFFSET),
			  VME_IOC_SIZE,
			  BUS_SPACE_MAP_LINEAR,
			  &bh) != 0) {
		panic("%s: can't map IOC flush registers", self->dv_xname);
	}
	sc->sc_iocflush = (u_int32_t *)bh;

/*XXX*/	sparc_vme_bus_tag.cookie = sc->sc_reg;

	/*
	 * Get the `ranges' property.
	 */
	if (PROM_getprop(node, "ranges", sizeof(struct rom_range),
			 &sc->sc_nrange, (void **)&sc->sc_range) != 0) {
		panic("%s: can't get ranges property", self->dv_xname);
	}

	sparcvme_sc = sc;
	vmeerr_handler = sparc_vme_error;

	/*
	 * Invalidate all IO-cache entries.
	 */
	for (cline = VME_IOC_SIZE/VME_IOC_LINESZ; cline > 0;) {
		sc->sc_ioctags[--cline] = 0;
	}

	/* Enable IO-cache */
	sc->sc_reg->vmebus_cr |= VMEBUS_CR_C;

	printf(": version 0x%x\n",
	    sc->sc_reg->vmebus_cr & VMEBUS_CR_IMPL);

	(void)config_found(self, &vba, 0);
#endif
}

#if defined(SUN4M)
static int
sparc_vme_error()
{
	struct sparcvme_softc *sc = sparcvme_sc;
	u_int32_t afsr, afpa;
	char bits[64];

	afsr = sc->sc_reg->vmebus_afsr;
	afpa = sc->sc_reg->vmebus_afar;
	printf("VME error:\n\tAFSR %s\n",
	    bitmask_snprintf(afsr, VMEBUS_AFSR_BITS, bits, sizeof(bits)));
	printf("\taddress: 0x%x%x\n", afsr, afpa);
	return (0);
}
#endif

int
vmebus_translate(sc, mod, addr, bap)
	struct sparcvme_softc *sc;
	vme_am_t mod;
	vme_addr_t addr;
	bus_addr_t *bap;
{
	int i;

	for (i = 0; i < sc->sc_nrange; i++) {
		struct rom_range *rp = &sc->sc_range[i];

		if (rp->cspace != mod)
			continue;

		/* We've found the connection to the parent bus */
		*bap = BUS_ADDR(rp->pspace, rp->poffset + addr);
		return (0);
	}
	return (ENOENT);
}
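
/*
 * Worked example: with the fallback `vmebus_translations' table above,
 * an A16 supervisor-data access (mod = VME_AM_A16|_DS) at VME address
 * 0x8000 matches the entry whose poffset is 0xffff0000, so
 * vmebus_translate() stores BUS_ADDR(PMAP_VME16, 0xffff0000 + 0x8000)
 * in *bap and returns 0.
 */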
struct vmeprobe_myarg {
	int (*cb) __P((void *, bus_space_tag_t, bus_space_handle_t));
	void *cbarg;
	bus_space_tag_t tag;
	int res; /* backwards */
};

static int vmeprobe_mycb __P((void *, void *));
static int
vmeprobe_mycb(bh, arg)
	void *bh, *arg;
{
	struct vmeprobe_myarg *a = arg;

	a->res = (*a->cb)(a->cbarg, a->tag, (bus_space_handle_t)bh);
	return (!a->res);
}

int
sparc_vme_probe(cookie, addr, len, mod, datasize, callback, arg)
	void *cookie;
	vme_addr_t addr;
	vme_size_t len;
	vme_am_t mod;
	vme_datasize_t datasize;
	int (*callback) __P((void *, bus_space_tag_t, bus_space_handle_t));
	void *arg;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
	bus_addr_t paddr;
	bus_size_t size;
	struct vmeprobe_myarg myarg;
	int res, i;

	if (vmebus_translate(sc, mod, addr, &paddr) != 0)
		return (EINVAL);

	size = (datasize == VME_D8 ? 1 : (datasize == VME_D16 ? 2 : 4));

	if (callback) {
		myarg.cb = callback;
		myarg.cbarg = arg;
		myarg.tag = sc->sc_bustag;
		myarg.res = 0;
		res = bus_space_probe(sc->sc_bustag, paddr, size, 0,
				      0, vmeprobe_mycb, &myarg);
		return (res ? 0 : (myarg.res ? myarg.res : EIO));
	}

	for (i = 0; i < len / size; i++) {
		myarg.res = 0;
		res = bus_space_probe(sc->sc_bustag, paddr, size, 0,
				      0, 0, 0);
		if (res == 0)
			return (EIO);
		paddr += size;
	}
	return (0);
}
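
/*
 * Illustrative sketch, not compiled: how a child driver's match
 * function might use the probe entry point above through the usual
 * <dev/vme/vmevar.h> wrapper.  The driver name, card address and
 * address modifier are hypothetical.
 */
#if 0
static int
xyzmatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct vme_attach_args *va = aux;

	/* succeed only if a D16 read at the (hypothetical) base works */
	return (vme_probe(va->va_vct, 0xff4000, 2,
	    VME_AM_A24 | VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA,
	    VME_D16, NULL, NULL) == 0);
}
#endif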
int
sparc_vme_map(cookie, addr, size, mod, datasize, swap, tp, hp, rp)
	void *cookie;
	vme_addr_t addr;
	vme_size_t size;
	vme_am_t mod;
	vme_datasize_t datasize;
	vme_swap_t swap;
	bus_space_tag_t *tp;
	bus_space_handle_t *hp;
	vme_mapresc_t *rp;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
	bus_addr_t paddr;
	int error;

	error = vmebus_translate(sc, mod, addr, &paddr);
	if (error != 0)
		return (error);

	*tp = sc->sc_bustag;
	return (bus_space_map(sc->sc_bustag, paddr, size, 0, hp));
}

int
sparc_vme_mmap_cookie(addr, mod, hp)
	vme_addr_t addr;
	vme_am_t mod;
	bus_space_handle_t *hp;
{
	struct sparcvme_softc *sc = sparcvme_sc;
	bus_addr_t paddr;
	int error;

	error = vmebus_translate(sc, mod, addr, &paddr);
	if (error != 0)
		return (error);

	return (bus_space_mmap(sc->sc_bustag, paddr, 0,
	    0/*prot is ignored*/, 0));
}

#if defined(SUN4M)
void
sparc_vme_iommu_barrier(t, h, offset, size, flags)
	bus_space_tag_t t;
	bus_space_handle_t h;
	bus_size_t offset;
	bus_size_t size;
	int flags;
{
	struct vmebusreg *vbp = (struct vmebusreg *)t->cookie;

	/* Read async fault status to flush write-buffers */
	(*(volatile int *)&vbp->vmebus_afsr);
}
#endif



/*
 * VME Interrupt Priority Level to sparc Processor Interrupt Level.
 */
static int vme_ipl_to_pil[] = {
	0,
	2,
	3,
	5,
	7,
	9,
	11,
	13
};


/*
 * All VME device interrupts go through vmeintr().  This function reads
 * the VME vector from the bus, then dispatches the device interrupt
 * handler.  All handlers for devices that map to the same Processor
 * Interrupt Level (according to the table above) are on a linked list
 * of `sparc_vme_intr_handle' structures, the head of which is passed
 * down as the argument to `vmeintr(void *arg)'.
 */
struct sparc_vme_intr_handle {
	struct intrhand ih;
	struct sparc_vme_intr_handle *next;
	int	vec;		/* VME interrupt vector */
	int	pri;		/* VME interrupt priority */
	struct sparcvme_softc *sc;/*XXX*/
};

#if defined(SUN4)
int
vmeintr4(arg)
	void *arg;
{
	struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
	int level, vec;
	int rv = 0;

	level = (ihp->pri << 1) | 1;

	vec = ldcontrolb((caddr_t)(AC_VMEINTVEC | level));

	if (vec == -1) {
#ifdef DEBUG
		/*
		 * This seems to happen only with the i82586 based
		 * `ie1' boards.
		 */
		printf("vme: spurious interrupt at VME level %d\n", ihp->pri);
#endif
		return (1); /* XXX - pretend we handled it, for now */
	}

	for (; ihp; ihp = ihp->next)
		if (ihp->vec == vec && ihp->ih.ih_fun) {
			splx(ihp->ih.ih_classipl);
			rv |= (ihp->ih.ih_fun)(ihp->ih.ih_arg);
		}

	return (rv);
}
#endif

#if defined(SUN4M)
int
vmeintr4m(arg)
	void *arg;
{
	struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
	int level, vec;
	int rv = 0;

	level = (ihp->pri << 1) | 1;

#if 0
	int pending;

	/* Flush VME <=> Sbus write buffers */
	(*(volatile int *)&ihp->sc->sc_reg->vmebus_afsr);

	pending = *((int*)ICR_SI_PEND);
	if ((pending & SINTR_VME(ihp->pri)) == 0) {
		printf("vmeintr: not pending at pri %x (pending 0x%x)\n",
			ihp->pri, pending);
		return (0);
	}
#endif
#if 0
	/* Why does this sometimes cause a bus timeout? */
	vec = ihp->sc->sc_vec->vmebusvec[level];
#else
	/* so, arrange to catch the fault... */
	{
	extern struct user *proc0paddr;
	extern int fkbyte __P((caddr_t, struct pcb *));
	caddr_t addr = (caddr_t)&ihp->sc->sc_vec->vmebusvec[level];
	struct pcb *xpcb;
	u_long saveonfault;
	int s;

	s = splhigh();
	if (curlwp == NULL)
		xpcb = (struct pcb *)proc0paddr;
	else
		xpcb = &curlwp->l_addr->u_pcb;

	saveonfault = (u_long)xpcb->pcb_onfault;
	vec = fkbyte(addr, xpcb);
	xpcb->pcb_onfault = (caddr_t)saveonfault;

	splx(s);
	}
#endif

	if (vec == -1) {
#ifdef DEBUG
		/*
		 * This seems to happen only with the i82586 based
		 * `ie1' boards.
		 */
		printf("vme: spurious interrupt at VME level %d\n", ihp->pri);
		printf("    ICR_SI_PEND=0x%x; VME AFSR=0x%x; VME AFAR=0x%x\n",
			*((int*)ICR_SI_PEND),
			ihp->sc->sc_reg->vmebus_afsr,
			ihp->sc->sc_reg->vmebus_afar);
#endif
		return (1); /* XXX - pretend we handled it, for now */
	}

	for (; ihp; ihp = ihp->next)
		if (ihp->vec == vec && ihp->ih.ih_fun) {
			splx(ihp->ih.ih_classipl);
			rv |= (ihp->ih.ih_fun)(ihp->ih.ih_arg);
		}

	return (rv);
}
#endif
int
sparc_vme_intr_map(cookie, level, vec, ihp)
	void *cookie;
	int level;
	int vec;
	vme_intr_handle_t *ihp;
{
	struct sparc_vme_intr_handle *ih;

	ih = (vme_intr_handle_t)
	    malloc(sizeof(struct sparc_vme_intr_handle), M_DEVBUF, M_NOWAIT);
	ih->pri = level;
	ih->vec = vec;
	ih->sc = cookie;/*XXX*/
	*ihp = ih;
	return (0);
}

const struct evcnt *
sparc_vme_intr_evcnt(cookie, vih)
	void *cookie;
	vme_intr_handle_t vih;
{

	/* XXX for now, no evcnt parent reported */
	return NULL;
}

void *
sparc_vme_intr_establish(cookie, vih, level, func, arg)
	void *cookie;
	vme_intr_handle_t vih;
	int level;
	int (*func) __P((void *));
	void *arg;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
	struct sparc_vme_intr_handle *svih =
			(struct sparc_vme_intr_handle *)vih;
	struct intrhand *ih;
	int pil;

	/* Translate VME priority to processor IPL */
	pil = vme_ipl_to_pil[svih->pri];

	if (level < pil)
		panic("vme_intr_establish: class lvl (%d) < pil (%d)\n",
			level, pil);

	svih->ih.ih_fun = func;
	svih->ih.ih_arg = arg;
	svih->ih.ih_classipl = level;	/* note: used slightly differently
					   than in intr.c (no shift) */
	svih->next = NULL;

	/* ensure the interrupt subsystem will call us at this level */
	for (ih = intrhand[pil]; ih != NULL; ih = ih->ih_next)
		if (ih->ih_fun == sc->sc_vmeintr)
			break;

	if (ih == NULL) {
		ih = (struct intrhand *)
			malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
		if (ih == NULL)
			panic("vme_addirq");
		bzero(ih, sizeof *ih);
		ih->ih_fun = sc->sc_vmeintr;
		ih->ih_arg = vih;
		intr_establish(pil, 0, ih, NULL);
	} else {
		svih->next = (vme_intr_handle_t)ih->ih_arg;
		ih->ih_arg = vih;
	}
	return (NULL);
}
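
/*
 * Illustrative sketch, not compiled: a child driver hooking up its
 * interrupt through the two entry points above, via the usual
 * <dev/vme/vmevar.h> wrappers.  The VME level (3), vector (0x40),
 * handler and softc names are hypothetical.
 */
#if 0
	vme_intr_handle_t ih;

	if (vme_intr_map(va->va_vct, 3, 0x40, &ih) == 0)
		(void)vme_intr_establish(va->va_vct, ih, IPL_BIO,
		    xyzintr, sc);
#endif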
void
sparc_vme_unmap(cookie, resc)
	void * cookie;
	vme_mapresc_t resc;
{
	/* Not implemented */
	panic("sparc_vme_unmap");
}

void
sparc_vme_intr_disestablish(cookie, a)
	void *cookie;
	void *a;
{
	/* Not implemented */
	panic("sparc_vme_intr_disestablish");
}



/*
 * VME DMA functions.
 */

static void
sparc_vct_dmamap_destroy(cookie, map)
	void *cookie;
	bus_dmamap_t map;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
	bus_dmamap_destroy(sc->sc_dmatag, map);
}

#if defined(SUN4)
static int
sparc_vct4_dmamap_create(cookie, size, am, datasize, swap, nsegments, maxsegsz,
			 boundary, flags, dmamp)
	void *cookie;
	vme_size_t size;
	vme_am_t am;
	vme_datasize_t datasize;
	vme_swap_t swap;
	int nsegments;
	vme_size_t maxsegsz;
	vme_addr_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;

	/* Allocate a base map through parent bus ops */
	return (bus_dmamap_create(sc->sc_dmatag, size, nsegments, maxsegsz,
				  boundary, flags, dmamp));
}

int
sparc_vme4_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	bus_addr_t dva;
	bus_size_t sgsize;
	vaddr_t va, voff;
	pmap_t pmap;
	int pagesz = PAGE_SIZE;
	int error;

	cache_flush(buf, buflen); /* XXX - move to bus_dma_sync */

	va = (vaddr_t)buf;
	voff = va & (pagesz - 1);
	va &= -pagesz;

	/*
	 * Allocate an integral number of pages from DVMA space
	 * covering the passed buffer.
	 */
	sgsize = (buflen + voff + pagesz - 1) & -pagesz;
	error = extent_alloc(vme_dvmamap, sgsize, pagesz,
			     map->_dm_boundary,
			     (flags & BUS_DMA_NOWAIT) == 0
					? EX_WAITOK
					: EX_NOWAIT,
			     (u_long *)&dva);
	if (error != 0)
		return (error);

	map->dm_mapsize = buflen;
	map->dm_nsegs = 1;
	/* Adjust DVMA address to VME view */
	map->dm_segs[0].ds_addr = dva + voff - VME4_DVMA_BASE;
	map->dm_segs[0].ds_len = buflen;
	map->dm_segs[0]._ds_sgsize = sgsize;

	pmap = (p == NULL) ? pmap_kernel() : p->p_vmspace->vm_map.pmap;

	for (; sgsize != 0; ) {
		paddr_t pa;
		/*
		 * Get the physical address for this page.
		 */
		(void) pmap_extract(pmap, va, &pa);

#ifdef notyet
		if (have_iocache)
			pa |= PG_IOC;
#endif
		pmap_enter(pmap_kernel(), dva,
			   pa | PMAP_NC,
			   VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);

		dva += pagesz;
		va += pagesz;
		sgsize -= pagesz;
	}
	pmap_update(pmap_kernel());

	return (0);
}

void
sparc_vme4_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	bus_dma_segment_t *segs = map->dm_segs;
	int nsegs = map->dm_nsegs;
	bus_addr_t dva;
	bus_size_t len;
	int i, s, error;

	for (i = 0; i < nsegs; i++) {
		/* Go from VME to CPU view */
		dva = segs[i].ds_addr + VME4_DVMA_BASE;
		dva &= -PAGE_SIZE;
		len = segs[i]._ds_sgsize;

		/* Remove double-mapping in DVMA space */
		pmap_remove(pmap_kernel(), dva, dva + len);

		/* Release DVMA space */
		s = splhigh();
		error = extent_free(vme_dvmamap, dva, len, EX_NOWAIT);
		splx(s);
		if (error != 0)
			printf("warning: %ld of DVMA space lost\n", (long)len);
	}
	pmap_update(pmap_kernel());

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

void
sparc_vme4_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{

	/*
	 * XXX Should perform cache flushes as necessary (e.g. 4/200 W/B).
	 * Currently the cache is flushed in bus_dma_load()...
	 */
}
#endif /* SUN4 */
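
/*
 * Worked example for the page arithmetic in sparc_vme4_dmamap_load()
 * above (assuming a 4K page size for illustration): for buf = 0x12345678
 * and buflen = 0x2000,
 *	voff   = 0x678
 *	va     = 0x12345000
 *	sgsize = (0x2000 + 0x678 + 0xfff) & -0x1000 = 0x3000
 * i.e. three DVMA pages are needed to cover the page-straddling buffer.
 */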
#if defined(SUN4M)
static int
sparc_vme_iommu_dmamap_create(t, size, nsegments, maxsegsz,
			      boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{

	printf("sparc_vme_dmamap_create: please use `vme_dmamap_create'\n");
	return (EINVAL);
}

static int
sparc_vct_iommu_dmamap_create(cookie, size, am, datasize, swap, nsegments,
			      maxsegsz, boundary, flags, dmamp)
	void *cookie;
	vme_size_t size;
	vme_am_t am;
	vme_datasize_t datasize;
	vme_swap_t swap;
	int nsegments;
	vme_size_t maxsegsz;
	vme_addr_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
	bus_dmamap_t map;
	int error;

	/* Allocate a base map through parent bus ops */
	error = bus_dmamap_create(sc->sc_dmatag, size, nsegments, maxsegsz,
				  boundary, flags, &map);
	if (error != 0)
		return (error);

	/*
	 * Each I/O cache line maps to an 8K section of VME DVMA space,
	 * so we must ensure that DVMA allocations are always 8K aligned.
	 */
	map->_dm_align = VME_IOC_PAGESZ;

	/* Set map region based on Address Modifier */
	switch ((am & VME_AM_ADRSIZEMASK)) {
	case VME_AM_A16:
	case VME_AM_A24:
		/* 1 MB of DVMA space */
		map->_dm_ex_start = VME_IOMMU_DVMA_AM24_BASE;
		map->_dm_ex_end = VME_IOMMU_DVMA_AM24_END;
		break;
	case VME_AM_A32:
		/* 8 MB of DVMA space */
		map->_dm_ex_start = VME_IOMMU_DVMA_AM32_BASE;
		map->_dm_ex_end = VME_IOMMU_DVMA_AM32_END;
		break;
	}

	*dmamp = map;
	return (0);
}

int
sparc_vme_iommu_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	struct sparcvme_softc	*sc = (struct sparcvme_softc *)t->_cookie;
	volatile u_int32_t	*ioctags;
	int			error;

	/* Round request to a multiple of the I/O cache size */
	buflen = (buflen + VME_IOC_PAGESZ - 1) & -VME_IOC_PAGESZ;
	error = bus_dmamap_load(sc->sc_dmatag, map, buf, buflen, p, flags);
	if (error != 0)
		return (error);

	/* Allocate I/O cache entries for this range */
	ioctags = sc->sc_ioctags + VME_IOC_LINE(map->dm_segs[0].ds_addr);
	while (buflen > 0) {
		*ioctags = VME_IOC_IC | VME_IOC_W;
		ioctags += VME_IOC_LINESZ/sizeof(*ioctags);
		buflen -= VME_IOC_PAGESZ;
	}

	/*
	 * Adjust DVMA address to VME view.
	 * Note: the DVMA base address is the same for all
	 * VME address spaces.
	 */
	map->dm_segs[0].ds_addr -= VME_IOMMU_DVMA_BASE;
	return (0);
}


void
sparc_vme_iommu_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct sparcvme_softc	*sc = (struct sparcvme_softc *)t->_cookie;
	volatile u_int32_t	*flushregs;
	int			len;

	/* Go from VME to CPU view */
	map->dm_segs[0].ds_addr += VME_IOMMU_DVMA_BASE;

	/* Flush VME I/O cache */
	len = map->dm_segs[0]._ds_sgsize;
	flushregs = sc->sc_iocflush + VME_IOC_LINE(map->dm_segs[0].ds_addr);
	while (len > 0) {
		*flushregs = 0;
		flushregs += VME_IOC_LINESZ/sizeof(*flushregs);
		len -= VME_IOC_PAGESZ;
	}

	/*
	 * Start a read from `tag space' which will not complete until
	 * all cache flushes have finished.
	 */
	(*sc->sc_ioctags);

	bus_dmamap_unload(sc->sc_dmatag, map);
}

void
sparc_vme_iommu_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{

	/*
	 * XXX Should perform cache flushes as necessary.
	 */
}
#endif /* SUN4M */

int
sparc_vme_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	struct sparcvme_softc	*sc = (struct sparcvme_softc *)t->_cookie;

	return (bus_dmamem_map(sc->sc_dmatag, segs, nsegs, size, kvap, flags));
}
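
/*
 * Worked example for the I/O cache arithmetic above (assuming
 * VME_IOC_PAGESZ is the 8K documented in the comment in
 * sparc_vct_iommu_dmamap_create()): a 12K transfer is rounded up to
 * 16K, i.e. two 8K I/O cache lines, so sparc_vme_iommu_dmamap_load()
 * marks exactly two tag entries with VME_IOC_IC|VME_IOC_W, and
 * sparc_vme_iommu_dmamap_unload() later writes the two corresponding
 * flush registers to invalidate them.
 */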