/*	$NetBSD: vme_machdep.c,v 1.34 2001/09/26 20:53:05 eeh Exp $	*/

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/errno.h>

#include <sys/proc.h>
#include <sys/user.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#define _SPARC_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <sparc/sparc/iommuvar.h>
#include <machine/autoconf.h>
#include <machine/oldmon.h>
#include <machine/cpu.h>
#include <machine/ctlreg.h>

#include <dev/vme/vmereg.h>
#include <dev/vme/vmevar.h>

#include <sparc/sparc/asm.h>
#include <sparc/sparc/vaddrs.h>
#include <sparc/sparc/cpuvar.h>
#include <sparc/dev/vmereg.h>

struct sparcvme_softc {
	struct device	 sc_dev;	/* base device */
	bus_space_tag_t	 sc_bustag;
	bus_dma_tag_t	 sc_dmatag;
	struct vmebusreg *sc_reg;	/* VME control registers */
	struct vmebusvec *sc_vec;	/* VME interrupt vector */
	struct rom_range *sc_range;	/* ROM range property */
	int		 sc_nrange;
	volatile u_int32_t *sc_ioctags;	/* VME IO-cache tag registers */
	volatile u_int32_t *sc_iocflush;/* VME IO-cache flush registers */
	int		 (*sc_vmeintr) __P((void *));
};
struct sparcvme_softc *sparcvme_sc;	/*XXX*/

/* autoconfiguration driver */
static int	vmematch_iommu __P((struct device *, struct cfdata *, void *));
static void	vmeattach_iommu __P((struct device *, struct device *, void *));
static int	vmematch_mainbus __P((struct device *, struct cfdata *, void *));
static void	vmeattach_mainbus __P((struct device *, struct device *, void *));
#if defined(SUN4)
int		vmeintr4 __P((void *));
#endif
#if defined(SUN4M)
int		vmeintr4m __P((void *));
static int	sparc_vme_error __P((void));
#endif


static int	sparc_vme_probe __P((void *, vme_addr_t, vme_size_t,
		    vme_am_t, vme_datasize_t,
		    int (*) __P((void *, bus_space_tag_t, bus_space_handle_t)),
		    void *));
static int	sparc_vme_map __P((void *, vme_addr_t, vme_size_t, vme_am_t,
		    vme_datasize_t, vme_swap_t,
		    bus_space_tag_t *, bus_space_handle_t *,
		    vme_mapresc_t *));
static void	sparc_vme_unmap __P((void *, vme_mapresc_t));
static int	sparc_vme_intr_map __P((void *, int, int, vme_intr_handle_t *));
static const struct evcnt *sparc_vme_intr_evcnt __P((void *,
		    vme_intr_handle_t));
static void *	sparc_vme_intr_establish __P((void *, vme_intr_handle_t, int,
		    int (*) __P((void *)), void *));
static void	sparc_vme_intr_disestablish __P((void *, void *));

static int	vmebus_translate __P((struct sparcvme_softc *, vme_am_t,
		    vme_addr_t, bus_type_t *, bus_addr_t *));
#if defined(SUN4M)
static void	sparc_vme_iommu_barrier __P((bus_space_tag_t,
		    bus_space_handle_t, bus_size_t, bus_size_t, int));

#endif

/*
 * DMA functions.
 */
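/*
 * Overview (summary of the comments further down): on sun4, VME DVMA
 * space is managed directly with an extent map (`vme_dvmamap' below);
 * on sun4m, DMA goes through the IOMMU and the VME IO-cache.
 */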
static void	sparc_vct_dmamap_destroy __P((void *, bus_dmamap_t));

#if defined(SUN4)
static int	sparc_vct4_dmamap_create __P((void *, vme_size_t, vme_am_t,
		    vme_datasize_t, vme_swap_t, int, vme_size_t, vme_addr_t,
		    int, bus_dmamap_t *));
static int	sparc_vme4_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
		    bus_size_t, struct proc *, int));
static void	sparc_vme4_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
static void	sparc_vme4_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
		    bus_addr_t, bus_size_t, int));
#endif

#if defined(SUN4M)
static int	sparc_vct_iommu_dmamap_create __P((void *, vme_size_t, vme_am_t,
		    vme_datasize_t, vme_swap_t, int, vme_size_t, vme_addr_t,
		    int, bus_dmamap_t *));
static int	sparc_vme_iommu_dmamap_create __P((bus_dma_tag_t, bus_size_t,
		    int, bus_size_t, bus_size_t, int, bus_dmamap_t *));

static int	sparc_vme_iommu_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t,
		    void *, bus_size_t, struct proc *, int));
static void	sparc_vme_iommu_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
static void	sparc_vme_iommu_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
		    bus_addr_t, bus_size_t, int));
#endif

static int	sparc_vme_dmamem_map __P((bus_dma_tag_t, bus_dma_segment_t *,
		    int, size_t, caddr_t *, int));
#if 0
static void	sparc_vme_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t));
static void	sparc_vme_dmamem_unmap __P((bus_dma_tag_t, caddr_t, size_t));
static paddr_t	sparc_vme_dmamem_mmap __P((bus_dma_tag_t,
		    bus_dma_segment_t *, int, off_t, int, int));
#endif

int sparc_vme_mmap_cookie __P((vme_addr_t, vme_am_t, bus_space_handle_t *));

struct cfattach vme_mainbus_ca = {
	sizeof(struct sparcvme_softc), vmematch_mainbus, vmeattach_mainbus
};

struct cfattach vme_iommu_ca = {
	sizeof(struct sparcvme_softc), vmematch_iommu, vmeattach_iommu
};

int	(*vmeerr_handler) __P((void));

#define VMEMOD_D32 0x40 /* ??? */

/* If the PROM does not provide the `ranges' property, we make up our own */
struct rom_range vmebus_translations[] = {
#define _DS (VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA)
	{ VME_AM_A16|_DS, 0, PMAP_VME16, 0xffff0000, 0 },
	{ VME_AM_A24|_DS, 0, PMAP_VME16, 0xff000000, 0 },
	{ VME_AM_A32|_DS, 0, PMAP_VME16, 0x00000000, 0 },
	{ VME_AM_A16|VMEMOD_D32|_DS, 0, PMAP_VME32, 0xffff0000, 0 },
	{ VME_AM_A24|VMEMOD_D32|_DS, 0, PMAP_VME32, 0xff000000, 0 },
	{ VME_AM_A32|VMEMOD_D32|_DS, 0, PMAP_VME32, 0x00000000, 0 }
#undef _DS
};

/*
 * The VME bus logic on sun4 machines maps DMA requests in the first MB
 * of VME space to the last MB of DVMA space.  `vme_dvmamap' is used
 * for DVMA space allocations.  The DMA addresses returned by
 * bus_dmamap_load*() must be relocated by -VME4_DVMA_BASE.
 */
struct extent *vme_dvmamap;

/*
 * The VME hardware on the sun4m IOMMU maps the first 8MB of 32-bit
 * VME space to the last 8MB of DVMA space and the first 1MB of
 * 24-bit VME space to the first 1MB of the last 8MB of DVMA space
 * (thus 24-bit VME space overlaps the first 1MB of 32-bit space).
 * The following constants define subregions in the IOMMU DVMA map
 * for VME DVMA allocations.  The DMA addresses returned by
 * bus_dmamap_load*() must be relocated by -VME_IOMMU_DVMA_BASE.
 */
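/*
 * Illustrative example (not from the original source): with
 * VME_IOMMU_DVMA_BASE == 0xff800000 below, a buffer loaded at DVMA
 * address 0xff801000 is reached by a VME master at address 0x00001000
 * after the -VME_IOMMU_DVMA_BASE relocation.  The sun4 case is
 * analogous, relocating by -VME4_DVMA_BASE instead.
 */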
#define VME_IOMMU_DVMA_BASE		0xff800000
#define VME_IOMMU_DVMA_AM24_BASE	VME_IOMMU_DVMA_BASE
#define VME_IOMMU_DVMA_AM24_END		0xff900000
#define VME_IOMMU_DVMA_AM32_BASE	VME_IOMMU_DVMA_BASE
#define VME_IOMMU_DVMA_AM32_END		IOMMU_DVMA_END

struct sparc_bus_space_tag sparc_vme_bus_tag = {
	NULL, /* cookie */
	NULL, /* parent bus tag */
	NULL, /* bus_map */
	NULL, /* bus_unmap */
	NULL, /* bus_subregion */
	NULL  /* barrier */
};

struct vme_chipset_tag sparc_vme_chipset_tag = {
	NULL,
	sparc_vme_map,
	sparc_vme_unmap,
	sparc_vme_probe,
	sparc_vme_intr_map,
	sparc_vme_intr_evcnt,
	sparc_vme_intr_establish,
	sparc_vme_intr_disestablish,
	0, 0, 0 /* bus specific DMA stuff */
};


#if defined(SUN4)
struct sparc_bus_dma_tag sparc_vme4_dma_tag = {
	NULL,	/* cookie */
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	sparc_vme4_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	sparc_vme4_dmamap_unload,
	sparc_vme4_dmamap_sync,

	_bus_dmamem_alloc,
	_bus_dmamem_free,
	sparc_vme_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};
#endif

#if defined(SUN4M)
struct sparc_bus_dma_tag sparc_vme_iommu_dma_tag = {
	NULL,	/* cookie */
	sparc_vme_iommu_dmamap_create,
	_bus_dmamap_destroy,
	sparc_vme_iommu_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	sparc_vme_iommu_dmamap_unload,
	sparc_vme_iommu_dmamap_sync,

	_bus_dmamem_alloc,
	_bus_dmamem_free,
	sparc_vme_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};
#endif


int
vmematch_mainbus(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct mainbus_attach_args *ma = aux;

	if (!CPU_ISSUN4)
		return (0);

	return (strcmp("vme", ma->ma_name) == 0);
}

int
vmematch_iommu(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct iommu_attach_args *ia = aux;

	return (strcmp("vme", ia->iom_name) == 0);
}


void
vmeattach_mainbus(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#if defined(SUN4)
	struct mainbus_attach_args *ma = aux;
	struct sparcvme_softc *sc = (struct sparcvme_softc *)self;
	struct vmebus_attach_args vba;

	if (self->dv_unit > 0) {
		printf(" unsupported\n");
		return;
	}

	sc->sc_bustag = ma->ma_bustag;
	sc->sc_dmatag = ma->ma_dmatag;

	/* VME interrupt entry point */
	sc->sc_vmeintr = vmeintr4;

/*XXX*/	sparc_vme_chipset_tag.cookie = self;
/*XXX*/	sparc_vme_chipset_tag.vct_dmamap_create = sparc_vct4_dmamap_create;
/*XXX*/	sparc_vme_chipset_tag.vct_dmamap_destroy = sparc_vct_dmamap_destroy;
/*XXX*/	sparc_vme4_dma_tag._cookie = self;

#if 0
	sparc_vme_bus_tag.parent = ma->ma_bustag;
	vba.vba_bustag = &sparc_vme_bus_tag;
#endif
	vba.va_vct = &sparc_vme_chipset_tag;
	vba.va_bdt = &sparc_vme4_dma_tag;
	vba.va_slaveconfig = 0;

	/* Fall back to our own `range' construction */
	sc->sc_range = vmebus_translations;
	sc->sc_nrange =
		sizeof(vmebus_translations)/sizeof(vmebus_translations[0]);

	vme_dvmamap = extent_create("vmedvma", VME4_DVMA_BASE, VME4_DVMA_END,
				    M_DEVBUF, 0, 0, EX_NOWAIT);
	if (vme_dvmamap == NULL)
		panic("vme: unable to allocate DVMA map");

	printf("\n");
	(void)config_found(self, &vba, 0);

#endif
	return;
}

/* sun4m vmebus */
void
vmeattach_iommu(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#if defined(SUN4M)
	struct sparcvme_softc *sc = (struct sparcvme_softc *)self;
	struct iommu_attach_args *ia = aux;
	struct vmebus_attach_args vba;
	bus_space_handle_t bh;
	int node;
	int cline;

	if (self->dv_unit > 0) {
		printf(" unsupported\n");
		return;
	}

	sc->sc_bustag = ia->iom_bustag;
	sc->sc_dmatag = ia->iom_dmatag;

	/* VME interrupt entry point */
	sc->sc_vmeintr = vmeintr4m;

/*XXX*/	sparc_vme_chipset_tag.cookie = self;
/*XXX*/	sparc_vme_chipset_tag.vct_dmamap_create = sparc_vct_iommu_dmamap_create;
/*XXX*/	sparc_vme_chipset_tag.vct_dmamap_destroy = sparc_vct_dmamap_destroy;
/*XXX*/	sparc_vme_iommu_dma_tag._cookie = self;
	sparc_vme_bus_tag.sparc_bus_barrier = sparc_vme_iommu_barrier;

#if 0
	vba.vba_bustag = &sparc_vme_bus_tag;
#endif
	vba.va_vct = &sparc_vme_chipset_tag;
	vba.va_bdt = &sparc_vme_iommu_dma_tag;
	vba.va_slaveconfig = 0;

	node = ia->iom_node;

	/*
	 * Map VME control space
	 */
	if (ia->iom_nreg < 2) {
		printf("%s: only %d register sets\n", self->dv_xname,
		       ia->iom_nreg);
		return;
	}

	if (bus_space_map2(ia->iom_bustag,
			   (bus_type_t)ia->iom_reg[0].ior_iospace,
			   (bus_addr_t)ia->iom_reg[0].ior_pa,
			   (bus_size_t)ia->iom_reg[0].ior_size,
			   BUS_SPACE_MAP_LINEAR,
			   0, &bh) != 0) {
		panic("%s: can't map vmebusreg", self->dv_xname);
	}
	sc->sc_reg = (struct vmebusreg *)bh;

	if (bus_space_map2(ia->iom_bustag,
			   (bus_type_t)ia->iom_reg[1].ior_iospace,
			   (bus_addr_t)ia->iom_reg[1].ior_pa,
			   (bus_size_t)ia->iom_reg[1].ior_size,
			   BUS_SPACE_MAP_LINEAR,
			   0, &bh) != 0) {
		panic("%s: can't map vmebusvec", self->dv_xname);
	}
	sc->sc_vec = (struct vmebusvec *)bh;

	/*
	 * Map VME IO cache tags and flush control.
	 */
	if (bus_space_map2(ia->iom_bustag,
			   (bus_type_t)ia->iom_reg[1].ior_iospace,
			   (bus_addr_t)ia->iom_reg[1].ior_pa + VME_IOC_TAGOFFSET,
			   VME_IOC_SIZE,
			   BUS_SPACE_MAP_LINEAR,
			   0, &bh) != 0) {
		panic("%s: can't map IOC tags", self->dv_xname);
	}
	sc->sc_ioctags = (u_int32_t *)bh;

	if (bus_space_map2(ia->iom_bustag,
			   (bus_type_t)ia->iom_reg[1].ior_iospace,
			   (bus_addr_t)ia->iom_reg[1].ior_pa + VME_IOC_FLUSHOFFSET,
			   VME_IOC_SIZE,
			   BUS_SPACE_MAP_LINEAR,
			   0, &bh) != 0) {
		panic("%s: can't map IOC flush registers", self->dv_xname);
	}
	sc->sc_iocflush = (u_int32_t *)bh;

/*XXX*/	sparc_vme_bus_tag.cookie = sc->sc_reg;

	/*
	 * Get the "ranges" property.
	 */
	if (PROM_getprop(node, "ranges", sizeof(struct rom_range),
			 &sc->sc_nrange, (void **)&sc->sc_range) != 0) {
		panic("%s: can't get ranges property", self->dv_xname);
	}

	sparcvme_sc = sc;
	vmeerr_handler = sparc_vme_error;

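	/*
	 * Note: each IO-cache line covers a VME_IOC_PAGESZ chunk of
	 * DVMA space; its tag register lives in the tag region mapped
	 * above.
	 */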
	/*
	 * Invalidate all IO-cache entries.
	 */
	for (cline = VME_IOC_SIZE/VME_IOC_LINESZ; cline > 0;) {
		sc->sc_ioctags[--cline] = 0;
	}

	/* Enable IO-cache */
	sc->sc_reg->vmebus_cr |= VMEBUS_CR_C;

	printf(": version 0x%x\n",
	       sc->sc_reg->vmebus_cr & VMEBUS_CR_IMPL);

	(void)config_found(self, &vba, 0);
#endif
}

#if defined(SUN4M)
static int
sparc_vme_error()
{
	struct sparcvme_softc *sc = sparcvme_sc;
	u_int32_t afsr, afpa;
	char bits[64];

	afsr = sc->sc_reg->vmebus_afsr;
	afpa = sc->sc_reg->vmebus_afar;
	printf("VME error:\n\tAFSR %s\n",
	       bitmask_snprintf(afsr, VMEBUS_AFSR_BITS, bits, sizeof(bits)));
	printf("\taddress: 0x%x%x\n", afsr, afpa);
	return (0);
}
#endif

int
vmebus_translate(sc, mod, addr, btp, bap)
	struct sparcvme_softc *sc;
	vme_am_t mod;
	vme_addr_t addr;
	bus_type_t *btp;
	bus_addr_t *bap;
{
	int i;

	for (i = 0; i < sc->sc_nrange; i++) {

		if (sc->sc_range[i].cspace != mod)
			continue;

		/* We've found the connection to the parent bus */
		*bap = sc->sc_range[i].poffset + addr;
		*btp = sc->sc_range[i].pspace;
		return (0);
	}
	return (ENOENT);
}

struct vmeprobe_myarg {
	int (*cb) __P((void *, bus_space_tag_t, bus_space_handle_t));
	void *cbarg;
	bus_space_tag_t tag;
	int res; /* backwards */
};

static int vmeprobe_mycb __P((void *, void *));
static int
vmeprobe_mycb(bh, arg)
	void *bh, *arg;
{
	struct vmeprobe_myarg *a = arg;

	a->res = (*a->cb)(a->cbarg, a->tag, (bus_space_handle_t)bh);
	return (!a->res);
}

int
sparc_vme_probe(cookie, addr, len, mod, datasize, callback, arg)
	void *cookie;
	vme_addr_t addr;
	vme_size_t len;
	vme_am_t mod;
	vme_datasize_t datasize;
	int (*callback) __P((void *, bus_space_tag_t, bus_space_handle_t));
	void *arg;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
	bus_type_t iospace;
	bus_addr_t paddr;
	bus_size_t size;
	struct vmeprobe_myarg myarg;
	int res, i;

	if (vmebus_translate(sc, mod, addr, &iospace, &paddr) != 0)
		return (EINVAL);

	size = (datasize == VME_D8 ? 1 : (datasize == VME_D16 ? 2 : 4));

	if (callback) {
		myarg.cb = callback;
		myarg.cbarg = arg;
		myarg.tag = sc->sc_bustag;
		myarg.res = 0;
		res = bus_space_probe(sc->sc_bustag, iospace, paddr, size, 0,
				      0, vmeprobe_mycb, &myarg);
		return (res ? 0 : (myarg.res ? myarg.res : EIO));
	}

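	/* No callback: just probe each `size'-sized cell for a response. */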
	for (i = 0; i < len / size; i++) {
		myarg.res = 0;
		res = bus_space_probe(sc->sc_bustag, iospace, paddr, size, 0,
				      0, 0, 0);
		if (res == 0)
			return (EIO);
		paddr += size;
	}
	return (0);
}

int
sparc_vme_map(cookie, addr, size, mod, datasize, swap, tp, hp, rp)
	void *cookie;
	vme_addr_t addr;
	vme_size_t size;
	vme_am_t mod;
	vme_datasize_t datasize;
	vme_swap_t swap;
	bus_space_tag_t *tp;
	bus_space_handle_t *hp;
	vme_mapresc_t *rp;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
	bus_type_t iospace;
	bus_addr_t paddr;
	int error;

	error = vmebus_translate(sc, mod, addr, &iospace, &paddr);
	if (error != 0)
		return (error);

	*tp = sc->sc_bustag;
	return (bus_space_map2(sc->sc_bustag, iospace, paddr, size, 0, 0, hp));
}

int
sparc_vme_mmap_cookie(addr, mod, hp)
	vme_addr_t addr;
	vme_am_t mod;
	bus_space_handle_t *hp;
{
	struct sparcvme_softc *sc = sparcvme_sc;
	bus_type_t iospace;
	bus_addr_t paddr;
	int error;

	error = vmebus_translate(sc, mod, addr, &iospace, &paddr);
	if (error != 0)
		return (error);

	return (bus_space_mmap(sc->sc_bustag, BUS_ADDR(iospace, paddr), 0,
			       0/*prot is ignored*/, 0));
}

#if defined(SUN4M)
void
sparc_vme_iommu_barrier(t, h, offset, size, flags)
	bus_space_tag_t t;
	bus_space_handle_t h;
	bus_size_t offset;
	bus_size_t size;
	int flags;
{
	struct vmebusreg *vbp = (struct vmebusreg *)t->cookie;

	/* Read async fault status to flush write-buffers */
	(*(volatile int *)&vbp->vmebus_afsr);
}
#endif



/*
 * VME Interrupt Priority Level to sparc Processor Interrupt Level.
 */
static int vme_ipl_to_pil[] = {
	0,
	2,
	3,
	5,
	7,
	9,
	11,
	13
};


/*
 * All VME device interrupts go through vmeintr().  This function reads
 * the VME vector from the bus, then dispatches the device interrupt
 * handler.  All handlers for devices that map to the same Processor
 * Interrupt Level (according to the table above) are on a linked list
 * of `sparc_vme_intr_handle' structures, the head of which is passed
 * down as the argument to `vmeintr(void *arg)'.
 */
struct sparc_vme_intr_handle {
	struct intrhand ih;
	struct sparc_vme_intr_handle *next;
	int	vec;		/* VME interrupt vector */
	int	pri;		/* VME interrupt priority */
	struct sparcvme_softc *sc;	/*XXX*/
};

#if defined(SUN4)
int
vmeintr4(arg)
	void *arg;
{
	struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
	int level, vec;
	int rv = 0;

	level = (ihp->pri << 1) | 1;

	vec = ldcontrolb((caddr_t)(AC_VMEINTVEC | level));

	if (vec == -1) {
#ifdef DEBUG
		/*
		 * This seems to happen only with the i82586 based
		 * `ie1' boards.
		 */
		printf("vme: spurious interrupt at VME level %d\n", ihp->pri);
#endif
		return (1); /* XXX - pretend we handled it, for now */
	}

	for (; ihp; ihp = ihp->next)
		if (ihp->vec == vec && ihp->ih.ih_fun)
			rv |= (ihp->ih.ih_fun)(ihp->ih.ih_arg);

	return (rv);
}
#endif

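/*
 * The sun4m version fetches the interrupt vector from the VME
 * interrupt vector register.  Since that read has been seen to fault
 * with a bus timeout, it is done through fkbyte() with an on-fault
 * handler in place (see the comments in the function body).
 */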
#if defined(SUN4M)
int
vmeintr4m(arg)
	void *arg;
{
	struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
	int level, vec;
	int rv = 0;

	level = (ihp->pri << 1) | 1;

#if 0
	int pending;

	/* Flush VME <=> Sbus write buffers */
	(*(volatile int *)&ihp->sc->sc_reg->vmebus_afsr);

	pending = *((int*)ICR_SI_PEND);
	if ((pending & SINTR_VME(ihp->pri)) == 0) {
		printf("vmeintr: not pending at pri %x(p 0x%x)\n",
			ihp->pri, pending);
		return (0);
	}
#endif
#if 0
	/* Why does this sometimes cause a bus timeout? */
	vec = ihp->sc->sc_vec->vmebusvec[level];
#else
	/* so, arrange to catch the fault... */
	{
	extern struct user *proc0paddr;
	extern int fkbyte __P((caddr_t, struct pcb *));
	caddr_t addr = (caddr_t)&ihp->sc->sc_vec->vmebusvec[level];
	struct pcb *xpcb;
	u_long saveonfault;
	int s;

	s = splhigh();
	if (curproc == NULL)
		xpcb = (struct pcb *)proc0paddr;
	else
		xpcb = &curproc->p_addr->u_pcb;

	saveonfault = (u_long)xpcb->pcb_onfault;
	vec = fkbyte(addr, xpcb);
	xpcb->pcb_onfault = (caddr_t)saveonfault;

	splx(s);
	}
#endif

	if (vec == -1) {
#ifdef DEBUG
		/*
		 * This seems to happen only with the i82586 based
		 * `ie1' boards.
		 */
		printf("vme: spurious interrupt at VME level %d\n", ihp->pri);
		printf("    ICR_SI_PEND=0x%x; VME AFSR=0x%x; VME AFAR=0x%x\n",
			*((int*)ICR_SI_PEND),
			ihp->sc->sc_reg->vmebus_afsr,
			ihp->sc->sc_reg->vmebus_afar);
#endif
		return (1); /* XXX - pretend we handled it, for now */
	}

	for (; ihp; ihp = ihp->next)
		if (ihp->vec == vec && ihp->ih.ih_fun)
			rv |= (ihp->ih.ih_fun)(ihp->ih.ih_arg);

	return (rv);
}
#endif

int
sparc_vme_intr_map(cookie, level, vec, ihp)
	void *cookie;
	int level;
	int vec;
	vme_intr_handle_t *ihp;
{
	struct sparc_vme_intr_handle *ih;

	ih = (vme_intr_handle_t)
	    malloc(sizeof(struct sparc_vme_intr_handle), M_DEVBUF, M_NOWAIT);
	ih->pri = level;
	ih->vec = vec;
	ih->sc = cookie;	/*XXX*/
	*ihp = ih;
	return (0);
}

const struct evcnt *
sparc_vme_intr_evcnt(cookie, vih)
	void *cookie;
	vme_intr_handle_t vih;
{

	/* XXX for now, no evcnt parent reported */
	return NULL;
}

void *
sparc_vme_intr_establish(cookie, vih, pri, func, arg)
	void *cookie;
	vme_intr_handle_t vih;
	int pri;
	int (*func) __P((void *));
	void *arg;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
	struct sparc_vme_intr_handle *svih =
			(struct sparc_vme_intr_handle *)vih;
	struct intrhand *ih;
	int level;

	/* XXX pri == svih->pri ??? */

	/* Translate VME priority to processor IPL */
	level = vme_ipl_to_pil[svih->pri];

	svih->ih.ih_fun = func;
	svih->ih.ih_arg = arg;
	svih->next = NULL;

	/* ensure the interrupt subsystem will call us at this level */
	for (ih = intrhand[level]; ih != NULL; ih = ih->ih_next)
		if (ih->ih_fun == sc->sc_vmeintr)
			break;

	if (ih == NULL) {
		ih = (struct intrhand *)
			malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
		if (ih == NULL)
			panic("vme_addirq");
		bzero(ih, sizeof *ih);
		ih->ih_fun = sc->sc_vmeintr;
		ih->ih_arg = vih;
		intr_establish(level, ih);
	} else {
		svih->next = (vme_intr_handle_t)ih->ih_arg;
		ih->ih_arg = vih;
	}
	return (NULL);
}

void
sparc_vme_unmap(cookie, resc)
	void * cookie;
	vme_mapresc_t resc;
{
	/* Not implemented */
	panic("sparc_vme_unmap");
}

void
sparc_vme_intr_disestablish(cookie, a)
	void *cookie;
	void *a;
{
	/* Not implemented */
	panic("sparc_vme_intr_disestablish");
}



/*
 * VME DMA functions.
 */

static void
sparc_vct_dmamap_destroy(cookie, map)
	void *cookie;
	bus_dmamap_t map;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;

	bus_dmamap_destroy(sc->sc_dmatag, map);
}

#if defined(SUN4)
static int
sparc_vct4_dmamap_create(cookie, size, am, datasize, swap, nsegments, maxsegsz,
			 boundary, flags, dmamp)
	void *cookie;
	vme_size_t size;
	vme_am_t am;
	vme_datasize_t datasize;
	vme_swap_t swap;
	int nsegments;
	vme_size_t maxsegsz;
	vme_addr_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;

	/* Allocate a base map through parent bus ops */
	return (bus_dmamap_create(sc->sc_dmatag, size, nsegments, maxsegsz,
				  boundary, flags, dmamp));
}

int
sparc_vme4_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	bus_addr_t dva;
	bus_size_t sgsize;
	vaddr_t va, voff;
	pmap_t pmap;
	int pagesz = PAGE_SIZE;
	int error;

	cpuinfo.cache_flush(buf, buflen); /* XXX - move to bus_dma_sync */

	va = (vaddr_t)buf;
	voff = va & (pagesz - 1);
	va &= -pagesz;

	/*
	 * Allocate an integral number of pages from DVMA space
	 * covering the passed buffer.
	 */
	sgsize = (buflen + voff + pagesz - 1) & -pagesz;
	error = extent_alloc(vme_dvmamap, sgsize, pagesz,
			     map->_dm_boundary,
			     (flags & BUS_DMA_NOWAIT) == 0
					? EX_WAITOK
					: EX_NOWAIT,
			     (u_long *)&dva);
	if (error != 0)
		return (error);

	map->dm_mapsize = buflen;
	map->dm_nsegs = 1;
	/* Adjust DVMA address to VME view */
	map->dm_segs[0].ds_addr = dva + voff - VME4_DVMA_BASE;
	map->dm_segs[0].ds_len = buflen;
	map->dm_segs[0]._ds_sgsize = sgsize;

	pmap = (p == NULL) ? pmap_kernel() : p->p_vmspace->vm_map.pmap;

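	/*
	 * Double-map the buffer: for each page, enter a
	 * cache-inhibited (PMAP_NC) mapping in the kernel pmap
	 * at the allocated DVMA address.
	 */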
	for (; sgsize != 0; ) {
		paddr_t pa;
		/*
		 * Get the physical address for this page.
		 */
		(void) pmap_extract(pmap, va, &pa);

#ifdef notyet
		if (have_iocache)
			pa |= PG_IOC;
#endif
		pmap_enter(pmap_kernel(), dva,
			   pa | PMAP_NC,
			   VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);

		dva += pagesz;
		va += pagesz;
		sgsize -= pagesz;
	}
	pmap_update(pmap_kernel());

	return (0);
}

void
sparc_vme4_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	bus_dma_segment_t *segs = map->dm_segs;
	int nsegs = map->dm_nsegs;
	bus_addr_t dva;
	bus_size_t len;
	int i, s, error;

	for (i = 0; i < nsegs; i++) {
		/* Go from VME to CPU view */
		dva = segs[i].ds_addr + VME4_DVMA_BASE;
		dva &= -PAGE_SIZE;
		len = segs[i]._ds_sgsize;

		/* Remove double-mapping in DVMA space */
		pmap_remove(pmap_kernel(), dva, dva + len);

		/* Release DVMA space */
		s = splhigh();
		error = extent_free(vme_dvmamap, dva, len, EX_NOWAIT);
		splx(s);
		if (error != 0)
			printf("warning: %ld of DVMA space lost\n", len);
	}
	pmap_update(pmap_kernel());

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

void
sparc_vme4_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{

	/*
	 * XXX Should perform cache flushes as necessary (e.g. 4/200 W/B).
	 * Currently the cache is flushed in bus_dma_load()...
	 */
}
#endif /* SUN4 */

#if defined(SUN4M)
static int
sparc_vme_iommu_dmamap_create (t, size, nsegments, maxsegsz,
			       boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{

	printf("sparc_vme_dmamap_create: please use `vme_dmamap_create'\n");
	return (EINVAL);
}

static int
sparc_vct_iommu_dmamap_create(cookie, size, am, datasize, swap, nsegments,
			      maxsegsz, boundary, flags, dmamp)
	void *cookie;
	vme_size_t size;
	vme_am_t am;
	vme_datasize_t datasize;
	vme_swap_t swap;
	int nsegments;
	vme_size_t maxsegsz;
	vme_addr_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
	bus_dmamap_t map;
	int error;

	/* Allocate a base map through parent bus ops */
	error = bus_dmamap_create(sc->sc_dmatag, size, nsegments, maxsegsz,
				  boundary, flags, &map);
	if (error != 0)
		return (error);

	/*
	 * Each I/O cache line maps to an 8K section of VME DVMA space,
	 * so we must ensure that DVMA allocations are always 8K aligned.
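	 * (sparc_vme_iommu_dmamap_load() below likewise rounds the
	 * transfer length up to a multiple of VME_IOC_PAGESZ.)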
	 */
	map->_dm_align = VME_IOC_PAGESZ;

	/* Set map region based on Address Modifier */
	switch ((am & VME_AM_ADRSIZEMASK)) {
	case VME_AM_A16:
	case VME_AM_A24:
		/* 1 MB of DVMA space */
		map->_dm_ex_start = VME_IOMMU_DVMA_AM24_BASE;
		map->_dm_ex_end	  = VME_IOMMU_DVMA_AM24_END;
		break;
	case VME_AM_A32:
		/* 8 MB of DVMA space */
		map->_dm_ex_start = VME_IOMMU_DVMA_AM32_BASE;
		map->_dm_ex_end	  = VME_IOMMU_DVMA_AM32_END;
		break;
	}

	*dmamp = map;
	return (0);
}

int
sparc_vme_iommu_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	struct sparcvme_softc	*sc = (struct sparcvme_softc *)t->_cookie;
	volatile u_int32_t	*ioctags;
	int			error;

	/* Round request to a multiple of the I/O cache size */
	buflen = (buflen + VME_IOC_PAGESZ - 1) & -VME_IOC_PAGESZ;
	error = bus_dmamap_load(sc->sc_dmatag, map, buf, buflen, p, flags);
	if (error != 0)
		return (error);

	/* Allocate I/O cache entries for this range */
	ioctags = sc->sc_ioctags + VME_IOC_LINE(map->dm_segs[0].ds_addr);
	while (buflen > 0) {
		*ioctags = VME_IOC_IC | VME_IOC_W;
		ioctags += VME_IOC_LINESZ/sizeof(*ioctags);
		buflen -= VME_IOC_PAGESZ;
	}

	/*
	 * Adjust DVMA address to VME view.
	 * Note: the DVMA base address is the same for all
	 * VME address spaces.
	 */
	map->dm_segs[0].ds_addr -= VME_IOMMU_DVMA_BASE;
	return (0);
}


void
sparc_vme_iommu_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct sparcvme_softc	*sc = (struct sparcvme_softc *)t->_cookie;
	volatile u_int32_t	*flushregs;
	int			len;

	/* Go from VME to CPU view */
	map->dm_segs[0].ds_addr += VME_IOMMU_DVMA_BASE;

	/* Flush VME I/O cache */
	len = map->dm_segs[0]._ds_sgsize;
	flushregs = sc->sc_iocflush + VME_IOC_LINE(map->dm_segs[0].ds_addr);
	while (len > 0) {
		*flushregs = 0;
		flushregs += VME_IOC_LINESZ/sizeof(*flushregs);
		len -= VME_IOC_PAGESZ;
	}

	/*
	 * Start a read from `tag space' which will not complete until
	 * all cache flushes have finished.
	 */
	(*sc->sc_ioctags);

	bus_dmamap_unload(sc->sc_dmatag, map);
}

void
sparc_vme_iommu_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{

	/*
	 * XXX Should perform cache flushes as necessary.
	 */
}
#endif /* SUN4M */

int
sparc_vme_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	struct sparcvme_softc	*sc = (struct sparcvme_softc *)t->_cookie;

	return (bus_dmamem_map(sc->sc_dmatag, segs, nsegs, size, kvap, flags));
}