1 /* $NetBSD: vme_machdep.c,v 1.30 2000/07/04 22:22:56 pk Exp $ */ 2 3 /*- 4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Paul Kranenburg. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the NetBSD 21 * Foundation, Inc. and its contributors. 22 * 4. Neither the name of The NetBSD Foundation nor the names of its 23 * contributors may be used to endorse or promote products derived 24 * from this software without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 36 * POSSIBILITY OF SUCH DAMAGE. 37 */ 38 39 #include <sys/param.h> 40 #include <sys/extent.h> 41 #include <sys/systm.h> 42 #include <sys/device.h> 43 #include <sys/malloc.h> 44 #include <sys/errno.h> 45 46 #include <sys/proc.h> 47 #include <sys/user.h> 48 #include <sys/syslog.h> 49 50 #include <uvm/uvm_extern.h> 51 52 #define _SPARC_BUS_DMA_PRIVATE 53 #include <machine/bus.h> 54 #include <sparc/sparc/iommuvar.h> 55 #include <machine/autoconf.h> 56 #include <machine/oldmon.h> 57 #include <machine/cpu.h> 58 #include <machine/ctlreg.h> 59 60 #include <dev/vme/vmereg.h> 61 #include <dev/vme/vmevar.h> 62 63 #include <sparc/sparc/asm.h> 64 #include <sparc/sparc/vaddrs.h> 65 #include <sparc/sparc/cpuvar.h> 66 #include <sparc/dev/vmereg.h> 67 68 struct sparcvme_softc { 69 struct device sc_dev; /* base device */ 70 bus_space_tag_t sc_bustag; 71 bus_dma_tag_t sc_dmatag; 72 struct vmebusreg *sc_reg; /* VME control registers */ 73 struct vmebusvec *sc_vec; /* VME interrupt vector */ 74 struct rom_range *sc_range; /* ROM range property */ 75 int sc_nrange; 76 volatile u_int32_t *sc_ioctags; /* VME IO-cache tag registers */ 77 volatile u_int32_t *sc_iocflush;/* VME IO-cache flush registers */ 78 int (*sc_vmeintr) __P((void *)); 79 }; 80 struct sparcvme_softc *sparcvme_sc;/*XXX*/ 81 82 /* autoconfiguration driver */ 83 static int vmematch_iommu __P((struct device *, struct cfdata *, void *)); 84 static void vmeattach_iommu __P((struct device *, struct device *, void *)); 85 static int vmematch_mainbus __P((struct device *, struct cfdata *, void *)); 86 static void vmeattach_mainbus __P((struct device *, struct device *, void *)); 87 #if defined(SUN4) 88 int vmeintr4 __P((void *)); 89 #endif 90 #if defined(SUN4M) 91 int vmeintr4m __P((void *)); 92 static int sparc_vme_error __P((void)); 93 #endif 94 95 96 static int sparc_vme_probe __P((void *, vme_addr_t, vme_size_t, 97 vme_am_t, vme_datasize_t, 98 int (*) __P((void *, bus_space_tag_t, bus_space_handle_t)), void *)); 99 static int sparc_vme_map __P((void *, vme_addr_t, vme_size_t, vme_am_t, 100 vme_datasize_t, vme_swap_t, 101 bus_space_tag_t *, bus_space_handle_t *, 102 vme_mapresc_t *)); 103 static void sparc_vme_unmap __P((void *, vme_mapresc_t)); 104 static int sparc_vme_intr_map __P((void *, int, int, vme_intr_handle_t *)); 105 static const struct evcnt *sparc_vme_intr_evcnt __P((void *, 106 vme_intr_handle_t)); 107 static void * sparc_vme_intr_establish __P((void *, vme_intr_handle_t, int, 108 int (*) __P((void *)), void *)); 109 static void sparc_vme_intr_disestablish __P((void *, void *)); 110 111 static int vmebus_translate __P((struct sparcvme_softc *, vme_am_t, 112 vme_addr_t, bus_type_t *, bus_addr_t *)); 113 #if defined(SUN4M) 114 static void sparc_vme_iommu_barrier __P(( bus_space_tag_t, bus_space_handle_t, 115 bus_size_t, bus_size_t, int)); 116 117 #endif 118 119 /* 120 * DMA functions. 121 */ 122 static void sparc_vct_dmamap_destroy __P((void *, bus_dmamap_t)); 123 124 #if defined(SUN4) 125 static int sparc_vct4_dmamap_create __P((void *, vme_size_t, vme_am_t, 126 vme_datasize_t, vme_swap_t, int, vme_size_t, vme_addr_t, 127 int, bus_dmamap_t *)); 128 static int sparc_vme4_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *, 129 bus_size_t, struct proc *, int)); 130 static void sparc_vme4_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t)); 131 static void sparc_vme4_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, 132 bus_addr_t, bus_size_t, int)); 133 #endif 134 135 #if defined(SUN4M) 136 static int sparc_vct_iommu_dmamap_create __P((void *, vme_size_t, vme_am_t, 137 vme_datasize_t, vme_swap_t, int, vme_size_t, vme_addr_t, 138 int, bus_dmamap_t *)); 139 static int sparc_vme_iommu_dmamap_create __P((bus_dma_tag_t, bus_size_t, 140 int, bus_size_t, bus_size_t, int, bus_dmamap_t *)); 141 142 static int sparc_vme_iommu_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, 143 void *, bus_size_t, struct proc *, int)); 144 static void sparc_vme_iommu_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t)); 145 static void sparc_vme_iommu_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, 146 bus_addr_t, bus_size_t, int)); 147 #endif 148 149 static int sparc_vme_dmamem_map __P((bus_dma_tag_t, bus_dma_segment_t *, 150 int, size_t, caddr_t *, int)); 151 #if 0 152 static void sparc_vme_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t)); 153 static void sparc_vme_dmamem_unmap __P((bus_dma_tag_t, caddr_t, size_t)); 154 static paddr_t sparc_vme_dmamem_mmap __P((bus_dma_tag_t, 155 bus_dma_segment_t *, int, off_t, int, int)); 156 #endif 157 158 int sparc_vme_mmap_cookie __P((vme_addr_t, vme_am_t, bus_space_handle_t *)); 159 160 struct cfattach vme_mainbus_ca = { 161 sizeof(struct sparcvme_softc), vmematch_mainbus, vmeattach_mainbus 162 }; 163 164 struct cfattach vme_iommu_ca = { 165 sizeof(struct sparcvme_softc), vmematch_iommu, vmeattach_iommu 166 }; 167 168 int (*vmeerr_handler) __P((void)); 169 170 #define VMEMOD_D32 0x40 /* ??? */ 171 172 /* If the PROM does not provide the `ranges' property, we make up our own */ 173 struct rom_range vmebus_translations[] = { 174 #define _DS (VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA) 175 { VME_AM_A16|_DS, 0, PMAP_VME16, 0xffff0000, 0 }, 176 { VME_AM_A24|_DS, 0, PMAP_VME16, 0xff000000, 0 }, 177 { VME_AM_A32|_DS, 0, PMAP_VME16, 0x00000000, 0 }, 178 { VME_AM_A16|VMEMOD_D32|_DS, 0, PMAP_VME32, 0xffff0000, 0 }, 179 { VME_AM_A24|VMEMOD_D32|_DS, 0, PMAP_VME32, 0xff000000, 0 }, 180 { VME_AM_A32|VMEMOD_D32|_DS, 0, PMAP_VME32, 0x00000000, 0 } 181 #undef _DS 182 }; 183 184 /* 185 * The VME bus logic on sun4 machines maps DMA requests in the first MB 186 * of VME space to the last MB of DVMA space. `vme_dvmamap' is used 187 * for DVMA space allocations. The DMA addresses returned by 188 * bus_dmamap_load*() must be relocated by -VME4_DVMA_BASE. 189 */ 190 struct extent *vme_dvmamap; 191 192 /* 193 * The VME hardware on the sun4m IOMMU maps the first 8MB of 32-bit 194 * VME space to the last 8MB of DVMA space and the first 1MB of 195 * 24-bit VME space to the first 1MB of the last 8MB of DVMA space 196 * (thus 24-bit VME space overlaps the first 1MB of of 32-bit space). 197 * The following constants define subregions in the IOMMU DVMA map 198 * for VME DVMA allocations. The DMA addresses returned by 199 * bus_dmamap_load*() must be relocated by -VME_IOMMU_DVMA_BASE. 200 */ 201 #define VME_IOMMU_DVMA_BASE 0xff800000 202 #define VME_IOMMU_DVMA_AM24_BASE VME_IOMMU_DVMA_BASE 203 #define VME_IOMMU_DVMA_AM24_END 0xff900000 204 #define VME_IOMMU_DVMA_AM32_BASE VME_IOMMU_DVMA_BASE 205 #define VME_IOMMU_DVMA_AM32_END IOMMU_DVMA_END 206 207 struct sparc_bus_space_tag sparc_vme_bus_tag = { 208 NULL, /* cookie */ 209 NULL, /* parent bus tag */ 210 NULL, /* bus_map */ 211 NULL, /* bus_unmap */ 212 NULL, /* bus_subregion */ 213 NULL /* barrier */ 214 }; 215 216 struct vme_chipset_tag sparc_vme_chipset_tag = { 217 NULL, 218 sparc_vme_map, 219 sparc_vme_unmap, 220 sparc_vme_probe, 221 sparc_vme_intr_map, 222 sparc_vme_intr_evcnt, 223 sparc_vme_intr_establish, 224 sparc_vme_intr_disestablish, 225 0, 0, 0 /* bus specific DMA stuff */ 226 }; 227 228 229 #if defined(SUN4) 230 struct sparc_bus_dma_tag sparc_vme4_dma_tag = { 231 NULL, /* cookie */ 232 _bus_dmamap_create, 233 _bus_dmamap_destroy, 234 sparc_vme4_dmamap_load, 235 _bus_dmamap_load_mbuf, 236 _bus_dmamap_load_uio, 237 _bus_dmamap_load_raw, 238 sparc_vme4_dmamap_unload, 239 sparc_vme4_dmamap_sync, 240 241 _bus_dmamem_alloc, 242 _bus_dmamem_free, 243 sparc_vme_dmamem_map, 244 _bus_dmamem_unmap, 245 _bus_dmamem_mmap 246 }; 247 #endif 248 249 #if defined(SUN4M) 250 struct sparc_bus_dma_tag sparc_vme_iommu_dma_tag = { 251 NULL, /* cookie */ 252 sparc_vme_iommu_dmamap_create, 253 _bus_dmamap_destroy, 254 sparc_vme_iommu_dmamap_load, 255 _bus_dmamap_load_mbuf, 256 _bus_dmamap_load_uio, 257 _bus_dmamap_load_raw, 258 sparc_vme_iommu_dmamap_unload, 259 sparc_vme_iommu_dmamap_sync, 260 261 _bus_dmamem_alloc, 262 _bus_dmamem_free, 263 sparc_vme_dmamem_map, 264 _bus_dmamem_unmap, 265 _bus_dmamem_mmap 266 }; 267 #endif 268 269 270 int 271 vmematch_mainbus(parent, cf, aux) 272 struct device *parent; 273 struct cfdata *cf; 274 void *aux; 275 { 276 struct mainbus_attach_args *ma = aux; 277 278 if (!CPU_ISSUN4) 279 return (0); 280 281 return (strcmp("vme", ma->ma_name) == 0); 282 } 283 284 int 285 vmematch_iommu(parent, cf, aux) 286 struct device *parent; 287 struct cfdata *cf; 288 void *aux; 289 { 290 struct iommu_attach_args *ia = aux; 291 292 return (strcmp("vme", ia->iom_name) == 0); 293 } 294 295 296 void 297 vmeattach_mainbus(parent, self, aux) 298 struct device *parent, *self; 299 void *aux; 300 { 301 #if defined(SUN4) 302 struct mainbus_attach_args *ma = aux; 303 struct sparcvme_softc *sc = (struct sparcvme_softc *)self; 304 struct vmebus_attach_args vba; 305 306 if (self->dv_unit > 0) { 307 printf(" unsupported\n"); 308 return; 309 } 310 311 sc->sc_bustag = ma->ma_bustag; 312 sc->sc_dmatag = ma->ma_dmatag; 313 314 /* VME interrupt entry point */ 315 sc->sc_vmeintr = vmeintr4; 316 317 /*XXX*/ sparc_vme_chipset_tag.cookie = self; 318 /*XXX*/ sparc_vme_chipset_tag.vct_dmamap_create = sparc_vct4_dmamap_create; 319 /*XXX*/ sparc_vme_chipset_tag.vct_dmamap_destroy = sparc_vct_dmamap_destroy; 320 /*XXX*/ sparc_vme4_dma_tag._cookie = self; 321 322 #if 0 323 sparc_vme_bus_tag.parent = ma->ma_bustag; 324 vba.vba_bustag = &sparc_vme_bus_tag; 325 #endif 326 vba.va_vct = &sparc_vme_chipset_tag; 327 vba.va_bdt = &sparc_vme4_dma_tag; 328 vba.va_slaveconfig = 0; 329 330 /* Fall back to our own `range' construction */ 331 sc->sc_range = vmebus_translations; 332 sc->sc_nrange = 333 sizeof(vmebus_translations)/sizeof(vmebus_translations[0]); 334 335 vme_dvmamap = extent_create("vmedvma", VME4_DVMA_BASE, VME4_DVMA_END, 336 M_DEVBUF, 0, 0, EX_NOWAIT); 337 if (vme_dvmamap == NULL) 338 panic("vme: unable to allocate DVMA map"); 339 340 printf("\n"); 341 (void)config_found(self, &vba, 0); 342 343 #endif 344 return; 345 } 346 347 /* sun4m vmebus */ 348 void 349 vmeattach_iommu(parent, self, aux) 350 struct device *parent, *self; 351 void *aux; 352 { 353 #if defined(SUN4M) 354 struct sparcvme_softc *sc = (struct sparcvme_softc *)self; 355 struct iommu_attach_args *ia = aux; 356 struct vmebus_attach_args vba; 357 bus_space_handle_t bh; 358 int node; 359 int cline; 360 361 if (self->dv_unit > 0) { 362 printf(" unsupported\n"); 363 return; 364 } 365 366 sc->sc_bustag = ia->iom_bustag; 367 sc->sc_dmatag = ia->iom_dmatag; 368 369 /* VME interrupt entry point */ 370 sc->sc_vmeintr = vmeintr4m; 371 372 /*XXX*/ sparc_vme_chipset_tag.cookie = self; 373 /*XXX*/ sparc_vme_chipset_tag.vct_dmamap_create = sparc_vct_iommu_dmamap_create; 374 /*XXX*/ sparc_vme_chipset_tag.vct_dmamap_destroy = sparc_vct_dmamap_destroy; 375 /*XXX*/ sparc_vme_iommu_dma_tag._cookie = self; 376 sparc_vme_bus_tag.sparc_bus_barrier = sparc_vme_iommu_barrier; 377 378 #if 0 379 vba.vba_bustag = &sparc_vme_bus_tag; 380 #endif 381 vba.va_vct = &sparc_vme_chipset_tag; 382 vba.va_bdt = &sparc_vme_iommu_dma_tag; 383 vba.va_slaveconfig = 0; 384 385 node = ia->iom_node; 386 387 /* 388 * Map VME control space 389 */ 390 if (ia->iom_nreg < 2) { 391 printf("%s: only %d register sets\n", self->dv_xname, 392 ia->iom_nreg); 393 return; 394 } 395 396 if (bus_space_map2(ia->iom_bustag, 397 (bus_type_t)ia->iom_reg[0].ior_iospace, 398 (bus_addr_t)ia->iom_reg[0].ior_pa, 399 (bus_size_t)ia->iom_reg[0].ior_size, 400 BUS_SPACE_MAP_LINEAR, 401 0, &bh) != 0) { 402 panic("%s: can't map vmebusreg", self->dv_xname); 403 } 404 sc->sc_reg = (struct vmebusreg *)bh; 405 406 if (bus_space_map2(ia->iom_bustag, 407 (bus_type_t)ia->iom_reg[1].ior_iospace, 408 (bus_addr_t)ia->iom_reg[1].ior_pa, 409 (bus_size_t)ia->iom_reg[1].ior_size, 410 BUS_SPACE_MAP_LINEAR, 411 0, &bh) != 0) { 412 panic("%s: can't map vmebusvec", self->dv_xname); 413 } 414 sc->sc_vec = (struct vmebusvec *)bh; 415 416 /* 417 * Map VME IO cache tags and flush control. 418 */ 419 if (bus_space_map2(ia->iom_bustag, 420 (bus_type_t)ia->iom_reg[1].ior_iospace, 421 (bus_addr_t)ia->iom_reg[1].ior_pa + VME_IOC_TAGOFFSET, 422 VME_IOC_SIZE, 423 BUS_SPACE_MAP_LINEAR, 424 0, &bh) != 0) { 425 panic("%s: can't map IOC tags", self->dv_xname); 426 } 427 sc->sc_ioctags = (u_int32_t *)bh; 428 429 if (bus_space_map2(ia->iom_bustag, 430 (bus_type_t)ia->iom_reg[1].ior_iospace, 431 (bus_addr_t)ia->iom_reg[1].ior_pa+VME_IOC_FLUSHOFFSET, 432 VME_IOC_SIZE, 433 BUS_SPACE_MAP_LINEAR, 434 0, &bh) != 0) { 435 panic("%s: can't map IOC flush registers", self->dv_xname); 436 } 437 sc->sc_iocflush = (u_int32_t *)bh; 438 439 /*XXX*/ sparc_vme_bus_tag.cookie = sc->sc_reg; 440 441 /* 442 * Get "range" property. 443 */ 444 if (getprop(node, "ranges", sizeof(struct rom_range), 445 &sc->sc_nrange, (void **)&sc->sc_range) != 0) { 446 panic("%s: can't get ranges property", self->dv_xname); 447 } 448 449 sparcvme_sc = sc; 450 vmeerr_handler = sparc_vme_error; 451 452 /* 453 * Invalidate all IO-cache entries. 454 */ 455 for (cline = VME_IOC_SIZE/VME_IOC_LINESZ; cline > 0;) { 456 sc->sc_ioctags[--cline] = 0; 457 } 458 459 /* Enable IO-cache */ 460 sc->sc_reg->vmebus_cr |= VMEBUS_CR_C; 461 462 printf(": version 0x%x\n", 463 sc->sc_reg->vmebus_cr & VMEBUS_CR_IMPL); 464 465 (void)config_found(self, &vba, 0); 466 #endif 467 } 468 469 #if defined(SUN4M) 470 static int 471 sparc_vme_error() 472 { 473 struct sparcvme_softc *sc = sparcvme_sc; 474 u_int32_t afsr, afpa; 475 char bits[64]; 476 477 afsr = sc->sc_reg->vmebus_afsr; 478 afpa = sc->sc_reg->vmebus_afar; 479 printf("VME error:\n\tAFSR %s\n", 480 bitmask_snprintf(afsr, VMEBUS_AFSR_BITS, bits, sizeof(bits))); 481 printf("\taddress: 0x%x%x\n", afsr, afpa); 482 return (0); 483 } 484 #endif 485 486 int 487 vmebus_translate(sc, mod, addr, btp, bap) 488 struct sparcvme_softc *sc; 489 vme_am_t mod; 490 vme_addr_t addr; 491 bus_type_t *btp; 492 bus_addr_t *bap; 493 { 494 int i; 495 496 for (i = 0; i < sc->sc_nrange; i++) { 497 498 if (sc->sc_range[i].cspace != mod) 499 continue; 500 501 /* We've found the connection to the parent bus */ 502 *bap = sc->sc_range[i].poffset + addr; 503 *btp = sc->sc_range[i].pspace; 504 return (0); 505 } 506 return (ENOENT); 507 } 508 509 struct vmeprobe_myarg { 510 int (*cb) __P((void *, bus_space_tag_t, bus_space_handle_t)); 511 void *cbarg; 512 bus_space_tag_t tag; 513 int res; /* backwards */ 514 }; 515 516 static int vmeprobe_mycb __P((void *, void *)); 517 static int 518 vmeprobe_mycb(bh, arg) 519 void *bh, *arg; 520 { 521 struct vmeprobe_myarg *a = arg; 522 523 a->res = (*a->cb)(a->cbarg, a->tag, (bus_space_handle_t)bh); 524 return (!a->res); 525 } 526 527 int 528 sparc_vme_probe(cookie, addr, len, mod, datasize, callback, arg) 529 void *cookie; 530 vme_addr_t addr; 531 vme_size_t len; 532 vme_am_t mod; 533 vme_datasize_t datasize; 534 int (*callback) __P((void *, bus_space_tag_t, bus_space_handle_t)); 535 void *arg; 536 { 537 struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie; 538 bus_type_t iospace; 539 bus_addr_t paddr; 540 bus_size_t size; 541 struct vmeprobe_myarg myarg; 542 int res, i; 543 544 if (vmebus_translate(sc, mod, addr, &iospace, &paddr) != 0) 545 return (EINVAL); 546 547 size = (datasize == VME_D8 ? 1 : (datasize == VME_D16 ? 2 : 4)); 548 549 if (callback) { 550 myarg.cb = callback; 551 myarg.cbarg = arg; 552 myarg.tag = sc->sc_bustag; 553 myarg.res = 0; 554 res = bus_space_probe(sc->sc_bustag, iospace, paddr, size, 0, 555 0, vmeprobe_mycb, &myarg); 556 return (res ? 0 : (myarg.res ? myarg.res : EIO)); 557 } 558 559 for (i = 0; i < len / size; i++) { 560 myarg.res = 0; 561 res = bus_space_probe(sc->sc_bustag, iospace, paddr, size, 0, 562 0, 0, 0); 563 if (res == 0) 564 return (EIO); 565 paddr += size; 566 } 567 return (0); 568 } 569 570 int 571 sparc_vme_map(cookie, addr, size, mod, datasize, swap, tp, hp, rp) 572 void *cookie; 573 vme_addr_t addr; 574 vme_size_t size; 575 vme_am_t mod; 576 vme_datasize_t datasize; 577 vme_swap_t swap; 578 bus_space_tag_t *tp; 579 bus_space_handle_t *hp; 580 vme_mapresc_t *rp; 581 { 582 struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie; 583 bus_type_t iospace; 584 bus_addr_t paddr; 585 int error; 586 587 error = vmebus_translate(sc, mod, addr, &iospace, &paddr); 588 if (error != 0) 589 return (error); 590 591 *tp = sc->sc_bustag; 592 return (bus_space_map2(sc->sc_bustag, iospace, paddr, size, 0, 0, hp)); 593 } 594 595 int 596 sparc_vme_mmap_cookie(addr, mod, hp) 597 vme_addr_t addr; 598 vme_am_t mod; 599 bus_space_handle_t *hp; 600 { 601 struct sparcvme_softc *sc = sparcvme_sc; 602 bus_type_t iospace; 603 bus_addr_t paddr; 604 int error; 605 606 error = vmebus_translate(sc, mod, addr, &iospace, &paddr); 607 if (error != 0) 608 return (error); 609 610 return (bus_space_mmap(sc->sc_bustag, iospace, paddr, 0, hp)); 611 } 612 613 #if defined(SUN4M) 614 void 615 sparc_vme_iommu_barrier(t, h, offset, size, flags) 616 bus_space_tag_t t; 617 bus_space_handle_t h; 618 bus_size_t offset; 619 bus_size_t size; 620 int flags; 621 { 622 struct vmebusreg *vbp = (struct vmebusreg *)t->cookie; 623 624 /* Read async fault status to flush write-buffers */ 625 (*(volatile int *)&vbp->vmebus_afsr); 626 } 627 #endif 628 629 630 631 /* 632 * VME Interrupt Priority Level to sparc Processor Interrupt Level. 633 */ 634 static int vme_ipl_to_pil[] = { 635 0, 636 2, 637 3, 638 5, 639 7, 640 9, 641 11, 642 13 643 }; 644 645 646 /* 647 * All VME device interrupts go through vmeintr(). This function reads 648 * the VME vector from the bus, then dispatches the device interrupt 649 * handler. All handlers for devices that map to the same Processor 650 * Interrupt Level (according to the table above) are on a linked list 651 * of `sparc_vme_intr_handle' structures. The head of which is passed 652 * down as the argument to `vmeintr(void *arg)'. 653 */ 654 struct sparc_vme_intr_handle { 655 struct intrhand ih; 656 struct sparc_vme_intr_handle *next; 657 int vec; /* VME interrupt vector */ 658 int pri; /* VME interrupt priority */ 659 struct sparcvme_softc *sc;/*XXX*/ 660 }; 661 662 #if defined(SUN4) 663 int 664 vmeintr4(arg) 665 void *arg; 666 { 667 struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg; 668 int level, vec; 669 int rv = 0; 670 671 level = (ihp->pri << 1) | 1; 672 673 vec = ldcontrolb((caddr_t)(AC_VMEINTVEC | level)); 674 675 if (vec == -1) { 676 #ifdef DEBUG 677 /* 678 * This seems to happen only with the i82586 based 679 * `ie1' boards. 680 */ 681 printf("vme: spurious interrupt at VME level %d\n", ihp->pri); 682 #endif 683 return (1); /* XXX - pretend we handled it, for now */ 684 } 685 686 for (; ihp; ihp = ihp->next) 687 if (ihp->vec == vec && ihp->ih.ih_fun) 688 rv |= (ihp->ih.ih_fun)(ihp->ih.ih_arg); 689 690 return (rv); 691 } 692 #endif 693 694 #if defined(SUN4M) 695 int 696 vmeintr4m(arg) 697 void *arg; 698 { 699 struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg; 700 int level, vec; 701 int rv = 0; 702 703 level = (ihp->pri << 1) | 1; 704 705 #if 0 706 int pending; 707 708 /* Flush VME <=> Sbus write buffers */ 709 (*(volatile int *)&ihp->sc->sc_reg->vmebus_afsr); 710 711 pending = *((int*)ICR_SI_PEND); 712 if ((pending & SINTR_VME(ihp->pri)) == 0) { 713 printf("vmeintr: non pending at pri %x(p 0x%x)\n", 714 ihp->pri, pending); 715 return (0); 716 } 717 #endif 718 #if 0 719 /* Why gives this a bus timeout sometimes? */ 720 vec = ihp->sc->sc_vec->vmebusvec[level]; 721 #else 722 /* so, arrange to catch the fault... */ 723 { 724 extern struct user *proc0paddr; 725 extern int fkbyte __P((caddr_t, struct pcb *)); 726 caddr_t addr = (caddr_t)&ihp->sc->sc_vec->vmebusvec[level]; 727 struct pcb *xpcb; 728 u_long saveonfault; 729 int s; 730 731 s = splhigh(); 732 if (curproc == NULL) 733 xpcb = (struct pcb *)proc0paddr; 734 else 735 xpcb = &curproc->p_addr->u_pcb; 736 737 saveonfault = (u_long)xpcb->pcb_onfault; 738 vec = fkbyte(addr, xpcb); 739 xpcb->pcb_onfault = (caddr_t)saveonfault; 740 741 splx(s); 742 } 743 #endif 744 745 if (vec == -1) { 746 #ifdef DEBUG 747 /* 748 * This seems to happen only with the i82586 based 749 * `ie1' boards. 750 */ 751 printf("vme: spurious interrupt at VME level %d\n", ihp->pri); 752 printf(" ICR_SI_PEND=0x%x; VME AFSR=0x%x; VME AFAR=0x%x\n", 753 *((int*)ICR_SI_PEND), 754 ihp->sc->sc_reg->vmebus_afsr, 755 ihp->sc->sc_reg->vmebus_afar); 756 #endif 757 return (1); /* XXX - pretend we handled it, for now */ 758 } 759 760 for (; ihp; ihp = ihp->next) 761 if (ihp->vec == vec && ihp->ih.ih_fun) 762 rv |= (ihp->ih.ih_fun)(ihp->ih.ih_arg); 763 764 return (rv); 765 } 766 #endif 767 768 int 769 sparc_vme_intr_map(cookie, level, vec, ihp) 770 void *cookie; 771 int level; 772 int vec; 773 vme_intr_handle_t *ihp; 774 { 775 struct sparc_vme_intr_handle *ih; 776 777 ih = (vme_intr_handle_t) 778 malloc(sizeof(struct sparc_vme_intr_handle), M_DEVBUF, M_NOWAIT); 779 ih->pri = level; 780 ih->vec = vec; 781 ih->sc = cookie;/*XXX*/ 782 *ihp = ih; 783 return (0); 784 } 785 786 const struct evcnt * 787 sparc_vme_intr_evcnt(cookie, vih) 788 void *cookie; 789 vme_intr_handle_t vih; 790 { 791 792 /* XXX for now, no evcnt parent reported */ 793 return NULL; 794 } 795 796 void * 797 sparc_vme_intr_establish(cookie, vih, pri, func, arg) 798 void *cookie; 799 vme_intr_handle_t vih; 800 int pri; 801 int (*func) __P((void *)); 802 void *arg; 803 { 804 struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie; 805 struct sparc_vme_intr_handle *svih = 806 (struct sparc_vme_intr_handle *)vih; 807 struct intrhand *ih; 808 int level; 809 810 /* XXX pri == svih->pri ??? */ 811 812 /* Translate VME priority to processor IPL */ 813 level = vme_ipl_to_pil[svih->pri]; 814 815 svih->ih.ih_fun = func; 816 svih->ih.ih_arg = arg; 817 svih->next = NULL; 818 819 /* ensure the interrupt subsystem will call us at this level */ 820 for (ih = intrhand[level]; ih != NULL; ih = ih->ih_next) 821 if (ih->ih_fun == sc->sc_vmeintr) 822 break; 823 824 if (ih == NULL) { 825 ih = (struct intrhand *) 826 malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT); 827 if (ih == NULL) 828 panic("vme_addirq"); 829 bzero(ih, sizeof *ih); 830 ih->ih_fun = sc->sc_vmeintr; 831 ih->ih_arg = vih; 832 intr_establish(level, ih); 833 } else { 834 svih->next = (vme_intr_handle_t)ih->ih_arg; 835 ih->ih_arg = vih; 836 } 837 return (NULL); 838 } 839 840 void 841 sparc_vme_unmap(cookie, resc) 842 void * cookie; 843 vme_mapresc_t resc; 844 { 845 /* Not implemented */ 846 panic("sparc_vme_unmap"); 847 } 848 849 void 850 sparc_vme_intr_disestablish(cookie, a) 851 void *cookie; 852 void *a; 853 { 854 /* Not implemented */ 855 panic("sparc_vme_intr_disestablish"); 856 } 857 858 859 860 /* 861 * VME DMA functions. 862 */ 863 864 static void 865 sparc_vct_dmamap_destroy(cookie, map) 866 void *cookie; 867 bus_dmamap_t map; 868 { 869 struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie; 870 bus_dmamap_destroy(sc->sc_dmatag, map); 871 } 872 873 #if defined(SUN4) 874 static int 875 sparc_vct4_dmamap_create(cookie, size, am, datasize, swap, nsegments, maxsegsz, 876 boundary, flags, dmamp) 877 void *cookie; 878 vme_size_t size; 879 vme_am_t am; 880 vme_datasize_t datasize; 881 vme_swap_t swap; 882 int nsegments; 883 vme_size_t maxsegsz; 884 vme_addr_t boundary; 885 int flags; 886 bus_dmamap_t *dmamp; 887 { 888 struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie; 889 890 /* Allocate a base map through parent bus ops */ 891 return (bus_dmamap_create(sc->sc_dmatag, size, nsegments, maxsegsz, 892 boundary, flags, dmamp)); 893 } 894 895 int 896 sparc_vme4_dmamap_load(t, map, buf, buflen, p, flags) 897 bus_dma_tag_t t; 898 bus_dmamap_t map; 899 void *buf; 900 bus_size_t buflen; 901 struct proc *p; 902 int flags; 903 { 904 bus_addr_t dva; 905 bus_size_t sgsize; 906 vaddr_t va, voff; 907 pmap_t pmap; 908 int pagesz = PAGE_SIZE; 909 int error; 910 911 cpuinfo.cache_flush(buf, buflen); /* XXX - move to bus_dma_sync */ 912 913 va = (vaddr_t)buf; 914 voff = va & (pagesz - 1); 915 va &= -pagesz; 916 917 /* 918 * Allocate an integral number of pages from DVMA space 919 * covering the passed buffer. 920 */ 921 sgsize = (buflen + voff + pagesz - 1) & -pagesz; 922 error = extent_alloc(vme_dvmamap, sgsize, pagesz, 923 map->_dm_boundary, 924 (flags & BUS_DMA_NOWAIT) == 0 925 ? EX_WAITOK 926 : EX_NOWAIT, 927 (u_long *)&dva); 928 if (error != 0) 929 return (error); 930 931 map->dm_mapsize = buflen; 932 map->dm_nsegs = 1; 933 /* Adjust DVMA address to VME view */ 934 map->dm_segs[0].ds_addr = dva + voff - VME4_DVMA_BASE; 935 map->dm_segs[0].ds_len = buflen; 936 map->dm_segs[0]._ds_sgsize = sgsize; 937 938 pmap = (p == NULL) ? pmap_kernel() : p->p_vmspace->vm_map.pmap; 939 940 for (; sgsize != 0; ) { 941 paddr_t pa; 942 /* 943 * Get the physical address for this page. 944 */ 945 (void) pmap_extract(pmap, va, &pa); 946 947 #ifdef notyet 948 if (have_iocache) 949 pa |= PG_IOC; 950 #endif 951 pmap_enter(pmap_kernel(), dva, 952 pa | PMAP_NC, 953 VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED); 954 955 dva += pagesz; 956 va += pagesz; 957 sgsize -= pagesz; 958 } 959 960 return (0); 961 } 962 963 void 964 sparc_vme4_dmamap_unload(t, map) 965 bus_dma_tag_t t; 966 bus_dmamap_t map; 967 { 968 bus_dma_segment_t *segs = map->dm_segs; 969 int nsegs = map->dm_nsegs; 970 bus_addr_t dva; 971 bus_size_t len; 972 int i, s, error; 973 974 for (i = 0; i < nsegs; i++) { 975 /* Go from VME to CPU view */ 976 dva = segs[i].ds_addr + VME4_DVMA_BASE; 977 dva &= -PAGE_SIZE; 978 len = segs[i]._ds_sgsize; 979 980 /* Remove double-mapping in DVMA space */ 981 pmap_remove(pmap_kernel(), dva, dva + len); 982 983 /* Release DVMA space */ 984 s = splhigh(); 985 error = extent_free(vme_dvmamap, dva, len, EX_NOWAIT); 986 splx(s); 987 if (error != 0) 988 printf("warning: %ld of DVMA space lost\n", len); 989 } 990 991 /* Mark the mappings as invalid. */ 992 map->dm_mapsize = 0; 993 map->dm_nsegs = 0; 994 } 995 996 void 997 sparc_vme4_dmamap_sync(t, map, offset, len, ops) 998 bus_dma_tag_t t; 999 bus_dmamap_t map; 1000 bus_addr_t offset; 1001 bus_size_t len; 1002 int ops; 1003 { 1004 1005 /* 1006 * XXX Should perform cache flushes as necessary (e.g. 4/200 W/B). 1007 * Currently the cache is flushed in bus_dma_load()... 1008 */ 1009 } 1010 #endif /* SUN4 */ 1011 1012 #if defined(SUN4M) 1013 static int 1014 sparc_vme_iommu_dmamap_create (t, size, nsegments, maxsegsz, 1015 boundary, flags, dmamp) 1016 bus_dma_tag_t t; 1017 bus_size_t size; 1018 int nsegments; 1019 bus_size_t maxsegsz; 1020 bus_size_t boundary; 1021 int flags; 1022 bus_dmamap_t *dmamp; 1023 { 1024 1025 printf("sparc_vme_dmamap_create: please use `vme_dmamap_create'\n"); 1026 return (EINVAL); 1027 } 1028 1029 static int 1030 sparc_vct_iommu_dmamap_create(cookie, size, am, datasize, swap, nsegments, 1031 maxsegsz, boundary, flags, dmamp) 1032 void *cookie; 1033 vme_size_t size; 1034 vme_am_t am; 1035 vme_datasize_t datasize; 1036 vme_swap_t swap; 1037 int nsegments; 1038 vme_size_t maxsegsz; 1039 vme_addr_t boundary; 1040 int flags; 1041 bus_dmamap_t *dmamp; 1042 { 1043 struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie; 1044 bus_dmamap_t map; 1045 int error; 1046 1047 /* Allocate a base map through parent bus ops */ 1048 error = bus_dmamap_create(sc->sc_dmatag, size, nsegments, maxsegsz, 1049 boundary, flags, &map); 1050 if (error != 0) 1051 return (error); 1052 1053 /* 1054 * Each I/O cache line maps to a 8K section of VME DVMA space, so 1055 * we must ensure that DVMA alloctions are always 8K aligned. 1056 */ 1057 map->_dm_align = VME_IOC_PAGESZ; 1058 1059 /* Set map region based on Address Modifier */ 1060 switch ((am & VME_AM_ADRSIZEMASK)) { 1061 case VME_AM_A16: 1062 case VME_AM_A24: 1063 /* 1 MB of DVMA space */ 1064 map->_dm_ex_start = VME_IOMMU_DVMA_AM24_BASE; 1065 map->_dm_ex_end = VME_IOMMU_DVMA_AM24_END; 1066 break; 1067 case VME_AM_A32: 1068 /* 8 MB of DVMA space */ 1069 map->_dm_ex_start = VME_IOMMU_DVMA_AM32_BASE; 1070 map->_dm_ex_end = VME_IOMMU_DVMA_AM32_END; 1071 break; 1072 } 1073 1074 *dmamp = map; 1075 return (0); 1076 } 1077 1078 int 1079 sparc_vme_iommu_dmamap_load(t, map, buf, buflen, p, flags) 1080 bus_dma_tag_t t; 1081 bus_dmamap_t map; 1082 void *buf; 1083 bus_size_t buflen; 1084 struct proc *p; 1085 int flags; 1086 { 1087 struct sparcvme_softc *sc = (struct sparcvme_softc *)t->_cookie; 1088 volatile u_int32_t *ioctags; 1089 int error; 1090 1091 /* Round request to a multiple of the I/O cache size */ 1092 buflen = (buflen + VME_IOC_PAGESZ - 1) & -VME_IOC_PAGESZ; 1093 error = bus_dmamap_load(sc->sc_dmatag, map, buf, buflen, p, flags); 1094 if (error != 0) 1095 return (error); 1096 1097 /* Allocate I/O cache entries for this range */ 1098 ioctags = sc->sc_ioctags + VME_IOC_LINE(map->dm_segs[0].ds_addr); 1099 while (buflen > 0) { 1100 *ioctags = VME_IOC_IC | VME_IOC_W; 1101 ioctags += VME_IOC_LINESZ/sizeof(*ioctags); 1102 buflen -= VME_IOC_PAGESZ; 1103 } 1104 1105 /* 1106 * Adjust DVMA address to VME view. 1107 * Note: the DVMA base address is the same for all 1108 * VME address spaces. 1109 */ 1110 map->dm_segs[0].ds_addr -= VME_IOMMU_DVMA_BASE; 1111 return (0); 1112 } 1113 1114 1115 void 1116 sparc_vme_iommu_dmamap_unload(t, map) 1117 bus_dma_tag_t t; 1118 bus_dmamap_t map; 1119 { 1120 struct sparcvme_softc *sc = (struct sparcvme_softc *)t->_cookie; 1121 volatile u_int32_t *flushregs; 1122 int len; 1123 1124 /* Go from VME to CPU view */ 1125 map->dm_segs[0].ds_addr += VME_IOMMU_DVMA_BASE; 1126 1127 /* Flush VME I/O cache */ 1128 len = map->dm_segs[0]._ds_sgsize; 1129 flushregs = sc->sc_iocflush + VME_IOC_LINE(map->dm_segs[0].ds_addr); 1130 while (len > 0) { 1131 *flushregs = 0; 1132 flushregs += VME_IOC_LINESZ/sizeof(*flushregs); 1133 len -= VME_IOC_PAGESZ; 1134 } 1135 1136 /* 1137 * Start a read from `tag space' which will not complete until 1138 * all cache flushes have finished 1139 */ 1140 (*sc->sc_ioctags); 1141 1142 bus_dmamap_unload(sc->sc_dmatag, map); 1143 } 1144 1145 void 1146 sparc_vme_iommu_dmamap_sync(t, map, offset, len, ops) 1147 bus_dma_tag_t t; 1148 bus_dmamap_t map; 1149 bus_addr_t offset; 1150 bus_size_t len; 1151 int ops; 1152 { 1153 1154 /* 1155 * XXX Should perform cache flushes as necessary. 1156 */ 1157 } 1158 #endif /* SUN4M */ 1159 1160 int 1161 sparc_vme_dmamem_map(t, segs, nsegs, size, kvap, flags) 1162 bus_dma_tag_t t; 1163 bus_dma_segment_t *segs; 1164 int nsegs; 1165 size_t size; 1166 caddr_t *kvap; 1167 int flags; 1168 { 1169 struct sparcvme_softc *sc = (struct sparcvme_softc *)t->_cookie; 1170 1171 return (bus_dmamem_map(sc->sc_dmatag, segs, nsegs, size, kvap, flags)); 1172 } 1173