/*	$NetBSD: esp.c,v 1.48 2004/02/24 15:12:51 wiz Exp $	*/

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994 Peter Galbavy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Peter Galbavy
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Based on aic6360 by Jarle Greipsland
 *
 * Acknowledgements: Many of the algorithms used in this driver are
 * inspired by the work of Julian Elischer (julian@tfs.com) and
 * Charles Hannum (mycroft@duality.gnu.ai.mit.edu).  Thanks a million!
 */

/*
 * Grabbed from the sparc port at revision 1.73 for the NeXT.
 * Darrin B. Jewell <dbj@NetBSD.org>  Sat Jul  4 15:41:32 1998
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: esp.c,v 1.48 2004/02/24 15:12:51 wiz Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>
#include <dev/scsipi/scsi_message.h>

#include <machine/bus.h>
#include <machine/autoconf.h>
#include <machine/cpu.h>

#include <dev/ic/ncr53c9xreg.h>
#include <dev/ic/ncr53c9xvar.h>

#include <next68k/next68k/isr.h>

#include <next68k/dev/intiovar.h>
#include <next68k/dev/nextdmareg.h>
#include <next68k/dev/nextdmavar.h>

#include <next68k/dev/espreg.h>
#include <next68k/dev/espvar.h>

#ifdef DEBUG
#undef ESP_DEBUG
#endif

#ifdef ESP_DEBUG
int esp_debug = 0;
#define DPRINTF(x) if (esp_debug) printf x;
extern char *ndtracep;
extern char ndtrace[];
extern int ndtraceshow;
#define NDTRACEIF(x) if (10 && ndtracep < (ndtrace + 8192)) do {x;} while (0)
#else
#define DPRINTF(x)
#define NDTRACEIF(x)
#endif
#define PRINTF(x) printf x;


void	espattach_intio __P((struct device *, struct device *, void *));
int	espmatch_intio __P((struct device *, struct cfdata *, void *));

/* DMA callbacks */
bus_dmamap_t esp_dmacb_continue __P((void *arg));
void esp_dmacb_completed __P((bus_dmamap_t map, void *arg));
void esp_dmacb_shutdown __P((void *arg));

static void findchannel_defer __P((struct device *));

#ifdef ESP_DEBUG
char esp_dma_dump[5*1024] = "";
struct ncr53c9x_softc *esp_debug_sc = 0;
void esp_dma_store __P((struct ncr53c9x_softc *sc));
void esp_dma_print __P((struct ncr53c9x_softc *sc));
int esp_dma_nest = 0;
#endif


/* Linkup to the rest of the kernel */
CFATTACH_DECL(esp, sizeof(struct esp_softc),
    espmatch_intio, espattach_intio, NULL, NULL);

static int attached = 0;

/*
 * Functions and the switch for the MI code.
 */
u_char	esp_read_reg __P((struct ncr53c9x_softc *, int));
void	esp_write_reg __P((struct ncr53c9x_softc *, int, u_char));
int	esp_dma_isintr __P((struct ncr53c9x_softc *));
void	esp_dma_reset __P((struct ncr53c9x_softc *));
int	esp_dma_intr __P((struct ncr53c9x_softc *));
int	esp_dma_setup __P((struct ncr53c9x_softc *, caddr_t *,
	    size_t *, int, size_t *));
void	esp_dma_go __P((struct ncr53c9x_softc *));
void	esp_dma_stop __P((struct ncr53c9x_softc *));
int	esp_dma_isactive __P((struct ncr53c9x_softc *));

struct ncr53c9x_glue esp_glue = {
	esp_read_reg,
	esp_write_reg,
	esp_dma_isintr,
	esp_dma_reset,
	esp_dma_intr,
	esp_dma_setup,
	esp_dma_go,
	esp_dma_stop,
	esp_dma_isactive,
	NULL,			/* gl_clear_latched_intr */
};

#ifdef ESP_DEBUG
#define XCHR(x) "0123456789abcdef"[(x) & 0xf]
static void
esp_hex_dump(unsigned char *pkt, size_t len)
{
	size_t i, j;

	printf("00000000  ");
	for(i=0; i<len; i++) {
		printf("%c%c ", XCHR(pkt[i]>>4), XCHR(pkt[i]));
		if ((i+1) % 16 == 8) {
			printf(" ");
		}
		if ((i+1) % 16 == 0) {
			printf(" %c", '|');
			for(j=0; j<16; j++) {
				printf("%c", pkt[i-15+j]>=32 && pkt[i-15+j]<127?pkt[i-15+j]:'.');
			}
			printf("%c\n%c%c%c%c%c%c%c%c  ", '|',
			    XCHR((i+1)>>28),XCHR((i+1)>>24),XCHR((i+1)>>20),XCHR((i+1)>>16),
			    XCHR((i+1)>>12), XCHR((i+1)>>8), XCHR((i+1)>>4), XCHR(i+1));
		}
	}
	printf("\n");
}
#endif

int
espmatch_intio(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;

	if (attached)
		return (0);

	ia->ia_addr = (void *)NEXT_P_SCSI;

	return(1);
}

static void
findchannel_defer(self)
	struct device *self;
{
	struct esp_softc *esc = (void *)self;
	struct ncr53c9x_softc *sc = &esc->sc_ncr53c9x;
	int error;

	if (!esc->sc_dma) {
		printf ("%s", sc->sc_dev.dv_xname);
		esc->sc_dma = nextdma_findchannel ("scsi");
		if (!esc->sc_dma)
			panic ("%s: can't find DMA channel",
			    sc->sc_dev.dv_xname);
	}

	nextdma_setconf (esc->sc_dma, shutdown_cb, &esp_dmacb_shutdown);
	nextdma_setconf (esc->sc_dma, continue_cb, &esp_dmacb_continue);
	nextdma_setconf (esc->sc_dma, completed_cb, &esp_dmacb_completed);
	nextdma_setconf (esc->sc_dma, cb_arg, sc);
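
	/*
	 * Rough sketch of the callback protocol registered above, as this
	 * driver uses it (inferred from the callbacks themselves, not a
	 * definitive description of the nextdma API): the DMA engine calls
	 * continue_cb (esp_dmacb_continue) whenever it wants the next loaded
	 * map to chain in, completed_cb (esp_dmacb_completed) as each map
	 * finishes, and shutdown_cb (esp_dmacb_shutdown) once the whole
	 * chain is done, which is where the maps are synced, unloaded and
	 * the tail bounce buffer copied back for reads.
	 */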

	error = bus_dmamap_create(esc->sc_dma->sc_dmat,
	    sc->sc_maxxfer,
	    sc->sc_maxxfer/PAGE_SIZE+1, sc->sc_maxxfer,
	    0, BUS_DMA_ALLOCNOW, &esc->sc_main_dmamap);
	if (error) {
		panic("%s: can't create main i/o DMA map, error = %d",
		    sc->sc_dev.dv_xname, error);
	}

	error = bus_dmamap_create(esc->sc_dma->sc_dmat,
	    ESP_DMA_TAILBUFSIZE, 1, ESP_DMA_TAILBUFSIZE,
	    0, BUS_DMA_ALLOCNOW, &esc->sc_tail_dmamap);
	if (error) {
		panic("%s: can't create tail i/o DMA map, error = %d",
		    sc->sc_dev.dv_xname, error);
	}

#if 0
	/* Turn on target selection using the `DMA' method */
	sc->sc_features |= NCR_F_DMASELECT;
#endif

	/* Do the common parts of attachment. */
	sc->sc_adapter.adapt_minphys = minphys;
	sc->sc_adapter.adapt_request = ncr53c9x_scsipi_request;
	ncr53c9x_attach(sc);

	/* Establish interrupt channel */
	isrlink_autovec(ncr53c9x_intr, sc, NEXT_I_IPL(NEXT_I_SCSI), 0, NULL);
	INTR_ENABLE(NEXT_I_SCSI);

	/* register interrupt stats */
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
	    sc->sc_dev.dv_xname, "intr");

	printf ("%s: using DMA channel %s\n", sc->sc_dev.dv_xname,
	    esc->sc_dma->sc_dev.dv_xname);
}

void
espattach_intio(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct esp_softc *esc = (void *)self;
	struct ncr53c9x_softc *sc = &esc->sc_ncr53c9x;
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;

#ifdef ESP_DEBUG
	esp_debug_sc = sc;
#endif

	esc->sc_bst = ia->ia_bst;
	if (bus_space_map(esc->sc_bst, NEXT_P_SCSI,
	    ESP_DEVICE_SIZE, 0, &esc->sc_bsh)) {
		panic("\n%s: can't map ncr53c90 registers",
		    sc->sc_dev.dv_xname);
	}

	sc->sc_id = 7;
	sc->sc_freq = 20;	/* MHz */

	/*
	 * Set up glue for MI code early; we use some of it here.
	 */
	sc->sc_glue = &esp_glue;

	/*
	 * XXX More of this should be in ncr53c9x_attach(), but
	 * XXX should we really poke around the chip that much in
	 * XXX the MI code?  Think about this more...
	 */

	/*
	 * It is necessary to try to load the 2nd config register here,
	 * to find out what rev the esp chip is, else the ncr53c9x_reset
	 * will not set up the defaults correctly.
	 */
	sc->sc_cfg1 = sc->sc_id | NCRCFG1_PARENB;
	sc->sc_cfg2 = NCRCFG2_SCSI2 | NCRCFG2_RPE;
	sc->sc_cfg3 = NCRCFG3_CDB;
	NCR_WRITE_REG(sc, NCR_CFG2, sc->sc_cfg2);

	if ((NCR_READ_REG(sc, NCR_CFG2) & ~NCRCFG2_RSVD) !=
	    (NCRCFG2_SCSI2 | NCRCFG2_RPE)) {
		sc->sc_rev = NCR_VARIANT_ESP100;
	} else {
		sc->sc_cfg2 = NCRCFG2_SCSI2;
		NCR_WRITE_REG(sc, NCR_CFG2, sc->sc_cfg2);
		sc->sc_cfg3 = 0;
		NCR_WRITE_REG(sc, NCR_CFG3, sc->sc_cfg3);
		sc->sc_cfg3 = (NCRCFG3_CDB | NCRCFG3_FCLK);
		NCR_WRITE_REG(sc, NCR_CFG3, sc->sc_cfg3);
		if (NCR_READ_REG(sc, NCR_CFG3) !=
		    (NCRCFG3_CDB | NCRCFG3_FCLK)) {
			sc->sc_rev = NCR_VARIANT_ESP100A;
		} else {
			/* NCRCFG2_FE enables > 64K transfers */
			sc->sc_cfg2 |= NCRCFG2_FE;
			sc->sc_cfg3 = 0;
			NCR_WRITE_REG(sc, NCR_CFG3, sc->sc_cfg3);
			sc->sc_rev = NCR_VARIANT_ESP200;
		}
	}

	/*
	 * XXX minsync and maxxfer _should_ be set up in MI code,
	 * XXX but it appears to have some dependency on what sort
	 * XXX of DMA we're hooked up to, etc.
	 */

	/*
	 * This is the value used to start sync negotiations
	 * Note that the NCR register "SYNCTP" is programmed
	 * in "clocks per byte", and has a minimum value of 4.
	 * The SCSI period used in negotiation is one-fourth
	 * of the time (in nanoseconds) needed to transfer one byte.
	 * Since the chip's clock is given in MHz, we have the following
	 * formula: 4 * period = (1000 / freq) * 4
	 */
	sc->sc_minsync = /* 1000 / sc->sc_freq */ 0;
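
	/*
	 * Illustrative numbers only (not taken from the original source):
	 * with sc_freq = 20 MHz one clock is 1000 / 20 = 50 ns, so the
	 * formula above would yield a starting sync period of 50.  This
	 * driver leaves sc_minsync at 0, which appears to disable sync
	 * negotiation here.
	 */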

	/*
	 * Alas, we must now modify the value a bit, because it's
	 * only valid when we can switch on FASTCLK and FASTSCSI bits
	 * in config register 3...
	 */
	switch (sc->sc_rev) {
	case NCR_VARIANT_ESP100:
		sc->sc_maxxfer = 64 * 1024;
		sc->sc_minsync = 0;	/* No synch on old chip? */
		break;

	case NCR_VARIANT_ESP100A:
		sc->sc_maxxfer = 64 * 1024;
		/* Min clocks/byte is 5 */
		sc->sc_minsync = /* ncr53c9x_cpb2stp(sc, 5) */ 0;
		break;

	case NCR_VARIANT_ESP200:
		sc->sc_maxxfer = 16 * 1024 * 1024;
		/* XXX - do actually set FAST* bits */
		break;
	}

	/* @@@ Some ESP_DCTL bits probably need setting */
	NCR_WRITE_REG(sc, ESP_DCTL,
	    ESPDCTL_16MHZ | ESPDCTL_INTENB | ESPDCTL_RESET);
	DELAY(10);
	DPRINTF(("esp dctl is 0x%02x\n",NCR_READ_REG(sc,ESP_DCTL)));
	NCR_WRITE_REG(sc, ESP_DCTL, ESPDCTL_16MHZ | ESPDCTL_INTENB);
	DELAY(10);
	DPRINTF(("esp dctl is 0x%02x\n",NCR_READ_REG(sc,ESP_DCTL)));

	esc->sc_dma = nextdma_findchannel ("scsi");
	if (esc->sc_dma) {
		findchannel_defer (self);
	} else {
		printf ("\n");
		config_defer (self, findchannel_defer);
	}

	attached = 1;
}

/*
 * Glue functions.
 */

u_char
esp_read_reg(sc, reg)
	struct ncr53c9x_softc *sc;
	int reg;
{
	struct esp_softc *esc = (struct esp_softc *)sc;

	return(bus_space_read_1(esc->sc_bst, esc->sc_bsh, reg));
}

void
esp_write_reg(sc, reg, val)
	struct ncr53c9x_softc *sc;
	int reg;
	u_char val;
{
	struct esp_softc *esc = (struct esp_softc *)sc;

	bus_space_write_1(esc->sc_bst, esc->sc_bsh, reg, val);
}
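
/*
 * The helper below is not documented in the original source; from the
 * code it appears to busy-wait on a hardware counter byte mapped at
 * xADDR (0x0211a000), resetting it and then spinning until the value
 * read at offset 3 exceeds the argument `c'.  It is used as a short
 * settling delay (doze(0x32)) between ESP_DCTL flush operations in the
 * interrupt path.
 */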
volatile u_int32_t save1;

#define xADDR 0x0211a000
int doze __P((volatile int));
int
doze(c)
	volatile int c;
{
	/* static int tmp1; */
	u_int32_t tmp1;
	volatile u_int8_t tmp2;
	volatile u_int8_t *reg = (volatile u_int8_t *)IIOV(xADDR);
	if (c > 244) return (0);
	if (c == 0) return (0);
	/*((*(volatile u_long *)IIOV(NEXT_P_INTRMASK))&=(~NEXT_I_BIT(x)))*/
	(*reg) = 0;
	(*reg) = 0;
	do {
		save1 = (*reg);
		tmp2 = *(reg + 3);
		tmp1 = tmp2;
	} while (tmp1 <= c);
	return (0);
}

int
esp_dma_isintr(sc)
	struct ncr53c9x_softc *sc;
{
	struct esp_softc *esc = (struct esp_softc *)sc;
	if (INTR_OCCURRED(NEXT_I_SCSI)) {
		NDTRACEIF (*ndtracep++ = 'i');
		NCR_WRITE_REG(sc, ESP_DCTL,
		    ESPDCTL_16MHZ | ESPDCTL_INTENB |
		    (esc->sc_datain ? ESPDCTL_DMARD : 0));
		return (1);
	} else {
		return (0);
	}
}

#define nd_bsr4(reg) bus_space_read_4(nsc->sc_bst, nsc->sc_bsh, (reg))
#define nd_bsw4(reg,val) bus_space_write_4(nsc->sc_bst, nsc->sc_bsh, (reg), (val))

int
esp_dma_intr(sc)
	struct ncr53c9x_softc *sc;
{
	struct esp_softc *esc = (struct esp_softc *)sc;
	struct nextdma_softc *nsc = esc->sc_dma;
	struct nextdma_status *stat = &nsc->sc_stat;

	int r = (INTR_OCCURRED(NEXT_I_SCSI));
	int flushcount;
	r = 1;

	NDTRACEIF (*ndtracep++ = 'I');
	if (r) {
		/* printf ("esp_dma_isintr start\n"); */
		{
			int s = spldma();
			void *ndmap = stat->nd_map;
			int ndidx = stat->nd_idx;
			splx(s);

			flushcount = 0;

#ifdef ESP_DEBUG
			/* esp_dma_nest++; */

			if (esp_debug) {
				char sbuf[256];

				bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRSTAT)),
				    NEXT_INTR_BITS, sbuf, sizeof(sbuf));
				printf("esp_dma_isintr = 0x%s\n", sbuf);
			}
#endif

			while (!nextdma_finished(nsc)) { /* esp_dma_isactive(sc)) { */
				NDTRACEIF (*ndtracep++ = 'w');
				NDTRACEIF (
					sprintf (ndtracep, "f%dm%dl%dw",
					    NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF,
					    NCR_READ_REG((sc), NCR_TCM), NCR_READ_REG((sc), NCR_TCL));
					ndtracep += strlen (ndtracep);
					);
				if (NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF)
					flushcount = 5;
				NCR_WRITE_REG(sc, ESP_DCTL,
				    ESPDCTL_16MHZ | ESPDCTL_INTENB | ESPDCTL_DMAMOD |
				    (esc->sc_datain ? ESPDCTL_DMARD : 0));

				s = spldma();
				while (ndmap == stat->nd_map && ndidx == stat->nd_idx &&
				    !(nd_bsr4 (DD_CSR) & 0x08000000) &&
				    ++flushcount < 5) {
					splx(s);
					NDTRACEIF (*ndtracep++ = 'F');
					NCR_WRITE_REG(sc, ESP_DCTL, ESPDCTL_FLUSH |
					    ESPDCTL_16MHZ | ESPDCTL_INTENB | ESPDCTL_DMAMOD |
					    (esc->sc_datain ? ESPDCTL_DMARD : 0));
					doze(0x32);
					NCR_WRITE_REG(sc, ESP_DCTL,
					    ESPDCTL_16MHZ | ESPDCTL_INTENB | ESPDCTL_DMAMOD |
					    (esc->sc_datain ? ESPDCTL_DMARD : 0));
					doze(0x32);
					s = spldma();
				}
				NDTRACEIF (*ndtracep++ = '0' + flushcount);
				if (flushcount > 4) {
					int next;
					int onext = 0;
					splx(s);
					DPRINTF (("DMA reset\n"));
					while (((next = nd_bsr4 (DD_NEXT)) !=
					    (nd_bsr4 (DD_LIMIT) & 0x7FFFFFFF)) &&
					    onext != next) {
						onext = next;
						DELAY(50);
					}
					NDTRACEIF (*ndtracep++ = 'R');
					NCR_WRITE_REG(sc, ESP_DCTL, ESPDCTL_16MHZ | ESPDCTL_INTENB);
					NDTRACEIF (
						sprintf (ndtracep, "ff:%d tcm:%d tcl:%d ",
						    NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF,
						    NCR_READ_REG((sc), NCR_TCM), NCR_READ_REG((sc), NCR_TCL));
						ndtracep += strlen (ndtracep);
						);
					s = spldma();
					nextdma_reset (nsc);
					splx(s);
					goto out;
				}
				splx(s);

#ifdef DIAGNOSTIC
				if (flushcount > 4) {
					NDTRACEIF (*ndtracep++ = '+');
					printf("%s: unexpected flushcount %d on %s\n",
					    sc->sc_dev.dv_xname, flushcount,
					    esc->sc_datain ? "read" : "write");
				}
#endif

				if (!nextdma_finished(nsc)) { /* esp_dma_isactive(sc)) { */
					NDTRACEIF (*ndtracep++ = '1');
				}
				flushcount = 0;
				s = spldma();
				ndmap = stat->nd_map;
				ndidx = stat->nd_idx;
				splx(s);

			}
		out:	;

#ifdef ESP_DEBUG
			/* esp_dma_nest--; */
#endif

		}

		doze (0x32);
		NCR_WRITE_REG(sc, ESP_DCTL,
		    ESPDCTL_16MHZ | ESPDCTL_INTENB |
		    (esc->sc_datain ? ESPDCTL_DMARD : 0));
		NDTRACEIF (*ndtracep++ = 'b');

		while (esc->sc_datain != -1) DELAY(50);
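
		/*
		 * Residual bookkeeping below: the untransferred byte count
		 * is rebuilt from the chip's transfer counter (NCR_TCL plus
		 * NCR_TCM << 8) plus whatever is still sitting in the FIFO.
		 * Worked example with illustrative numbers (not from the
		 * source): TCL = 0x40, TCM = 0x02 and 3 bytes left in the
		 * FIFO give resid = 0x240 + 3 = 579, so
		 * xfer_len = sc_dmasize - 579.
		 */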
		if (esc->sc_dmaaddr) {
			bus_size_t xfer_len = 0;
			int resid;

			NCR_WRITE_REG(sc, ESP_DCTL, ESPDCTL_16MHZ | ESPDCTL_INTENB);
			if (stat->nd_exception == 0) {
				resid = NCR_READ_REG((sc), NCR_TCL) +
				    (NCR_READ_REG((sc), NCR_TCM) << 8);
				if (resid) {
					resid += (NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF);
#ifdef ESP_DEBUG
					if (NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF)
						if ((NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF) != 16 ||
						    NCR_READ_REG((sc), NCR_TCL) != 240)
							ndtraceshow++;
#endif
				}
				xfer_len = esc->sc_dmasize - resid;
			} else {
				/*static*/ void ncr53c9x_abort(struct ncr53c9x_softc *, struct ncr53c9x_ecb *);
#define ncr53c9x_sched_msgout(m) \
	do {							\
		NCR_MISC(("ncr53c9x_sched_msgout %x %d", m, __LINE__)); \
		NCRCMD(sc, NCRCMD_SETATN);			\
		sc->sc_flags |= NCR_ATN;			\
		sc->sc_msgpriq |= (m);				\
	} while (0)
				int i;
				xfer_len = 0;
				if (esc->sc_begin)
					xfer_len += esc->sc_begin_size;
				if (esc->sc_main_dmamap)
					xfer_len += esc->sc_main_dmamap->dm_xfer_len;
				if (esc->sc_tail_dmamap)
					xfer_len += esc->sc_tail_dmamap->dm_xfer_len;
				resid = 0;
				printf ("X\n");
				for (i = 0; i < 16; i++) {
					NCR_WRITE_REG(sc, ESP_DCTL, ESPDCTL_FLUSH |
					    ESPDCTL_16MHZ | ESPDCTL_INTENB |
					    (esc->sc_datain ? ESPDCTL_DMARD : 0));
					NCR_WRITE_REG(sc, ESP_DCTL,
					    ESPDCTL_16MHZ | ESPDCTL_INTENB |
					    (esc->sc_datain ? ESPDCTL_DMARD : 0));
				}
#if 0
				printf ("ff:%02x tcm:%d tcl:%d esp_dstat:%02x stat:%02x step: %02x intr:%02x new stat:%02X\n",
				    NCR_READ_REG(sc, NCR_FFLAG),
				    NCR_READ_REG((sc), NCR_TCM), NCR_READ_REG((sc), NCR_TCL),
				    NCR_READ_REG(sc, ESP_DSTAT),
				    sc->sc_espstat, sc->sc_espstep,
				    sc->sc_espintr, NCR_READ_REG(sc, NCR_STAT));
				printf ("sc->sc_state: %x sc->sc_phase: %x sc->sc_espstep:%x sc->sc_prevphase:%x sc->sc_flags:%x\n",
				    sc->sc_state, sc->sc_phase, sc->sc_espstep,
				    sc->sc_prevphase, sc->sc_flags);
#endif
				/* sc->sc_flags &= ~NCR_ICCS; */
				sc->sc_nexus->flags |= ECB_ABORT;
				if (sc->sc_phase == MESSAGE_IN_PHASE) {
					/* ncr53c9x_sched_msgout(SEND_ABORT); */
					ncr53c9x_abort(sc, sc->sc_nexus);
				} else if (sc->sc_phase != STATUS_PHASE) {
					printf ("ATTENTION!!! not message/status phase: %d\n",
					    sc->sc_phase);
				}
			}

			NDTRACEIF (
				sprintf (ndtracep, "f%dm%dl%ds%dx%dr%dS",
				    NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF,
				    NCR_READ_REG((sc), NCR_TCM), NCR_READ_REG((sc), NCR_TCL),
				    esc->sc_dmasize, (int)xfer_len, resid);
				ndtracep += strlen (ndtracep);
				);

			*(esc->sc_dmaaddr) += xfer_len;
			*(esc->sc_dmalen) -= xfer_len;
			esc->sc_dmaaddr = 0;
			esc->sc_dmalen = 0;
			esc->sc_dmasize = 0;
		}

		NDTRACEIF (*ndtracep++ = 'B');
		sc->sc_espstat = NCR_READ_REG(sc, NCR_STAT) | (sc->sc_espstat & NCRSTAT_INT);

		DPRINTF(("esp dctl is 0x%02x\n",NCR_READ_REG(sc,ESP_DCTL)));
		/* printf ("esp_dma_isintr DONE\n"); */

	}

	return (r);
}

void
esp_dma_reset(sc)
	struct ncr53c9x_softc *sc;
{
	struct esp_softc *esc = (struct esp_softc *)sc;

	DPRINTF(("esp DMA reset\n"));

#ifdef ESP_DEBUG
	if (esp_debug) {
		char sbuf[256];

		bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRSTAT)),
		    NEXT_INTR_BITS, sbuf, sizeof(sbuf));
		printf(" *intrstat = 0x%s\n", sbuf);

		bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRMASK)),
		    NEXT_INTR_BITS, sbuf, sizeof(sbuf));
		printf(" *intrmask = 0x%s\n", sbuf);
	}
#endif

#if 0
	/* Clear the DMAMOD bit in the DCTL register: */
	NCR_WRITE_REG(sc, ESP_DCTL,
	    ESPDCTL_16MHZ | ESPDCTL_INTENB);
	DPRINTF(("esp dctl is 0x%02x\n",NCR_READ_REG(sc,ESP_DCTL)));
#endif

	nextdma_reset(esc->sc_dma);
	nextdma_init(esc->sc_dma);

	esc->sc_datain = -1;
	esc->sc_dmaaddr = 0;
	esc->sc_dmalen = 0;
	esc->sc_dmasize = 0;

	esc->sc_loaded = 0;

	esc->sc_begin = 0;
	esc->sc_begin_size = 0;

	if (esc->sc_main_dmamap->dm_mapsize) {
		bus_dmamap_unload(esc->sc_dma->sc_dmat, esc->sc_main_dmamap);
	}
	esc->sc_main = 0;
	esc->sc_main_size = 0;

	if (esc->sc_tail_dmamap->dm_mapsize) {
		bus_dmamap_unload(esc->sc_dma->sc_dmat, esc->sc_tail_dmamap);
	}
	esc->sc_tail = 0;
	esc->sc_tail_size = 0;
}

/* it appears that:
 * addr and len arguments to this need to be kept up to date
 * with the status of the transfer.
 * the dmasize of this is the actual length of the transfer
 * request, which is guaranteed to be less than maxxfer.
 * (len may be > maxxfer)
 */
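
/*
 * Sketch of how the transfer is carved up below (paraphrasing the code,
 * not an authoritative specification): the request is split into
 * sc_begin/sc_begin_size, the unaligned head bytes that are moved by hand
 * through the chip FIFO in esp_dma_go(); sc_main/sc_main_size, the
 * 16-byte-aligned bulk that is mapped into sc_main_dmamap and DMA'd
 * directly; and sc_tail/sc_tail_size, the end slop (plus write overrun
 * padding) that is bounced through sc_tailbuf via sc_tail_dmamap.
 */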

int
esp_dma_setup(sc, addr, len, datain, dmasize)
	struct ncr53c9x_softc *sc;
	caddr_t *addr;
	size_t *len;
	int datain;
	size_t *dmasize;
{
	struct esp_softc *esc = (struct esp_softc *)sc;

	NDTRACEIF (*ndtracep++ = 'h');
#ifdef DIAGNOSTIC
#ifdef ESP_DEBUG
	/* if this is a read DMA, pre-fill the buffer with 0xdeadbeef
	 * to identify bogus reads
	 */
	if (datain) {
		int *v = (int *)(*addr);
		int i;
		for(i=0;i<((*len)/4);i++) v[i] = 0xdeadbeef;
		v = (int *)(&(esc->sc_tailbuf[0]));
		for(i=0;i<((sizeof(esc->sc_tailbuf)/4));i++) v[i] = 0xdeafbeef;
	} else {
		int *v;
		int i;
		v = (int *)(&(esc->sc_tailbuf[0]));
		for(i=0;i<((sizeof(esc->sc_tailbuf)/4));i++) v[i] = 0xfeeb1eed;
	}
#endif
#endif

	DPRINTF(("esp_dma_setup(%p,0x%08x,0x%08x)\n",*addr,*len,*dmasize));

#if 0
#ifdef DIAGNOSTIC /* @@@ this is ok sometimes.  verify that we handle it ok
		   * and then remove this check
		   */
	if (*len != *dmasize) {
		panic("esp dmalen 0x%lx != size 0x%lx",*len,*dmasize);
	}
#endif
#endif

#ifdef DIAGNOSTIC
	if ((esc->sc_datain != -1) ||
	    (esc->sc_main_dmamap->dm_mapsize != 0) ||
	    (esc->sc_tail_dmamap->dm_mapsize != 0) ||
	    (esc->sc_dmasize != 0)) {
		panic("%s: map already loaded in esp_dma_setup"
		    "\tdatain = %d\n\tmain_mapsize=%ld\n\ttail_mapsize=%ld\n\tdmasize = %d",
		    sc->sc_dev.dv_xname, esc->sc_datain,
		    esc->sc_main_dmamap->dm_mapsize,
		    esc->sc_tail_dmamap->dm_mapsize,
		    esc->sc_dmasize);
	}
#endif

	/* we are sometimes asked to DMA zero bytes, that's easy */
	if (*dmasize <= 0) {
		return(0);
	}

	if (*dmasize > ESP_MAX_DMASIZE)
		*dmasize = ESP_MAX_DMASIZE;

	/* Save these in case we have to abort DMA */
	esc->sc_datain = datain;
	esc->sc_dmaaddr = addr;
	esc->sc_dmalen = len;
	esc->sc_dmasize = *dmasize;

	esc->sc_loaded = 0;

#define DMA_SCSI_ALIGNMENT 16
#define DMA_SCSI_ALIGN(type, addr) \
	((type)(((unsigned)(addr)+DMA_SCSI_ALIGNMENT-1) \
		&~(DMA_SCSI_ALIGNMENT-1)))
#define DMA_SCSI_ALIGNED(addr) \
	(((unsigned)(addr)&(DMA_SCSI_ALIGNMENT-1))==0)

	{
		size_t slop_bgn_size; /* # bytes to be fifo'd at beginning */
		size_t slop_end_size; /* # bytes to be transferred in tail buffer */

		{
			u_long bgn = (u_long)(*esc->sc_dmaaddr);
			u_long end = (u_long)(*esc->sc_dmaaddr+esc->sc_dmasize);

			slop_bgn_size = DMA_SCSI_ALIGNMENT-(bgn % DMA_SCSI_ALIGNMENT);
			if (slop_bgn_size == DMA_SCSI_ALIGNMENT) slop_bgn_size = 0;
			slop_end_size = (end % DMA_ENDALIGNMENT);
		}
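
		/*
		 * Worked example with made-up numbers (not from the original
		 * source): for a 4096-byte transfer starting at 0x12345679,
		 * slop_bgn_size = 16 - (0x12345679 % 16) = 7; the end address
		 * is 0x12346679, and with a 16-byte end alignment (a later
		 * KASSERT assumes DMA_ENDALIGNMENT == DMA_SCSI_ALIGNMENT)
		 * slop_end_size = 0x12346679 % 16 = 9, leaving a 4080-byte
		 * aligned main section for the DMA map.
		 */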
857 */ 858 if (!esc->sc_datain) { 859 #if 0 860 slop_end_size += ESP_DMA_MAXTAIL; 861 #else 862 slop_end_size += 0x10; 863 #endif 864 } 865 866 /* Check to make sure we haven't counted extra slop 867 * as would happen for a very short DMA buffer, also 868 * for short buffers, just stuff the entire thing in the tail 869 */ 870 if ((slop_bgn_size+slop_end_size >= esc->sc_dmasize) 871 #if 0 872 || (esc->sc_dmasize <= ESP_DMA_MAXTAIL) 873 #endif 874 ) 875 { 876 slop_bgn_size = 0; 877 slop_end_size = esc->sc_dmasize; 878 } 879 880 /* initialize the fifo buffer */ 881 if (slop_bgn_size) { 882 esc->sc_begin = *esc->sc_dmaaddr; 883 esc->sc_begin_size = slop_bgn_size; 884 } else { 885 esc->sc_begin = 0; 886 esc->sc_begin_size = 0; 887 } 888 889 #if 01 890 /* Load the normal DMA map */ 891 { 892 esc->sc_main = *esc->sc_dmaaddr+slop_bgn_size; 893 esc->sc_main_size = (esc->sc_dmasize)-(slop_end_size+slop_bgn_size); 894 895 if (esc->sc_main_size) { 896 int error; 897 898 if (!esc->sc_datain || DMA_ENDALIGNED(esc->sc_main_size + slop_end_size)) { 899 KASSERT(DMA_SCSI_ALIGNMENT == DMA_ENDALIGNMENT); 900 KASSERT(DMA_BEGINALIGNMENT == DMA_ENDALIGNMENT); 901 esc->sc_main_size += slop_end_size; 902 slop_end_size = 0; 903 if (!esc->sc_datain) { 904 esc->sc_main_size = DMA_ENDALIGN(caddr_t,esc->sc_main+esc->sc_main_size)-esc->sc_main; 905 } 906 } 907 908 error = bus_dmamap_load(esc->sc_dma->sc_dmat, 909 esc->sc_main_dmamap, 910 esc->sc_main, esc->sc_main_size, 911 NULL, BUS_DMA_NOWAIT); 912 if (error) { 913 #ifdef ESP_DEBUG 914 printf("%s: esc->sc_main_dmamap->_dm_size = %ld\n", 915 sc->sc_dev.dv_xname,esc->sc_main_dmamap->_dm_size); 916 printf("%s: esc->sc_main_dmamap->_dm_segcnt = %d\n", 917 sc->sc_dev.dv_xname,esc->sc_main_dmamap->_dm_segcnt); 918 printf("%s: esc->sc_main_dmamap->_dm_maxsegsz = %ld\n", 919 sc->sc_dev.dv_xname,esc->sc_main_dmamap->_dm_maxsegsz); 920 printf("%s: esc->sc_main_dmamap->_dm_boundary = %ld\n", 921 sc->sc_dev.dv_xname,esc->sc_main_dmamap->_dm_boundary); 922 esp_dma_print(sc); 923 #endif 924 panic("%s: can't load main DMA map. error = %d, addr=%p, size=0x%08x", 925 sc->sc_dev.dv_xname, error,esc->sc_main,esc->sc_main_size); 926 } 927 if (!esc->sc_datain) { /* patch the DMA map for write overrun */ 928 esc->sc_main_dmamap->dm_mapsize += ESP_DMA_OVERRUN; 929 esc->sc_main_dmamap->dm_segs[esc->sc_main_dmamap->dm_nsegs - 1].ds_len += 930 ESP_DMA_OVERRUN; 931 } 932 #if 0 933 bus_dmamap_sync(esc->sc_dma->sc_dmat, esc->sc_main_dmamap, 934 0, esc->sc_main_dmamap->dm_mapsize, 935 (esc->sc_datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)); 936 esc->sc_main_dmamap->dm_xfer_len = 0; 937 #endif 938 } else { 939 esc->sc_main = 0; 940 } 941 } 942 943 /* Load the tail DMA map */ 944 if (slop_end_size) { 945 esc->sc_tail = DMA_ENDALIGN(caddr_t,esc->sc_tailbuf+slop_end_size)-slop_end_size; 946 /* If the beginning of the tail is not correctly aligned, 947 * we have no choice but to align the start, which might then unalign the end. 948 */ 949 esc->sc_tail = DMA_SCSI_ALIGN(caddr_t,esc->sc_tail); 950 /* So therefore, we change the tail size to be end aligned again. */ 951 esc->sc_tail_size = DMA_ENDALIGN(caddr_t,esc->sc_tail+slop_end_size)-esc->sc_tail; 952 953 /* @@@ next DMA overrun lossage */ 954 if (!esc->sc_datain) { 955 esc->sc_tail_size += ESP_DMA_OVERRUN; 956 } 957 958 { 959 int error; 960 error = bus_dmamap_load(esc->sc_dma->sc_dmat, 961 esc->sc_tail_dmamap, 962 esc->sc_tail, esc->sc_tail_size, 963 NULL, BUS_DMA_NOWAIT); 964 if (error) { 965 panic("%s: can't load tail DMA map. 
#if 0
				bus_dmamap_sync(esc->sc_dma->sc_dmat, esc->sc_main_dmamap,
				    0, esc->sc_main_dmamap->dm_mapsize,
				    (esc->sc_datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
				esc->sc_main_dmamap->dm_xfer_len = 0;
#endif
			} else {
				esc->sc_main = 0;
			}
		}

		/* Load the tail DMA map */
		if (slop_end_size) {
			esc->sc_tail = DMA_ENDALIGN(caddr_t,esc->sc_tailbuf+slop_end_size)-slop_end_size;
			/* If the beginning of the tail is not correctly aligned,
			 * we have no choice but to align the start, which might then unalign the end.
			 */
			esc->sc_tail = DMA_SCSI_ALIGN(caddr_t,esc->sc_tail);
			/* Therefore, we change the tail size to be end aligned again. */
			esc->sc_tail_size = DMA_ENDALIGN(caddr_t,esc->sc_tail+slop_end_size)-esc->sc_tail;

			/* @@@ next DMA overrun lossage */
			if (!esc->sc_datain) {
				esc->sc_tail_size += ESP_DMA_OVERRUN;
			}

			{
				int error;
				error = bus_dmamap_load(esc->sc_dma->sc_dmat,
				    esc->sc_tail_dmamap,
				    esc->sc_tail, esc->sc_tail_size,
				    NULL, BUS_DMA_NOWAIT);
				if (error) {
					panic("%s: can't load tail DMA map. error = %d, addr=%p, size=0x%08x",
					    sc->sc_dev.dv_xname, error,esc->sc_tail,esc->sc_tail_size);
				}
#if 0
				bus_dmamap_sync(esc->sc_dma->sc_dmat, esc->sc_tail_dmamap,
				    0, esc->sc_tail_dmamap->dm_mapsize,
				    (esc->sc_datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
				esc->sc_tail_dmamap->dm_xfer_len = 0;
#endif
			}
		}
#else

		esc->sc_begin = *esc->sc_dmaaddr;
		slop_bgn_size = DMA_SCSI_ALIGNMENT-((ulong)esc->sc_begin % DMA_SCSI_ALIGNMENT);
		if (slop_bgn_size == DMA_SCSI_ALIGNMENT) slop_bgn_size = 0;
		slop_end_size = esc->sc_dmasize - slop_bgn_size;

		if (slop_bgn_size < esc->sc_dmasize) {
			int error;

			esc->sc_tail = 0;
			esc->sc_tail_size = 0;

			esc->sc_begin_size = slop_bgn_size;
			esc->sc_main = *esc->sc_dmaaddr+slop_bgn_size;
			esc->sc_main_size =
			    DMA_ENDALIGN(caddr_t,esc->sc_main+esc->sc_dmasize-slop_bgn_size)-esc->sc_main;

			if (!esc->sc_datain) {
				esc->sc_main_size += ESP_DMA_OVERRUN;
			}
			error = bus_dmamap_load(esc->sc_dma->sc_dmat,
			    esc->sc_main_dmamap,
			    esc->sc_main, esc->sc_main_size,
			    NULL, BUS_DMA_NOWAIT);
			if (error) {
				panic("%s: can't load main DMA map. error = %d, addr=%p, size=0x%08x",
				    sc->sc_dev.dv_xname, error,esc->sc_main,esc->sc_main_size);
			}
		} else {
			esc->sc_begin = 0;
			esc->sc_begin_size = 0;
			esc->sc_main = 0;
			esc->sc_main_size = 0;

#if 0
			esc->sc_tail = DMA_ENDALIGN(caddr_t,esc->sc_tailbuf+slop_bgn_size)-slop_bgn_size;
			/* If the beginning of the tail is not correctly aligned,
			 * we have no choice but to align the start, which might then unalign the end.
			 */
#endif
			esc->sc_tail = DMA_SCSI_ALIGN(caddr_t,esc->sc_tailbuf);
			/* Therefore, we change the tail size to be end aligned again. */
			esc->sc_tail_size = DMA_ENDALIGN(caddr_t,esc->sc_tail+esc->sc_dmasize)-esc->sc_tail;

			/* @@@ next DMA overrun lossage */
			if (!esc->sc_datain) {
				esc->sc_tail_size += ESP_DMA_OVERRUN;
			}

			{
				int error;
				error = bus_dmamap_load(esc->sc_dma->sc_dmat,
				    esc->sc_tail_dmamap,
				    esc->sc_tail, esc->sc_tail_size,
				    NULL, BUS_DMA_NOWAIT);
				if (error) {
					panic("%s: can't load tail DMA map. error = %d, addr=%p, size=0x%08x",
					    sc->sc_dev.dv_xname, error,esc->sc_tail,esc->sc_tail_size);
				}
			}
		}
#endif

		DPRINTF(("%s: setup: %8p %d %8p %d %8p %d %8p %d\n", sc->sc_dev.dv_xname,
		    *esc->sc_dmaaddr, esc->sc_dmasize, esc->sc_begin,
		    esc->sc_begin_size, esc->sc_main, esc->sc_main_size, esc->sc_tail,
		    esc->sc_tail_size));
	}

	return (0);
}

#ifdef ESP_DEBUG
/* For debugging */
void
esp_dma_store(sc)
	struct ncr53c9x_softc *sc;
{
	struct esp_softc *esc = (struct esp_softc *)sc;
	char *p = &esp_dma_dump[0];

	p += sprintf(p,"%s: sc_datain=%d\n",sc->sc_dev.dv_xname,esc->sc_datain);
	p += sprintf(p,"%s: sc_loaded=0x%08x\n",sc->sc_dev.dv_xname,esc->sc_loaded);

	if (esc->sc_dmaaddr) {
		p += sprintf(p,"%s: sc_dmaaddr=%p\n",sc->sc_dev.dv_xname,*esc->sc_dmaaddr);
	} else {
		p += sprintf(p,"%s: sc_dmaaddr=NULL\n",sc->sc_dev.dv_xname);
	}
	if (esc->sc_dmalen) {
		p += sprintf(p,"%s: sc_dmalen=0x%08x\n",sc->sc_dev.dv_xname,*esc->sc_dmalen);
	} else {
		p += sprintf(p,"%s: sc_dmalen=NULL\n",sc->sc_dev.dv_xname);
	}
	p += sprintf(p,"%s: sc_dmasize=0x%08x\n",sc->sc_dev.dv_xname,esc->sc_dmasize);

	p += sprintf(p,"%s: sc_begin = %p, sc_begin_size = 0x%08x\n",
	    sc->sc_dev.dv_xname, esc->sc_begin, esc->sc_begin_size);
	p += sprintf(p,"%s: sc_main = %p, sc_main_size = 0x%08x\n",
	    sc->sc_dev.dv_xname, esc->sc_main, esc->sc_main_size);
	/* if (esc->sc_main) */ {
		int i;
		bus_dmamap_t map = esc->sc_main_dmamap;
		p += sprintf(p,"%s: sc_main_dmamap. mapsize = 0x%08lx, nsegs = %d\n",
		    sc->sc_dev.dv_xname, map->dm_mapsize, map->dm_nsegs);
		for(i=0;i<map->dm_nsegs;i++) {
			p += sprintf(p,"%s: map->dm_segs[%d].ds_addr = 0x%08lx, len = 0x%08lx\n",
			    sc->sc_dev.dv_xname, i, map->dm_segs[i].ds_addr, map->dm_segs[i].ds_len);
		}
	}
	p += sprintf(p,"%s: sc_tail = %p, sc_tail_size = 0x%08x\n",
	    sc->sc_dev.dv_xname, esc->sc_tail, esc->sc_tail_size);
	/* if (esc->sc_tail) */ {
		int i;
		bus_dmamap_t map = esc->sc_tail_dmamap;
		p += sprintf(p,"%s: sc_tail_dmamap. mapsize = 0x%08lx, nsegs = %d\n",
		    sc->sc_dev.dv_xname, map->dm_mapsize, map->dm_nsegs);
		for(i=0;i<map->dm_nsegs;i++) {
			p += sprintf(p,"%s: map->dm_segs[%d].ds_addr = 0x%08lx, len = 0x%08lx\n",
			    sc->sc_dev.dv_xname, i, map->dm_segs[i].ds_addr, map->dm_segs[i].ds_len);
		}
	}
}

void
esp_dma_print(sc)
	struct ncr53c9x_softc *sc;
{
	esp_dma_store(sc);
	printf("%s",esp_dma_dump);
}
#endif

void
esp_dma_go(sc)
	struct ncr53c9x_softc *sc;
{
	struct esp_softc *esc = (struct esp_softc *)sc;
	struct nextdma_softc *nsc = esc->sc_dma;
	struct nextdma_status *stat = &nsc->sc_stat;
	/* int s = spldma(); */

#ifdef ESP_DEBUG
	if (ndtracep != ndtrace) {
		if (ndtraceshow) {
			*ndtracep = '\0';
			printf ("esp ndtrace: %s\n", ndtrace);
			ndtraceshow = 0;
		} else {
			DPRINTF (("X"));
		}
		ndtracep = ndtrace;
	}
#endif

	DPRINTF(("%s: esp_dma_go(datain = %d)\n",
	    sc->sc_dev.dv_xname, esc->sc_datain));

#ifdef ESP_DEBUG
	if (esp_debug) esp_dma_print(sc);
	else esp_dma_store(sc);
#endif

#ifdef ESP_DEBUG
	{
		int n = NCR_READ_REG(sc, NCR_FFLAG);
		DPRINTF(("%s: fifo size = %d, seq = 0x%x\n",
		    sc->sc_dev.dv_xname,
		    n & NCRFIFO_FF, (n & NCRFIFO_SS)>>5));
	}
#endif

	/* zero length DMA transfers are boring */
	if (esc->sc_dmasize == 0) {
		/* splx(s); */
		return;
	}

#if defined(DIAGNOSTIC)
	if ((esc->sc_begin_size == 0) &&
	    (esc->sc_main_dmamap->dm_mapsize == 0) &&
	    (esc->sc_tail_dmamap->dm_mapsize == 0)) {
#ifdef ESP_DEBUG
		esp_dma_print(sc);
#endif
		panic("%s: No DMA requested!",sc->sc_dev.dv_xname);
	}
#endif

	/* Stuff the fifo with the begin buffer */
	if (esc->sc_datain) {
		int i;
		DPRINTF(("%s: FIFO read of %d bytes:",
		    sc->sc_dev.dv_xname,esc->sc_begin_size));
		for(i=0;i<esc->sc_begin_size;i++) {
			esc->sc_begin[i]=NCR_READ_REG(sc, NCR_FIFO);
			DPRINTF((" %02x",esc->sc_begin[i]&0xff));
		}
		DPRINTF(("\n"));
	} else {
		int i;
		DPRINTF(("%s: FIFO write of %d bytes:",
		    sc->sc_dev.dv_xname,esc->sc_begin_size));
		for(i=0;i<esc->sc_begin_size;i++) {
			NCR_WRITE_REG(sc, NCR_FIFO, esc->sc_begin[i]);
			DPRINTF((" %02x",esc->sc_begin[i]&0xff));
		}
		DPRINTF(("\n"));
	}

	if (esc->sc_main_dmamap->dm_mapsize) {
		bus_dmamap_sync(esc->sc_dma->sc_dmat, esc->sc_main_dmamap,
		    0, esc->sc_main_dmamap->dm_mapsize,
		    (esc->sc_datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
		esc->sc_main_dmamap->dm_xfer_len = 0;
	}

	if (esc->sc_tail_dmamap->dm_mapsize) {
		/* if we are a DMA write cycle, copy the end slop */
		if (!esc->sc_datain) {
			memcpy(esc->sc_tail, *esc->sc_dmaaddr+esc->sc_begin_size+esc->sc_main_size,
			    esc->sc_dmasize-(esc->sc_begin_size+esc->sc_main_size));
		}
		bus_dmamap_sync(esc->sc_dma->sc_dmat, esc->sc_tail_dmamap,
		    0, esc->sc_tail_dmamap->dm_mapsize,
		    (esc->sc_datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
		esc->sc_tail_dmamap->dm_xfer_len = 0;
	}

	stat->nd_exception = 0;
	nextdma_start(nsc, (esc->sc_datain ? DMACSR_SETREAD : DMACSR_SETWRITE));

	if (esc->sc_datain) {
		NCR_WRITE_REG(sc, ESP_DCTL,
		    ESPDCTL_16MHZ | ESPDCTL_INTENB | ESPDCTL_DMAMOD | ESPDCTL_DMARD);
	} else {
		NCR_WRITE_REG(sc, ESP_DCTL,
		    ESPDCTL_16MHZ | ESPDCTL_INTENB | ESPDCTL_DMAMOD);
	}
	DPRINTF(("esp dctl is 0x%02x\n",NCR_READ_REG(sc,ESP_DCTL)));

	NDTRACEIF (if (esc->sc_begin_size) { *ndtracep++ = '1'; *ndtracep++ = 'A' + esc->sc_begin_size; });
	NDTRACEIF (if (esc->sc_main_size) { *ndtracep++ = '2'; *ndtracep++ = '0' + esc->sc_main_dmamap->dm_nsegs; });
	NDTRACEIF (if (esc->sc_tail_size) { *ndtracep++ = '3'; *ndtracep++ = 'A' + esc->sc_tail_size; });

	/* splx(s); */
}

void
esp_dma_stop(sc)
	struct ncr53c9x_softc *sc;
{
	struct esp_softc *esc = (struct esp_softc *)sc;
	nextdma_print(esc->sc_dma);
#ifdef ESP_DEBUG
	esp_dma_print(sc);
#endif
#if 1
	panic("%s: stop not yet implemented",sc->sc_dev.dv_xname);
#endif
}

int
esp_dma_isactive(sc)
	struct ncr53c9x_softc *sc;
{
	struct esp_softc *esc = (struct esp_softc *)sc;
	int r = (esc->sc_dmaaddr != NULL); /* !nextdma_finished(esc->sc_dma); */
	DPRINTF(("esp_dma_isactive = %d\n",r));
	return(r);
}

/****************************************************************/

int esp_dma_int __P((void *));
int
esp_dma_int(arg)
	void *arg;
{
	void nextdma_rotate __P((struct nextdma_softc *));
	void nextdma_setup_curr_regs __P((struct nextdma_softc *));
	void nextdma_setup_cont_regs __P((struct nextdma_softc *));

	struct ncr53c9x_softc *sc = (struct ncr53c9x_softc *)arg;
	struct esp_softc *esc = (struct esp_softc *)sc;
	struct nextdma_softc *nsc = esc->sc_dma;
	struct nextdma_status *stat = &nsc->sc_stat;
	unsigned int state;

	NDTRACEIF (*ndtracep++ = 'E');

	state = nd_bsr4 (DD_CSR);

#if 1
	NDTRACEIF (
		if (state & DMACSR_COMPLETE) *ndtracep++ = 'c';
		if (state & DMACSR_ENABLE) *ndtracep++ = 'e';
		if (state & DMACSR_BUSEXC) *ndtracep++ = 'b';
		if (state & DMACSR_READ) *ndtracep++ = 'r';
		if (state & DMACSR_SUPDATE) *ndtracep++ = 's';
		);

	NDTRACEIF (*ndtracep++ = 'E');

#ifdef ESP_DEBUG
	if (0) if ((state & DMACSR_BUSEXC) && (state & DMACSR_ENABLE)) ndtraceshow++;
	if (0) if ((state & DMACSR_SUPDATE)) ndtraceshow++;
#endif
#endif

	if ((stat->nd_exception == 0) && (state & DMACSR_COMPLETE) && (state & DMACSR_ENABLE)) {
		stat->nd_map->dm_xfer_len += stat->nd_map->dm_segs[stat->nd_idx].ds_len;
	}

	if ((stat->nd_idx+1) == stat->nd_map->dm_nsegs) {
		if (nsc->sc_conf.nd_completed_cb)
			(*nsc->sc_conf.nd_completed_cb)(stat->nd_map, nsc->sc_conf.nd_cb_arg);
	}
	nextdma_rotate(nsc);
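
	/*
	 * As used here (register names per nextdmareg.h; description
	 * inferred from this code rather than from hardware documentation):
	 * DD_NEXT/DD_LIMIT bound the segment the channel is currently
	 * working on, DD_START/DD_STOP hold the chained continuation
	 * segment, and DMACSR_SETSUPDATE asks the engine to promote the
	 * continuation registers when the current segment completes.
	 */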
	if ((state & DMACSR_COMPLETE) && (state & DMACSR_ENABLE)) {
#if 0
		int l = nd_bsr4 (DD_LIMIT) & 0x7FFFFFFF;
		int s = nd_bsr4 (DD_STOP);
#endif
		/* nextdma_setup_cont_regs(nsc); */
		if (stat->nd_map_cont) {
			nd_bsw4 (DD_START, stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr);
			nd_bsw4 (DD_STOP, (stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
			    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len));
		}

		nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE |
		    (state & DMACSR_READ ? DMACSR_SETREAD : DMACSR_SETWRITE) |
		    (stat->nd_map_cont ? DMACSR_SETSUPDATE : 0));

#if 0
#ifdef ESP_DEBUG
		if (state & DMACSR_BUSEXC) {
			sprintf (ndtracep, "CE/BUSEXC: %08lX %08X %08X\n",
			    (stat->nd_map->dm_segs[stat->nd_idx].ds_addr +
			     stat->nd_map->dm_segs[stat->nd_idx].ds_len),
			    l, s);
			ndtracep += strlen (ndtracep);
		}
#endif
#endif
	} else {
#if 0
		if (state & DMACSR_BUSEXC) {
			while (nd_bsr4 (DD_NEXT) !=
			    (nd_bsr4 (DD_LIMIT) & 0x7FFFFFFF))
				printf ("Y"); /* DELAY(50); */
			state = nd_bsr4 (DD_CSR);
		}
#endif

		if (!(state & DMACSR_SUPDATE)) {
			nextdma_rotate(nsc);
		} else {
			nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE |
			    DMACSR_INITBUF | DMACSR_RESET |
			    (state & DMACSR_READ ? DMACSR_SETREAD : DMACSR_SETWRITE));

			nd_bsw4 (DD_NEXT, stat->nd_map->dm_segs[stat->nd_idx].ds_addr);
			nd_bsw4 (DD_LIMIT,
			    (stat->nd_map->dm_segs[stat->nd_idx].ds_addr +
			     stat->nd_map->dm_segs[stat->nd_idx].ds_len) | 0/* x80000000 */);
			if (stat->nd_map_cont) {
				nd_bsw4 (DD_START,
				    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr);
				nd_bsw4 (DD_STOP,
				    (stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
				     stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len) | 0/* x80000000 */);
			}
			nd_bsw4 (DD_CSR, DMACSR_SETENABLE |
			    DMACSR_CLRCOMPLETE | (state & DMACSR_READ ? DMACSR_SETREAD : DMACSR_SETWRITE) |
			    (stat->nd_map_cont ? DMACSR_SETSUPDATE : 0));
#if 1
#ifdef ESP_DEBUG
			sprintf (ndtracep, "supdate ");
			ndtracep += strlen (ndtracep);
			sprintf (ndtracep, "%08X %08X %08X %08X ",
			    nd_bsr4 (DD_NEXT),
			    nd_bsr4 (DD_LIMIT) & 0x7FFFFFFF,
			    nd_bsr4 (DD_START),
			    nd_bsr4 (DD_STOP) & 0x7FFFFFFF);
			ndtracep += strlen (ndtracep);
#endif
#endif
			stat->nd_exception++;
			return(1);
			/* NCR_WRITE_REG(sc, ESP_DCTL, ctl); */
			goto restart;
		}

		if (stat->nd_map) {
#if 1
#ifdef ESP_DEBUG
			sprintf (ndtracep, "%08X %08X %08X %08X ",
			    nd_bsr4 (DD_NEXT),
			    nd_bsr4 (DD_LIMIT) & 0x7FFFFFFF,
			    nd_bsr4 (DD_START),
			    nd_bsr4 (DD_STOP) & 0x7FFFFFFF);
			ndtracep += strlen (ndtracep);
#endif
#endif

#if 0
			nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);

			nd_bsw4 (DD_CSR, 0);
#endif
#if 1
			/* 6/2 */
			nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE |
			    DMACSR_INITBUF | DMACSR_RESET |
			    (state & DMACSR_READ ? DMACSR_SETREAD : DMACSR_SETWRITE));

			/* nextdma_setup_curr_regs(nsc); */
			nd_bsw4 (DD_NEXT, stat->nd_map->dm_segs[stat->nd_idx].ds_addr);
			nd_bsw4 (DD_LIMIT,
			    (stat->nd_map->dm_segs[stat->nd_idx].ds_addr +
			     stat->nd_map->dm_segs[stat->nd_idx].ds_len) | 0/* x80000000 */);
			/* nextdma_setup_cont_regs(nsc); */
			if (stat->nd_map_cont) {
				nd_bsw4 (DD_START,
				    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr);
				nd_bsw4 (DD_STOP,
				    (stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
				     stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len) | 0/* x80000000 */);
			}

			nd_bsw4 (DD_CSR,
			    DMACSR_SETENABLE | (stat->nd_map_cont ? DMACSR_SETSUPDATE : 0) |
			    (state & DMACSR_READ ? DMACSR_SETREAD : DMACSR_SETWRITE));
#ifdef ESP_DEBUG
			/* ndtraceshow++; */
#endif
			stat->nd_exception++;
			return(1);
#endif
			/* NCR_WRITE_REG(sc, ESP_DCTL, ctl); */
			goto restart;
		restart:
#if 1
#ifdef ESP_DEBUG
			sprintf (ndtracep, "restart %08lX %08lX\n",
			    stat->nd_map->dm_segs[stat->nd_idx].ds_addr,
			    stat->nd_map->dm_segs[stat->nd_idx].ds_addr +
			    stat->nd_map->dm_segs[stat->nd_idx].ds_len);
			if (stat->nd_map_cont) {
				sprintf (ndtracep + strlen(ndtracep) - 1, " %08lX %08lX\n",
				    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr,
				    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
				    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
			}
			ndtracep += strlen (ndtracep);
#endif
#endif
			nextdma_print(nsc);
			NCR_WRITE_REG(sc, ESP_DCTL, ESPDCTL_16MHZ | ESPDCTL_INTENB);
			printf ("ff:%02x tcm:%d tcl:%d esp_dstat:%02x state:%02x step: %02x intr:%02x state:%08X\n",
			    NCR_READ_REG(sc, NCR_FFLAG),
			    NCR_READ_REG((sc), NCR_TCM), NCR_READ_REG((sc), NCR_TCL),
			    NCR_READ_REG(sc, ESP_DSTAT),
			    NCR_READ_REG(sc, NCR_STAT), NCR_READ_REG(sc, NCR_STEP),
			    NCR_READ_REG(sc, NCR_INTR), state);
#ifdef ESP_DEBUG
			*ndtracep = '\0';
			printf ("ndtrace: %s\n", ndtrace);
#endif
			panic("%s: busexc/supdate occurred. Please email this output to chris@pin.lu.",
			    sc->sc_dev.dv_xname);
#ifdef ESP_DEBUG
			ndtraceshow++;
#endif
		} else {
			nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);
			if (nsc->sc_conf.nd_shutdown_cb)
				(*nsc->sc_conf.nd_shutdown_cb)(nsc->sc_conf.nd_cb_arg);
		}
	}
	return (1);
}

/* Internal DMA callback routines */
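
/*
 * Rough contract of the continue callback as this driver uses it: each
 * call hands the DMA engine the next loaded map, main map first and then
 * the tail map, with the ESP_LOADED_* bits in sc_loaded recording what
 * has already been handed out; a NULL return tells the engine there is
 * nothing further to chain.
 */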
bus_dmamap_t
esp_dmacb_continue(arg)
	void *arg;
{
	struct ncr53c9x_softc *sc = (struct ncr53c9x_softc *)arg;
	struct esp_softc *esc = (struct esp_softc *)sc;

	NDTRACEIF (*ndtracep++ = 'x');
	DPRINTF(("%s: DMA continue\n",sc->sc_dev.dv_xname));

#ifdef DIAGNOSTIC
	if ((esc->sc_datain < 0) || (esc->sc_datain > 1)) {
		panic("%s: map not loaded in DMA continue callback, datain = %d",
		    sc->sc_dev.dv_xname,esc->sc_datain);
	}
#endif

	if ((!(esc->sc_loaded & ESP_LOADED_MAIN)) &&
	    (esc->sc_main_dmamap->dm_mapsize)) {
		DPRINTF(("%s: Loading main map\n",sc->sc_dev.dv_xname));
#if 0
		bus_dmamap_sync(esc->sc_dma->sc_dmat, esc->sc_main_dmamap,
		    0, esc->sc_main_dmamap->dm_mapsize,
		    (esc->sc_datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
		esc->sc_main_dmamap->dm_xfer_len = 0;
#endif
		esc->sc_loaded |= ESP_LOADED_MAIN;
		return(esc->sc_main_dmamap);
	}

	if ((!(esc->sc_loaded & ESP_LOADED_TAIL)) &&
	    (esc->sc_tail_dmamap->dm_mapsize)) {
		DPRINTF(("%s: Loading tail map\n",sc->sc_dev.dv_xname));
#if 0
		bus_dmamap_sync(esc->sc_dma->sc_dmat, esc->sc_tail_dmamap,
		    0, esc->sc_tail_dmamap->dm_mapsize,
		    (esc->sc_datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
		esc->sc_tail_dmamap->dm_xfer_len = 0;
#endif
		esc->sc_loaded |= ESP_LOADED_TAIL;
		return(esc->sc_tail_dmamap);
	}

	DPRINTF(("%s: not loading map\n",sc->sc_dev.dv_xname));
	return(0);
}
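
/*
 * The completed callback below only does bookkeeping: it flags the map
 * that just finished with ESP_UNLOADED_MAIN or ESP_UNLOADED_TAIL in
 * sc_loaded (sanity-checking that it was handed out earlier); the actual
 * postread/postwrite sync and unload happen later in esp_dmacb_shutdown().
 */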

void
esp_dmacb_completed(map, arg)
	bus_dmamap_t map;
	void *arg;
{
	struct ncr53c9x_softc *sc = (struct ncr53c9x_softc *)arg;
	struct esp_softc *esc = (struct esp_softc *)sc;

	NDTRACEIF (*ndtracep++ = 'X');
	DPRINTF(("%s: DMA completed\n",sc->sc_dev.dv_xname));

#ifdef DIAGNOSTIC
	if ((esc->sc_datain < 0) || (esc->sc_datain > 1)) {
		panic("%s: invalid DMA direction in completed callback, datain = %d",
		    sc->sc_dev.dv_xname,esc->sc_datain);
	}
#endif

#if defined(DIAGNOSTIC) && 0
	{
		int i;
		for(i=0;i<map->dm_nsegs;i++) {
			if (map->dm_xfer_len != map->dm_mapsize) {
				printf("%s: map->dm_mapsize = %d\n",
				    sc->sc_dev.dv_xname,map->dm_mapsize);
				printf("%s: map->dm_nsegs = %d\n",
				    sc->sc_dev.dv_xname,map->dm_nsegs);
				printf("%s: map->dm_xfer_len = %d\n",
				    sc->sc_dev.dv_xname,map->dm_xfer_len);
				for(i=0;i<map->dm_nsegs;i++) {
					printf("%s: map->dm_segs[%d].ds_addr = 0x%08lx\n",
					    sc->sc_dev.dv_xname,i,map->dm_segs[i].ds_addr);
					printf("%s: map->dm_segs[%d].ds_len = %d\n",
					    sc->sc_dev.dv_xname,i,map->dm_segs[i].ds_len);
				}
				panic("%s: incomplete DMA transfer",sc->sc_dev.dv_xname);
			}
		}
	}
#endif

	if (map == esc->sc_main_dmamap) {
#ifdef DIAGNOSTIC
		if ((esc->sc_loaded & ESP_UNLOADED_MAIN) ||
		    !(esc->sc_loaded & ESP_LOADED_MAIN)) {
			panic("%s: unexpected completed call for main map",sc->sc_dev.dv_xname);
		}
#endif
		esc->sc_loaded |= ESP_UNLOADED_MAIN;
	} else if (map == esc->sc_tail_dmamap) {
#ifdef DIAGNOSTIC
		if ((esc->sc_loaded & ESP_UNLOADED_TAIL) ||
		    !(esc->sc_loaded & ESP_LOADED_TAIL)) {
			panic("%s: unexpected completed call for tail map",sc->sc_dev.dv_xname);
		}
#endif
		esc->sc_loaded |= ESP_UNLOADED_TAIL;
	}
#ifdef DIAGNOSTIC
	else {
		panic("%s: unexpected completed map", sc->sc_dev.dv_xname);
	}
#endif

#ifdef ESP_DEBUG
	if (esp_debug) {
		if (map == esc->sc_main_dmamap) {
			printf("%s: completed main map\n",sc->sc_dev.dv_xname);
		} else if (map == esc->sc_tail_dmamap) {
			printf("%s: completed tail map\n",sc->sc_dev.dv_xname);
		}
	}
#endif

#if 0
	if ((map == esc->sc_tail_dmamap) ||
	    ((esc->sc_tail_size == 0) && (map == esc->sc_main_dmamap))) {

		/* Clear the DMAMOD bit in the DCTL register to give control
		 * back to the scsi chip.
		 */
		if (esc->sc_datain) {
			NCR_WRITE_REG(sc, ESP_DCTL,
			    ESPDCTL_16MHZ | ESPDCTL_INTENB | ESPDCTL_DMARD);
		} else {
			NCR_WRITE_REG(sc, ESP_DCTL,
			    ESPDCTL_16MHZ | ESPDCTL_INTENB);
		}
		DPRINTF(("esp dctl is 0x%02x\n",NCR_READ_REG(sc,ESP_DCTL)));
	}
#endif


#if 0
	bus_dmamap_sync(esc->sc_dma->sc_dmat, map,
	    0, map->dm_mapsize,
	    (esc->sc_datain ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
#endif

}

void
esp_dmacb_shutdown(arg)
	void *arg;
{
	struct ncr53c9x_softc *sc = (struct ncr53c9x_softc *)arg;
	struct esp_softc *esc = (struct esp_softc *)sc;

	NDTRACEIF (*ndtracep++ = 'S');
	DPRINTF(("%s: DMA shutdown\n",sc->sc_dev.dv_xname));

	if (esc->sc_loaded == 0)
		return;

#if 0
	{
		/* Clear the DMAMOD bit in the DCTL register to give control
		 * back to the scsi chip.
		 */
		if (esc->sc_datain) {
			NCR_WRITE_REG(sc, ESP_DCTL,
			    ESPDCTL_16MHZ | ESPDCTL_INTENB | ESPDCTL_DMARD);
		} else {
			NCR_WRITE_REG(sc, ESP_DCTL,
			    ESPDCTL_16MHZ | ESPDCTL_INTENB);
		}
		DPRINTF(("esp dctl is 0x%02x\n",NCR_READ_REG(sc,ESP_DCTL)));
	}
#endif

	DPRINTF(("%s: esp_dma_nest == %d\n",sc->sc_dev.dv_xname,esp_dma_nest));

	/* Stuff the end slop into fifo */

#ifdef ESP_DEBUG
	if (esp_debug) {

		int n = NCR_READ_REG(sc, NCR_FFLAG);
		DPRINTF(("%s: fifo size = %d, seq = 0x%x\n",
		    sc->sc_dev.dv_xname,n & NCRFIFO_FF, (n & NCRFIFO_SS)>>5));
	}
#endif

	if (esc->sc_main_dmamap->dm_mapsize) {
		if (!esc->sc_datain) { /* unpatch the DMA map for write overrun */
			esc->sc_main_dmamap->dm_mapsize -= ESP_DMA_OVERRUN;
			esc->sc_main_dmamap->dm_segs[esc->sc_main_dmamap->dm_nsegs - 1].ds_len -=
			    ESP_DMA_OVERRUN;
		}
		bus_dmamap_sync(esc->sc_dma->sc_dmat, esc->sc_main_dmamap,
		    0, esc->sc_main_dmamap->dm_mapsize,
		    (esc->sc_datain ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
		bus_dmamap_unload(esc->sc_dma->sc_dmat, esc->sc_main_dmamap);
		NDTRACEIF (
			sprintf (ndtracep, "m%ld", esc->sc_main_dmamap->dm_xfer_len);
			ndtracep += strlen (ndtracep);
			);
	}

	if (esc->sc_tail_dmamap->dm_mapsize) {
		bus_dmamap_sync(esc->sc_dma->sc_dmat, esc->sc_tail_dmamap,
		    0, esc->sc_tail_dmamap->dm_mapsize,
		    (esc->sc_datain ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
		bus_dmamap_unload(esc->sc_dma->sc_dmat, esc->sc_tail_dmamap);
		/* copy the tail DMA buffer data for read transfers */
		if (esc->sc_datain) {
			memcpy(*esc->sc_dmaaddr+esc->sc_begin_size+esc->sc_main_size,
			    esc->sc_tail, esc->sc_dmasize-(esc->sc_begin_size+esc->sc_main_size));
		}
		NDTRACEIF (
			sprintf (ndtracep, "t%ld", esc->sc_tail_dmamap->dm_xfer_len);
			ndtracep += strlen (ndtracep);
			);
	}

#ifdef ESP_DEBUG
	if (esp_debug) {
		printf("%s: dma_shutdown: addr=%p,len=0x%08x,size=0x%08x\n",
		    sc->sc_dev.dv_xname,
		    *esc->sc_dmaaddr, *esc->sc_dmalen, esc->sc_dmasize);
		if (esp_debug > 10) {
			esp_hex_dump(*(esc->sc_dmaaddr),esc->sc_dmasize);
			printf("%s: tail=%p,tailbuf=%p,tail_size=0x%08x\n",
			    sc->sc_dev.dv_xname,
			    esc->sc_tail, &(esc->sc_tailbuf[0]), esc->sc_tail_size);
			esp_hex_dump(&(esc->sc_tailbuf[0]),sizeof(esc->sc_tailbuf));
		}
	}
#endif

	esc->sc_main = 0;
	esc->sc_main_size = 0;
	esc->sc_tail = 0;
	esc->sc_tail_size = 0;

	esc->sc_datain = -1;
	/* esc->sc_dmaaddr = 0; */
	/* esc->sc_dmalen = 0; */
	/* esc->sc_dmasize = 0; */

	esc->sc_loaded = 0;

	esc->sc_begin = 0;
	esc->sc_begin_size = 0;

#ifdef ESP_DEBUG
	if (esp_debug) {
		char sbuf[256];

		bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRSTAT)),
		    NEXT_INTR_BITS, sbuf, sizeof(sbuf));
		printf(" *intrstat = 0x%s\n", sbuf);

		bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRMASK)),
		    NEXT_INTR_BITS, sbuf, sizeof(sbuf));
		printf(" *intrmask = 0x%s\n", sbuf);
	}
#endif
}