/*	$NetBSD: esp.c,v 1.45 2003/07/15 02:59:31 lukem Exp $	*/

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994 Peter Galbavy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by Peter Galbavy
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Based on aic6360 by Jarle Greipsland
 *
 * Acknowledgements: Many of the algorithms used in this driver are
 * inspired by the work of Julian Elischer (julian@tfs.com) and
 * Charles Hannum (mycroft@duality.gnu.ai.mit.edu).  Thanks a million!
 */

/*
 * Grabbed from the sparc port at revision 1.73 for the NeXT.
 * Darrin B. Jewell <dbj@netbsd.org>  Sat Jul  4 15:41:32 1998
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: esp.c,v 1.45 2003/07/15 02:59:31 lukem Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>
#include <dev/scsipi/scsi_message.h>

#include <machine/bus.h>
#include <machine/autoconf.h>
#include <machine/cpu.h>

#include <dev/ic/ncr53c9xreg.h>
#include <dev/ic/ncr53c9xvar.h>

#include <next68k/next68k/isr.h>

#include <next68k/dev/intiovar.h>
#include <next68k/dev/nextdmareg.h>
#include <next68k/dev/nextdmavar.h>

#include <next68k/dev/espreg.h>
#include <next68k/dev/espvar.h>

#ifdef DEBUG
#undef ESP_DEBUG
#endif

#ifdef ESP_DEBUG
int esp_debug = 0;
#define DPRINTF(x) if (esp_debug) printf x;
extern char *ndtracep;
extern char ndtrace[];
extern int ndtraceshow;
#define NDTRACEIF(x) if (10 && ndtracep < (ndtrace + 8192)) do {x;} while (0)
#else
#define DPRINTF(x)
#define NDTRACEIF(x)
#endif
#define PRINTF(x) printf x;


void	espattach_intio __P((struct device *, struct device *, void *));
int	espmatch_intio __P((struct device *, struct cfdata *, void *));

/* DMA callbacks */
bus_dmamap_t esp_dmacb_continue __P((void *arg));
void esp_dmacb_completed __P((bus_dmamap_t map, void *arg));
void esp_dmacb_shutdown __P((void *arg));

static void findchannel_defer __P((struct device *));

#ifdef ESP_DEBUG
char esp_dma_dump[5*1024] = "";
struct ncr53c9x_softc *esp_debug_sc = 0;
void esp_dma_store __P((struct ncr53c9x_softc *sc));
void esp_dma_print __P((struct ncr53c9x_softc *sc));
int esp_dma_nest = 0;
#endif


/* Linkup to the rest of the kernel */
CFATTACH_DECL(esp, sizeof(struct esp_softc),
    espmatch_intio, espattach_intio, NULL, NULL);

static int attached = 0;

/*
 * Functions and the switch for the MI code.
 */
u_char	esp_read_reg __P((struct ncr53c9x_softc *, int));
void	esp_write_reg __P((struct ncr53c9x_softc *, int, u_char));
int	esp_dma_isintr __P((struct ncr53c9x_softc *));
void	esp_dma_reset __P((struct ncr53c9x_softc *));
int	esp_dma_intr __P((struct ncr53c9x_softc *));
int	esp_dma_setup __P((struct ncr53c9x_softc *, caddr_t *,
	    size_t *, int, size_t *));
void	esp_dma_go __P((struct ncr53c9x_softc *));
void	esp_dma_stop __P((struct ncr53c9x_softc *));
int	esp_dma_isactive __P((struct ncr53c9x_softc *));

struct ncr53c9x_glue esp_glue = {
	esp_read_reg,
	esp_write_reg,
	esp_dma_isintr,
	esp_dma_reset,
	esp_dma_intr,
	esp_dma_setup,
	esp_dma_go,
	esp_dma_stop,
	esp_dma_isactive,
	NULL,			/* gl_clear_latched_intr */
};

#ifdef ESP_DEBUG
#define XCHR(x) "0123456789abcdef"[(x) & 0xf]
static void
esp_hex_dump(unsigned char *pkt, size_t len)
{
	size_t i, j;

	printf("00000000 ");
	for (i = 0; i < len; i++) {
		printf("%c%c ", XCHR(pkt[i]>>4), XCHR(pkt[i]));
		if ((i+1) % 16 == 8) {
			printf(" ");
		}
		if ((i+1) % 16 == 0) {
			printf(" %c", '|');
			for (j = 0; j < 16; j++) {
				printf("%c", pkt[i-15+j] >= 32 && pkt[i-15+j] < 127 ?
				    pkt[i-15+j] : '.');
			}
			printf("%c\n%c%c%c%c%c%c%c%c ", '|',
			    XCHR((i+1)>>28), XCHR((i+1)>>24), XCHR((i+1)>>20), XCHR((i+1)>>16),
			    XCHR((i+1)>>12), XCHR((i+1)>>8), XCHR((i+1)>>4), XCHR(i+1));
		}
	}
	printf("\n");
}
#endif

int
espmatch_intio(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;

	if (attached)
		return (0);

	ia->ia_addr = (void *)NEXT_P_SCSI;

	return (1);
}

static void
findchannel_defer(self)
	struct device *self;
{
	struct esp_softc *esc = (void *)self;
	struct ncr53c9x_softc *sc = &esc->sc_ncr53c9x;
	int error;

	if (!esc->sc_dma) {
		printf("%s", sc->sc_dev.dv_xname);
		esc->sc_dma = nextdma_findchannel("scsi");
		if (!esc->sc_dma)
			panic("%s: can't find DMA channel",
			    sc->sc_dev.dv_xname);
	}

	nextdma_setconf(esc->sc_dma, shutdown_cb, &esp_dmacb_shutdown);
	nextdma_setconf(esc->sc_dma, continue_cb, &esp_dmacb_continue);
	nextdma_setconf(esc->sc_dma, completed_cb, &esp_dmacb_completed);
	nextdma_setconf(esc->sc_dma, cb_arg, sc);

	error = bus_dmamap_create(esc->sc_dma->sc_dmat,
	    sc->sc_maxxfer, sc->sc_maxxfer/PAGE_SIZE+1, sc->sc_maxxfer,
	    0, BUS_DMA_ALLOCNOW, &esc->sc_main_dmamap);
	if (error) {
		panic("%s: can't create main i/o DMA map, error = %d",
		    sc->sc_dev.dv_xname, error);
	}

	error = bus_dmamap_create(esc->sc_dma->sc_dmat,
	    ESP_DMA_TAILBUFSIZE, 1, ESP_DMA_TAILBUFSIZE,
	    0, BUS_DMA_ALLOCNOW, &esc->sc_tail_dmamap);
	if (error) {
		panic("%s: can't create tail i/o DMA map, error = %d",
		    sc->sc_dev.dv_xname, error);
	}

#if 0
	/* Turn on target selection using the `DMA' method */
	sc->sc_features |= NCR_F_DMASELECT;
#endif

	/* Do the common parts of attachment. */
	sc->sc_adapter.adapt_minphys = minphys;
	sc->sc_adapter.adapt_request = ncr53c9x_scsipi_request;
	ncr53c9x_attach(sc);

	/* Establish interrupt channel */
	isrlink_autovec(ncr53c9x_intr, sc, NEXT_I_IPL(NEXT_I_SCSI), 0, NULL);
	INTR_ENABLE(NEXT_I_SCSI);

	/* register interrupt stats */
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
	    sc->sc_dev.dv_xname, "intr");

	printf("%s: using DMA channel %s\n", sc->sc_dev.dv_xname,
	    esc->sc_dma->sc_dev.dv_xname);
}

void
espattach_intio(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct esp_softc *esc = (void *)self;
	struct ncr53c9x_softc *sc = &esc->sc_ncr53c9x;
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;

#ifdef ESP_DEBUG
	esp_debug_sc = sc;
#endif

	esc->sc_bst = ia->ia_bst;
	if (bus_space_map(esc->sc_bst, NEXT_P_SCSI,
	    ESP_DEVICE_SIZE, 0, &esc->sc_bsh)) {
		panic("\n%s: can't map ncr53c90 registers",
		    sc->sc_dev.dv_xname);
	}

	sc->sc_id = 7;
	sc->sc_freq = 20;	/* MHz */

	/*
	 * Set up glue for MI code early; we use some of it here.
	 */
	sc->sc_glue = &esp_glue;

	/*
	 * XXX More of this should be in ncr53c9x_attach(), but
	 * XXX should we really poke around the chip that much in
	 * XXX the MI code?  Think about this more...
	 */

	/*
	 * It is necessary to try to load the 2nd config register here,
	 * to find out what rev the esp chip is, else the ncr53c9x_reset
	 * will not set up the defaults correctly.
	 */
	sc->sc_cfg1 = sc->sc_id | NCRCFG1_PARENB;
	sc->sc_cfg2 = NCRCFG2_SCSI2 | NCRCFG2_RPE;
	sc->sc_cfg3 = NCRCFG3_CDB;
	NCR_WRITE_REG(sc, NCR_CFG2, sc->sc_cfg2);

	if ((NCR_READ_REG(sc, NCR_CFG2) & ~NCRCFG2_RSVD) !=
	    (NCRCFG2_SCSI2 | NCRCFG2_RPE)) {
		sc->sc_rev = NCR_VARIANT_ESP100;
	} else {
		sc->sc_cfg2 = NCRCFG2_SCSI2;
		NCR_WRITE_REG(sc, NCR_CFG2, sc->sc_cfg2);
		sc->sc_cfg3 = 0;
		NCR_WRITE_REG(sc, NCR_CFG3, sc->sc_cfg3);
		sc->sc_cfg3 = (NCRCFG3_CDB | NCRCFG3_FCLK);
		NCR_WRITE_REG(sc, NCR_CFG3, sc->sc_cfg3);
		if (NCR_READ_REG(sc, NCR_CFG3) !=
		    (NCRCFG3_CDB | NCRCFG3_FCLK)) {
			sc->sc_rev = NCR_VARIANT_ESP100A;
		} else {
			/* NCRCFG2_FE enables > 64K transfers */
			sc->sc_cfg2 |= NCRCFG2_FE;
			sc->sc_cfg3 = 0;
			NCR_WRITE_REG(sc, NCR_CFG3, sc->sc_cfg3);
			sc->sc_rev = NCR_VARIANT_ESP200;
		}
	}

	/*
	 * XXX minsync and maxxfer _should_ be set up in MI code,
	 * XXX but it appears to have some dependency on what sort
	 * XXX of DMA we're hooked up to, etc.
	 */

	/*
	 * This is the value used to start sync negotiations.
	 * Note that the NCR register "SYNCTP" is programmed
	 * in "clocks per byte", and has a minimum value of 4.
	 * The SCSI period used in negotiation is one-fourth
	 * of the time (in nanoseconds) needed to transfer one byte.
	 * Since the chip's clock is given in MHz, we have the following
	 * formula: 4 * period = (1000 / freq) * 4
	 */
	sc->sc_minsync = /* 1000 / sc->sc_freq */ 0;

	/*
	 * Alas, we must now modify the value a bit, because it's
	 * only valid when we can switch on FASTCLK and FASTSCSI bits
	 * in config register 3...
	 */
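	/*
	 * Added note (worked example, not in the original source): with
	 * sc_freq = 20 MHz one chip clock is 1000 / 20 = 50 ns, so the
	 * commented-out formula would give sc_minsync = 50, i.e. a 50 ns
	 * negotiation period (4 clocks of 50 ns per byte, divided by four
	 * as described above).  The driver leaves sc_minsync at 0 in all
	 * cases below, which apparently keeps the MI code from attempting
	 * synchronous negotiation on this hardware.
	 */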
	switch (sc->sc_rev) {
	case NCR_VARIANT_ESP100:
		sc->sc_maxxfer = 64 * 1024;
		sc->sc_minsync = 0;	/* No synch on old chip? */
		break;

	case NCR_VARIANT_ESP100A:
		sc->sc_maxxfer = 64 * 1024;
		/* Min clocks/byte is 5 */
		sc->sc_minsync = /* ncr53c9x_cpb2stp(sc, 5) */ 0;
		break;

	case NCR_VARIANT_ESP200:
		sc->sc_maxxfer = 16 * 1024 * 1024;
		/* XXX - do actually set FAST* bits */
		break;
	}

	/* @@@ Some ESP_DCTL bits probably need setting */
	NCR_WRITE_REG(sc, ESP_DCTL,
	    ESPDCTL_16MHZ | ESPDCTL_INTENB | ESPDCTL_RESET);
	DELAY(10);
	DPRINTF(("esp dctl is 0x%02x\n", NCR_READ_REG(sc, ESP_DCTL)));
	NCR_WRITE_REG(sc, ESP_DCTL, ESPDCTL_16MHZ | ESPDCTL_INTENB);
	DELAY(10);
	DPRINTF(("esp dctl is 0x%02x\n", NCR_READ_REG(sc, ESP_DCTL)));

	esc->sc_dma = nextdma_findchannel("scsi");
	if (esc->sc_dma) {
		findchannel_defer(self);
	} else {
		printf("\n");
		config_defer(self, findchannel_defer);
	}

	attached = 1;
}

/*
 * Glue functions.
 */

u_char
esp_read_reg(sc, reg)
	struct ncr53c9x_softc *sc;
	int reg;
{
	struct esp_softc *esc = (struct esp_softc *)sc;

	return (bus_space_read_1(esc->sc_bst, esc->sc_bsh, reg));
}

void
esp_write_reg(sc, reg, val)
	struct ncr53c9x_softc *sc;
	int reg;
	u_char val;
{
	struct esp_softc *esc = (struct esp_softc *)sc;

	bus_space_write_1(esc->sc_bst, esc->sc_bsh, reg, val);
}

volatile u_int32_t save1;

#define xADDR 0x0211a000
int doze __P((volatile int));
int
doze(c)
	volatile int c;
{
	/* static int tmp1; */
	u_int32_t tmp1;
	volatile u_int8_t tmp2;
	volatile u_int8_t *reg = (volatile u_int8_t *)IIOV(xADDR);

	if (c > 244)
		return (0);
	if (c == 0)
		return (0);
	/* ((*(volatile u_long *)IIOV(NEXT_P_INTRMASK))&=(~NEXT_I_BIT(x))) */
	(*reg) = 0;
	(*reg) = 0;
	do {
		save1 = (*reg);
		tmp2 = *(reg + 3);
		tmp1 = tmp2;
	} while (tmp1 <= c);
	return (0);
}
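/*
 * Added note (not in the original source): the routines below implement the
 * ncr53c9x_glue entry points declared above.  esp_dma_isintr() is polled by
 * the MI interrupt code; when a SCSI interrupt is pending it also rewrites
 * ESP_DCTL so that the NeXT SCSI control logic keeps ESPDCTL_DMARD asserted
 * for data-in transfers.  Throughout this file sc_datain is -1 while no
 * transfer is programmed, 1 for reads and 0 for writes.
 */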
int
esp_dma_isintr(sc)
	struct ncr53c9x_softc *sc;
{
	struct esp_softc *esc = (struct esp_softc *)sc;

	if (INTR_OCCURRED(NEXT_I_SCSI)) {
		NDTRACEIF (*ndtracep++ = 'i');
		NCR_WRITE_REG(sc, ESP_DCTL, ESPDCTL_16MHZ | ESPDCTL_INTENB |
		    (esc->sc_datain ? ESPDCTL_DMARD : 0));
		return (1);
	} else {
		return (0);
	}
}

#define nd_bsr4(reg) bus_space_read_4(nsc->sc_bst, nsc->sc_bsh, (reg))
#define nd_bsw4(reg,val) bus_space_write_4(nsc->sc_bst, nsc->sc_bsh, (reg), (val))

int
esp_dma_intr(sc)
	struct ncr53c9x_softc *sc;
{
	struct esp_softc *esc = (struct esp_softc *)sc;
	struct nextdma_softc *nsc = esc->sc_dma;
	struct nextdma_status *stat = &nsc->sc_stat;
	int r = (INTR_OCCURRED(NEXT_I_SCSI));
	int flushcount;
	r = 1;

	NDTRACEIF (*ndtracep++ = 'I');
	if (r) {
		/* printf ("esp_dma_isintr start\n"); */
		{
			int s = spldma();
			void *ndmap = stat->nd_map;
			int ndidx = stat->nd_idx;
			splx(s);

			flushcount = 0;

#ifdef ESP_DEBUG
			/* esp_dma_nest++; */

			if (esp_debug) {
				char sbuf[256];

				bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRSTAT)),
				    NEXT_INTR_BITS, sbuf, sizeof(sbuf));
				printf("esp_dma_isintr = 0x%s\n", sbuf);
			}
#endif

			while (!nextdma_finished(nsc)) { /* esp_dma_isactive(sc)) { */
				NDTRACEIF (*ndtracep++ = 'w');
				NDTRACEIF (
					sprintf (ndtracep, "f%dm%dl%dw",
					    NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF,
					    NCR_READ_REG((sc), NCR_TCM), NCR_READ_REG((sc), NCR_TCL));
					ndtracep += strlen (ndtracep);
				);
				if (NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF)
					flushcount = 5;
				NCR_WRITE_REG(sc, ESP_DCTL,
				    ESPDCTL_16MHZ | ESPDCTL_INTENB | ESPDCTL_DMAMOD |
				    (esc->sc_datain ? ESPDCTL_DMARD : 0));

				s = spldma();
				while (ndmap == stat->nd_map && ndidx == stat->nd_idx &&
				    !(nd_bsr4 (DD_CSR) & 0x08000000) &&
				    ++flushcount < 5) {
					splx(s);
					NDTRACEIF (*ndtracep++ = 'F');
					NCR_WRITE_REG(sc, ESP_DCTL, ESPDCTL_FLUSH |
					    ESPDCTL_16MHZ | ESPDCTL_INTENB | ESPDCTL_DMAMOD |
					    (esc->sc_datain ? ESPDCTL_DMARD : 0));
					doze(0x32);
					NCR_WRITE_REG(sc, ESP_DCTL,
					    ESPDCTL_16MHZ | ESPDCTL_INTENB | ESPDCTL_DMAMOD |
					    (esc->sc_datain ? ESPDCTL_DMARD : 0));
					doze(0x32);
					s = spldma();
				}
				NDTRACEIF (*ndtracep++ = '0' + flushcount);
				if (flushcount > 4) {
					int next;
					int onext = 0;

					splx(s);
					DPRINTF (("DMA reset\n"));
					while (((next = nd_bsr4 (DD_NEXT)) !=
					    (nd_bsr4 (DD_LIMIT) & 0x7FFFFFFF)) &&
					    onext != next) {
						onext = next;
						DELAY(50);
					}
					NDTRACEIF (*ndtracep++ = 'R');
					NCR_WRITE_REG(sc, ESP_DCTL, ESPDCTL_16MHZ | ESPDCTL_INTENB);
					NDTRACEIF (
						sprintf (ndtracep, "ff:%d tcm:%d tcl:%d ",
						    NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF,
						    NCR_READ_REG((sc), NCR_TCM),
						    NCR_READ_REG((sc), NCR_TCL));
						ndtracep += strlen (ndtracep);
					);
					s = spldma();
					nextdma_reset (nsc);
					splx(s);
					goto out;
				}
				splx(s);

#ifdef DIAGNOSTIC
				if (flushcount > 4) {
					NDTRACEIF (*ndtracep++ = '+');
					printf("%s: unexpected flushcount %d on %s\n",
					    sc->sc_dev.dv_xname, flushcount,
					    esc->sc_datain ? "read" : "write");
				}
#endif

				if (!nextdma_finished(nsc)) { /* esp_dma_isactive(sc)) { */
					NDTRACEIF (*ndtracep++ = '1');
				}
				flushcount = 0;
				s = spldma();
				ndmap = stat->nd_map;
				ndidx = stat->nd_idx;
				splx(s);

				goto loop;

			loop:
			}
			goto out;

		out:

#ifdef ESP_DEBUG
			/* esp_dma_nest--; */
#endif

		}

		doze(0x32);
		NCR_WRITE_REG(sc, ESP_DCTL, ESPDCTL_16MHZ | ESPDCTL_INTENB |
		    (esc->sc_datain ? ESPDCTL_DMARD : 0));
		NDTRACEIF (*ndtracep++ = 'b');
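		/*
		 * Added note (not in the original source): sc_datain is set
		 * back to -1 by esp_dmacb_shutdown() (and by esp_dma_reset()),
		 * so this busy-wait apparently lets the DMA channel run down
		 * and the shutdown callback fire before the transfer residual
		 * is computed below.
		 */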
		while (esc->sc_datain != -1) DELAY(50);

		if (esc->sc_dmaaddr) {
			bus_size_t xfer_len = 0;
			int resid;

			NCR_WRITE_REG(sc, ESP_DCTL, ESPDCTL_16MHZ | ESPDCTL_INTENB);
			if (stat->nd_exception == 0) {
				resid = NCR_READ_REG((sc), NCR_TCL) +
				    (NCR_READ_REG((sc), NCR_TCM) << 8);
				if (resid) {
					resid += (NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF);
#ifdef ESP_DEBUG
					if (NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF)
						if ((NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF) != 16 ||
						    NCR_READ_REG((sc), NCR_TCL) != 240)
							ndtraceshow++;
#endif
				}
				xfer_len = esc->sc_dmasize - resid;
			} else {
				/*static*/ void ncr53c9x_abort(struct ncr53c9x_softc *,
				    struct ncr53c9x_ecb *);
#define ncr53c9x_sched_msgout(m) \
	do {							\
		NCR_MISC(("ncr53c9x_sched_msgout %x %d", m, __LINE__)); \
		NCRCMD(sc, NCRCMD_SETATN);			\
		sc->sc_flags |= NCR_ATN;			\
		sc->sc_msgpriq |= (m);				\
	} while (0)
				int i;

				xfer_len = 0;
				if (esc->sc_begin)
					xfer_len += esc->sc_begin_size;
				if (esc->sc_main_dmamap)
					xfer_len += esc->sc_main_dmamap->dm_xfer_len;
				if (esc->sc_tail_dmamap)
					xfer_len += esc->sc_tail_dmamap->dm_xfer_len;
				resid = 0;
				printf ("X\n");
				for (i = 0; i < 16; i++) {
					NCR_WRITE_REG(sc, ESP_DCTL, ESPDCTL_FLUSH |
					    ESPDCTL_16MHZ | ESPDCTL_INTENB |
					    (esc->sc_datain ? ESPDCTL_DMARD : 0));
					NCR_WRITE_REG(sc, ESP_DCTL,
					    ESPDCTL_16MHZ | ESPDCTL_INTENB |
					    (esc->sc_datain ? ESPDCTL_DMARD : 0));
				}
#if 0
				printf ("ff:%02x tcm:%d tcl:%d esp_dstat:%02x stat:%02x step: %02x intr:%02x new stat:%02X\n",
				    NCR_READ_REG(sc, NCR_FFLAG),
				    NCR_READ_REG((sc), NCR_TCM), NCR_READ_REG((sc), NCR_TCL),
				    NCR_READ_REG(sc, ESP_DSTAT),
				    sc->sc_espstat, sc->sc_espstep,
				    sc->sc_espintr, NCR_READ_REG(sc, NCR_STAT));
				printf ("sc->sc_state: %x sc->sc_phase: %x sc->sc_espstep:%x sc->sc_prevphase:%x sc->sc_flags:%x\n",
				    sc->sc_state, sc->sc_phase, sc->sc_espstep,
				    sc->sc_prevphase, sc->sc_flags);
#endif
				/* sc->sc_flags &= ~NCR_ICCS; */
				sc->sc_nexus->flags |= ECB_ABORT;
				if (sc->sc_phase == MESSAGE_IN_PHASE) {
					/* ncr53c9x_sched_msgout(SEND_ABORT); */
					ncr53c9x_abort(sc, sc->sc_nexus);
				} else if (sc->sc_phase != STATUS_PHASE) {
					printf ("ATTENTION!!! not message/status phase: %d\n",
					    sc->sc_phase);
				}
			}

			NDTRACEIF (
				sprintf (ndtracep, "f%dm%dl%ds%dx%dr%dS",
				    NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF,
				    NCR_READ_REG((sc), NCR_TCM), NCR_READ_REG((sc), NCR_TCL),
				    esc->sc_dmasize, (int)xfer_len, resid);
				ndtracep += strlen (ndtracep);
			);

			*(esc->sc_dmaaddr) += xfer_len;
			*(esc->sc_dmalen) -= xfer_len;
			esc->sc_dmaaddr = 0;
			esc->sc_dmalen = 0;
			esc->sc_dmasize = 0;
		}

		NDTRACEIF (*ndtracep++ = 'B');
		sc->sc_espstat = NCR_READ_REG(sc, NCR_STAT) |
		    (sc->sc_espstat & NCRSTAT_INT);

		DPRINTF(("esp dctl is 0x%02x\n", NCR_READ_REG(sc, ESP_DCTL)));
		/* printf ("esp_dma_isintr DONE\n"); */

	}

	return (r);
}

void
esp_dma_reset(sc)
	struct ncr53c9x_softc *sc;
{
	struct esp_softc *esc = (struct esp_softc *)sc;

	DPRINTF(("esp DMA reset\n"));

#ifdef ESP_DEBUG
	if (esp_debug) {
		char sbuf[256];

		bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRSTAT)),
		    NEXT_INTR_BITS, sbuf, sizeof(sbuf));
		printf("  *intrstat = 0x%s\n", sbuf);

		bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRMASK)),
		    NEXT_INTR_BITS, sbuf, sizeof(sbuf));
		printf("  *intrmask = 0x%s\n", sbuf);
	}
#endif

#if 0
	/* Clear the DMAMOD bit in the DCTL register: */
	NCR_WRITE_REG(sc, ESP_DCTL,
	    ESPDCTL_16MHZ | ESPDCTL_INTENB);
	DPRINTF(("esp dctl is 0x%02x\n", NCR_READ_REG(sc, ESP_DCTL)));
#endif

	nextdma_reset(esc->sc_dma);
	nextdma_init(esc->sc_dma);

	esc->sc_datain = -1;
	esc->sc_dmaaddr = 0;
	esc->sc_dmalen = 0;
	esc->sc_dmasize = 0;

	esc->sc_loaded = 0;

	esc->sc_begin = 0;
	esc->sc_begin_size = 0;

	if (esc->sc_main_dmamap->dm_mapsize) {
		bus_dmamap_unload(esc->sc_dma->sc_dmat, esc->sc_main_dmamap);
	}
	esc->sc_main = 0;
	esc->sc_main_size = 0;

	if (esc->sc_tail_dmamap->dm_mapsize) {
		bus_dmamap_unload(esc->sc_dma->sc_dmat, esc->sc_tail_dmamap);
	}
	esc->sc_tail = 0;
	esc->sc_tail_size = 0;
}

/* It appears that:
 * the addr and len arguments to this need to be kept up to date
 * with the status of the transfer.
 * The dmasize argument is the actual length of the transfer
 * request, which is guaranteed to be less than maxxfer.
 * (len may be > maxxfer)
 */
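/*
 * Added overview (not from the original author): esp_dma_setup() splits a
 * request into up to three pieces because the NeXT DMA engine needs
 * 16-byte-aligned (DMA_SCSI_ALIGNMENT) start addresses and end-aligned
 * lengths.  For example, a read of 0x1000 bytes starting at an address
 * ending in ...5 gets an 11-byte "begin" slop (16 - 5) that esp_dma_go()
 * moves by hand through the chip FIFO, a "main" section from the next
 * 16-byte boundary that is mapped for DMA, and a "tail" made up of the
 * bytes past the last DMA_ENDALIGNMENT boundary, which are bounced through
 * the driver's sc_tailbuf.  Writes additionally pad the end so the DMA
 * engine overruns and posts a completion interrupt (ESP_DMA_OVERRUN).
 */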
int
esp_dma_setup(sc, addr, len, datain, dmasize)
	struct ncr53c9x_softc *sc;
	caddr_t *addr;
	size_t *len;
	int datain;
	size_t *dmasize;
{
	struct esp_softc *esc = (struct esp_softc *)sc;

	NDTRACEIF (*ndtracep++ = 'h');
#ifdef DIAGNOSTIC
#ifdef ESP_DEBUG
	/* If this is a read DMA, pre-fill the buffer with 0xdeadbeef
	 * to identify bogus reads
	 */
	if (datain) {
		int *v = (int *)(*addr);
		int i;
		for (i = 0; i < ((*len)/4); i++) v[i] = 0xdeadbeef;
		v = (int *)(&(esc->sc_tailbuf[0]));
		for (i = 0; i < ((sizeof(esc->sc_tailbuf)/4)); i++) v[i] = 0xdeafbeef;
	} else {
		int *v;
		int i;
		v = (int *)(&(esc->sc_tailbuf[0]));
		for (i = 0; i < ((sizeof(esc->sc_tailbuf)/4)); i++) v[i] = 0xfeeb1eed;
	}
#endif
#endif

	DPRINTF(("esp_dma_setup(%p,0x%08x,0x%08x)\n", *addr, *len, *dmasize));

#if 0
#ifdef DIAGNOSTIC	/* @@@ this is ok sometimes.  Verify that we handle it ok
			 * and then remove this check
			 */
	if (*len != *dmasize) {
		panic("esp dmalen 0x%lx != size 0x%lx", *len, *dmasize);
	}
#endif
#endif

#ifdef DIAGNOSTIC
	if ((esc->sc_datain != -1) ||
	    (esc->sc_main_dmamap->dm_mapsize != 0) ||
	    (esc->sc_tail_dmamap->dm_mapsize != 0) ||
	    (esc->sc_dmasize != 0)) {
		panic("%s: map already loaded in esp_dma_setup"
		    "\tdatain = %d\n\tmain_mapsize=%ld\n\ttail_mapsize=%ld\n\tdmasize = %d",
		    sc->sc_dev.dv_xname, esc->sc_datain,
		    esc->sc_main_dmamap->dm_mapsize,
		    esc->sc_tail_dmamap->dm_mapsize,
		    esc->sc_dmasize);
	}
#endif

	/* we are sometimes asked to DMA zero bytes, that's easy */
	if (*dmasize <= 0) {
		return (0);
	}

	if (*dmasize > ESP_MAX_DMASIZE)
		*dmasize = ESP_MAX_DMASIZE;

	/* Save these in case we have to abort DMA */
	esc->sc_datain = datain;
	esc->sc_dmaaddr = addr;
	esc->sc_dmalen = len;
	esc->sc_dmasize = *dmasize;

	esc->sc_loaded = 0;

#define DMA_SCSI_ALIGNMENT 16
#define DMA_SCSI_ALIGN(type, addr) \
	((type)(((unsigned)(addr)+DMA_SCSI_ALIGNMENT-1) \
		&~(DMA_SCSI_ALIGNMENT-1)))
#define DMA_SCSI_ALIGNED(addr) \
	(((unsigned)(addr)&(DMA_SCSI_ALIGNMENT-1))==0)

	{
		size_t slop_bgn_size; /* # bytes to be fifo'd at beginning */
		size_t slop_end_size; /* # bytes to be transferred in tail buffer */

		{
			u_long bgn = (u_long)(*esc->sc_dmaaddr);
			u_long end = (u_long)(*esc->sc_dmaaddr+esc->sc_dmasize);

			slop_bgn_size = DMA_SCSI_ALIGNMENT-(bgn % DMA_SCSI_ALIGNMENT);
			if (slop_bgn_size == DMA_SCSI_ALIGNMENT)
				slop_bgn_size = 0;
			slop_end_size = (end % DMA_ENDALIGNMENT);
		}

		/* Force a minimum slop end size.  This ensures that write
		 * requests will overrun, as required to get completion
		 * interrupts.  In addition, since the tail buffer is
		 * guaranteed to be mapped in a single DMA segment, the
		 * overrun won't accidentally end up in its own segment.
		 */
		if (!esc->sc_datain) {
#if 0
			slop_end_size += ESP_DMA_MAXTAIL;
#else
			slop_end_size += 0x10;
#endif
		}

		/* Check to make sure we haven't counted extra slop, as can
		 * happen for a very short DMA buffer; for short buffers,
		 * just stuff the entire thing in the tail.
		 */
		if ((slop_bgn_size+slop_end_size >= esc->sc_dmasize)
#if 0
		    || (esc->sc_dmasize <= ESP_DMA_MAXTAIL)
#endif
		    )
		{
			slop_bgn_size = 0;
			slop_end_size = esc->sc_dmasize;
		}

		/* initialize the fifo buffer */
		if (slop_bgn_size) {
			esc->sc_begin = *esc->sc_dmaaddr;
			esc->sc_begin_size = slop_bgn_size;
		} else {
			esc->sc_begin = 0;
			esc->sc_begin_size = 0;
		}

#if 01
		/* Load the normal DMA map */
		{
			esc->sc_main = *esc->sc_dmaaddr+slop_bgn_size;
			esc->sc_main_size = (esc->sc_dmasize)-(slop_end_size+slop_bgn_size);

			if (esc->sc_main_size) {
				int error;

				if (!esc->sc_datain ||
				    DMA_ENDALIGNED(esc->sc_main_size + slop_end_size)) {
					KASSERT(DMA_SCSI_ALIGNMENT == DMA_ENDALIGNMENT);
					KASSERT(DMA_BEGINALIGNMENT == DMA_ENDALIGNMENT);
					esc->sc_main_size += slop_end_size;
					slop_end_size = 0;
					if (!esc->sc_datain) {
						esc->sc_main_size = DMA_ENDALIGN(caddr_t,
						    esc->sc_main+esc->sc_main_size) - esc->sc_main;
					}
				}

				error = bus_dmamap_load(esc->sc_dma->sc_dmat,
				    esc->sc_main_dmamap,
				    esc->sc_main, esc->sc_main_size,
				    NULL, BUS_DMA_NOWAIT);
				if (error) {
#ifdef ESP_DEBUG
					printf("%s: esc->sc_main_dmamap->_dm_size = %ld\n",
					    sc->sc_dev.dv_xname, esc->sc_main_dmamap->_dm_size);
					printf("%s: esc->sc_main_dmamap->_dm_segcnt = %d\n",
					    sc->sc_dev.dv_xname, esc->sc_main_dmamap->_dm_segcnt);
					printf("%s: esc->sc_main_dmamap->_dm_maxsegsz = %ld\n",
					    sc->sc_dev.dv_xname, esc->sc_main_dmamap->_dm_maxsegsz);
					printf("%s: esc->sc_main_dmamap->_dm_boundary = %ld\n",
					    sc->sc_dev.dv_xname, esc->sc_main_dmamap->_dm_boundary);
					esp_dma_print(sc);
#endif
					panic("%s: can't load main DMA map.  error = %d, addr=%p, size=0x%08x",
					    sc->sc_dev.dv_xname, error, esc->sc_main, esc->sc_main_size);
				}
				if (!esc->sc_datain) {
					/* patch the DMA map for write overrun */
					esc->sc_main_dmamap->dm_mapsize += ESP_DMA_OVERRUN;
					esc->sc_main_dmamap->dm_segs[esc->sc_main_dmamap->dm_nsegs - 1].ds_len +=
					    ESP_DMA_OVERRUN;
				}
#if 0
				bus_dmamap_sync(esc->sc_dma->sc_dmat, esc->sc_main_dmamap,
				    0, esc->sc_main_dmamap->dm_mapsize,
				    (esc->sc_datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
				esc->sc_main_dmamap->dm_xfer_len = 0;
#endif
			} else {
				esc->sc_main = 0;
			}
		}

		/* Load the tail DMA map */
		if (slop_end_size) {
			esc->sc_tail = DMA_ENDALIGN(caddr_t,
			    esc->sc_tailbuf+slop_end_size) - slop_end_size;
			/* If the beginning of the tail is not correctly aligned,
			 * we have no choice but to align the start, which might
			 * then unalign the end.
			 */
			esc->sc_tail = DMA_SCSI_ALIGN(caddr_t, esc->sc_tail);
			/* So therefore, we change the tail size to be end aligned again. */
			esc->sc_tail_size = DMA_ENDALIGN(caddr_t,
			    esc->sc_tail+slop_end_size) - esc->sc_tail;

			/* @@@ next DMA overrun lossage */
			if (!esc->sc_datain) {
				esc->sc_tail_size += ESP_DMA_OVERRUN;
			}

			{
				int error;
				error = bus_dmamap_load(esc->sc_dma->sc_dmat,
				    esc->sc_tail_dmamap,
				    esc->sc_tail, esc->sc_tail_size,
				    NULL, BUS_DMA_NOWAIT);
				if (error) {
					panic("%s: can't load tail DMA map.  error = %d, addr=%p, size=0x%08x",
					    sc->sc_dev.dv_xname, error, esc->sc_tail, esc->sc_tail_size);
				}
#if 0
				bus_dmamap_sync(esc->sc_dma->sc_dmat, esc->sc_tail_dmamap,
				    0, esc->sc_tail_dmamap->dm_mapsize,
				    (esc->sc_datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
				esc->sc_tail_dmamap->dm_xfer_len = 0;
#endif
			}
		}
#else

		esc->sc_begin = *esc->sc_dmaaddr;
		slop_bgn_size = DMA_SCSI_ALIGNMENT -
		    ((ulong)esc->sc_begin % DMA_SCSI_ALIGNMENT);
		if (slop_bgn_size == DMA_SCSI_ALIGNMENT)
			slop_bgn_size = 0;
		slop_end_size = esc->sc_dmasize - slop_bgn_size;

		if (slop_bgn_size < esc->sc_dmasize) {
			int error;

			esc->sc_tail = 0;
			esc->sc_tail_size = 0;

			esc->sc_begin_size = slop_bgn_size;
			esc->sc_main = *esc->sc_dmaaddr+slop_bgn_size;
			esc->sc_main_size = DMA_ENDALIGN(caddr_t,
			    esc->sc_main+esc->sc_dmasize-slop_bgn_size) - esc->sc_main;

			if (!esc->sc_datain) {
				esc->sc_main_size += ESP_DMA_OVERRUN;
			}
			error = bus_dmamap_load(esc->sc_dma->sc_dmat,
			    esc->sc_main_dmamap,
			    esc->sc_main, esc->sc_main_size,
			    NULL, BUS_DMA_NOWAIT);
			if (error) {
				panic("%s: can't load main DMA map.  error = %d, addr=%p, size=0x%08x",
				    sc->sc_dev.dv_xname, error, esc->sc_main, esc->sc_main_size);
			}
		} else {
			esc->sc_begin = 0;
			esc->sc_begin_size = 0;
			esc->sc_main = 0;
			esc->sc_main_size = 0;

#if 0
			esc->sc_tail = DMA_ENDALIGN(caddr_t,
			    esc->sc_tailbuf+slop_bgn_size) - slop_bgn_size;
			/* If the beginning of the tail is not correctly aligned,
			 * we have no choice but to align the start, which might
			 * then unalign the end.
			 */
#endif
			esc->sc_tail = DMA_SCSI_ALIGN(caddr_t, esc->sc_tailbuf);
			/* So therefore, we change the tail size to be end aligned again. */
			esc->sc_tail_size = DMA_ENDALIGN(caddr_t,
			    esc->sc_tail+esc->sc_dmasize) - esc->sc_tail;

			/* @@@ next DMA overrun lossage */
			if (!esc->sc_datain) {
				esc->sc_tail_size += ESP_DMA_OVERRUN;
			}

			{
				int error;
				error = bus_dmamap_load(esc->sc_dma->sc_dmat,
				    esc->sc_tail_dmamap,
				    esc->sc_tail, esc->sc_tail_size,
				    NULL, BUS_DMA_NOWAIT);
				if (error) {
					panic("%s: can't load tail DMA map.  error = %d, addr=%p, size=0x%08x",
					    sc->sc_dev.dv_xname, error, esc->sc_tail, esc->sc_tail_size);
				}
			}
		}
#endif

		DPRINTF(("%s: setup: %8p %d %8p %d %8p %d %8p %d\n",
		    sc->sc_dev.dv_xname,
		    *esc->sc_dmaaddr, esc->sc_dmasize, esc->sc_begin,
		    esc->sc_begin_size, esc->sc_main, esc->sc_main_size,
		    esc->sc_tail, esc->sc_tail_size));
	}

	return (0);
}

#ifdef ESP_DEBUG
/* For debugging */
void
esp_dma_store(sc)
	struct ncr53c9x_softc *sc;
{
	struct esp_softc *esc = (struct esp_softc *)sc;
	char *p = &esp_dma_dump[0];

	p += sprintf(p, "%s: sc_datain=%d\n", sc->sc_dev.dv_xname, esc->sc_datain);
	p += sprintf(p, "%s: sc_loaded=0x%08x\n", sc->sc_dev.dv_xname, esc->sc_loaded);

	if (esc->sc_dmaaddr) {
		p += sprintf(p, "%s: sc_dmaaddr=%p\n", sc->sc_dev.dv_xname, *esc->sc_dmaaddr);
	} else {
		p += sprintf(p, "%s: sc_dmaaddr=NULL\n", sc->sc_dev.dv_xname);
	}
	if (esc->sc_dmalen) {
		p += sprintf(p, "%s: sc_dmalen=0x%08x\n", sc->sc_dev.dv_xname, *esc->sc_dmalen);
	} else {
		p += sprintf(p, "%s: sc_dmalen=NULL\n", sc->sc_dev.dv_xname);
	}
	p += sprintf(p, "%s: sc_dmasize=0x%08x\n", sc->sc_dev.dv_xname, esc->sc_dmasize);

	p += sprintf(p, "%s: sc_begin = %p, sc_begin_size = 0x%08x\n",
	    sc->sc_dev.dv_xname, esc->sc_begin, esc->sc_begin_size);
	p += sprintf(p, "%s: sc_main = %p, sc_main_size = 0x%08x\n",
	    sc->sc_dev.dv_xname, esc->sc_main, esc->sc_main_size);
	/* if (esc->sc_main) */ {
		int i;
		bus_dmamap_t map = esc->sc_main_dmamap;

		p += sprintf(p, "%s: sc_main_dmamap.  mapsize = 0x%08lx, nsegs = %d\n",
		    sc->sc_dev.dv_xname, map->dm_mapsize, map->dm_nsegs);
		for (i = 0; i < map->dm_nsegs; i++) {
			p += sprintf(p, "%s: map->dm_segs[%d].ds_addr = 0x%08lx, len = 0x%08lx\n",
			    sc->sc_dev.dv_xname, i, map->dm_segs[i].ds_addr, map->dm_segs[i].ds_len);
		}
	}
	p += sprintf(p, "%s: sc_tail = %p, sc_tail_size = 0x%08x\n",
	    sc->sc_dev.dv_xname, esc->sc_tail, esc->sc_tail_size);
	/* if (esc->sc_tail) */ {
		int i;
		bus_dmamap_t map = esc->sc_tail_dmamap;

		p += sprintf(p, "%s: sc_tail_dmamap.  mapsize = 0x%08lx, nsegs = %d\n",
		    sc->sc_dev.dv_xname, map->dm_mapsize, map->dm_nsegs);
		for (i = 0; i < map->dm_nsegs; i++) {
			p += sprintf(p, "%s: map->dm_segs[%d].ds_addr = 0x%08lx, len = 0x%08lx\n",
			    sc->sc_dev.dv_xname, i, map->dm_segs[i].ds_addr, map->dm_segs[i].ds_len);
		}
	}
}

void
esp_dma_print(sc)
	struct ncr53c9x_softc *sc;
{
	esp_dma_store(sc);
	printf("%s", esp_dma_dump);
}
#endif

void
esp_dma_go(sc)
	struct ncr53c9x_softc *sc;
{
	struct esp_softc *esc = (struct esp_softc *)sc;
	struct nextdma_softc *nsc = esc->sc_dma;
	struct nextdma_status *stat = &nsc->sc_stat;
	/* int s = spldma(); */

#ifdef ESP_DEBUG
	if (ndtracep != ndtrace) {
		if (ndtraceshow) {
			*ndtracep = '\0';
			printf ("esp ndtrace: %s\n", ndtrace);
			ndtraceshow = 0;
		} else {
			DPRINTF (("X"));
		}
		ndtracep = ndtrace;
	}
#endif

	DPRINTF(("%s: esp_dma_go(datain = %d)\n",
	    sc->sc_dev.dv_xname, esc->sc_datain));

#ifdef ESP_DEBUG
	if (esp_debug) esp_dma_print(sc);
	else esp_dma_store(sc);
#endif

#ifdef ESP_DEBUG
	{
		int n = NCR_READ_REG(sc, NCR_FFLAG);

		DPRINTF(("%s: fifo size = %d, seq = 0x%x\n",
		    sc->sc_dev.dv_xname,
		    n & NCRFIFO_FF, (n & NCRFIFO_SS)>>5));
	}
#endif

	/* zero length DMA transfers are boring */
	if (esc->sc_dmasize == 0) {
		/* splx(s); */
		return;
	}

#if defined(DIAGNOSTIC)
	if ((esc->sc_begin_size == 0) &&
	    (esc->sc_main_dmamap->dm_mapsize == 0) &&
	    (esc->sc_tail_dmamap->dm_mapsize == 0)) {
#ifdef ESP_DEBUG
		esp_dma_print(sc);
#endif
		panic("%s: No DMA requested!", sc->sc_dev.dv_xname);
	}
#endif

	/* Stuff the fifo with the begin buffer */
	if (esc->sc_datain) {
		int i;

		DPRINTF(("%s: FIFO read of %d bytes:",
		    sc->sc_dev.dv_xname, esc->sc_begin_size));
		for (i = 0; i < esc->sc_begin_size; i++) {
			esc->sc_begin[i] = NCR_READ_REG(sc, NCR_FIFO);
			DPRINTF((" %02x", esc->sc_begin[i]&0xff));
		}
		DPRINTF(("\n"));
	} else {
		int i;

		DPRINTF(("%s: FIFO write of %d bytes:",
		    sc->sc_dev.dv_xname, esc->sc_begin_size));
		for (i = 0; i < esc->sc_begin_size; i++) {
			NCR_WRITE_REG(sc, NCR_FIFO, esc->sc_begin[i]);
			DPRINTF((" %02x", esc->sc_begin[i]&0xff));
		}
		DPRINTF(("\n"));
	}

	if (esc->sc_main_dmamap->dm_mapsize) {
		bus_dmamap_sync(esc->sc_dma->sc_dmat, esc->sc_main_dmamap,
		    0, esc->sc_main_dmamap->dm_mapsize,
		    (esc->sc_datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
		esc->sc_main_dmamap->dm_xfer_len = 0;
	}

	if (esc->sc_tail_dmamap->dm_mapsize) {
		/* if we are a DMA write cycle, copy the end slop */
		if (!esc->sc_datain) {
			memcpy(esc->sc_tail,
			    *esc->sc_dmaaddr+esc->sc_begin_size+esc->sc_main_size,
			    esc->sc_dmasize-(esc->sc_begin_size+esc->sc_main_size));
		}
		bus_dmamap_sync(esc->sc_dma->sc_dmat, esc->sc_tail_dmamap,
		    0, esc->sc_tail_dmamap->dm_mapsize,
		    (esc->sc_datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
		esc->sc_tail_dmamap->dm_xfer_len = 0;
	}

	stat->nd_exception = 0;
	nextdma_start(nsc, (esc->sc_datain ? DMACSR_SETREAD : DMACSR_SETWRITE));
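	/*
	 * Added note (not in the original source): with the maps loaded and
	 * nextdma started, ESP_DCTL is switched into DMA mode below;
	 * ESPDCTL_DMAMOD hands the data path to the DMA engine and
	 * ESPDCTL_DMARD marks the transfer as a device-to-memory (read)
	 * cycle.
	 */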
	if (esc->sc_datain) {
		NCR_WRITE_REG(sc, ESP_DCTL,
		    ESPDCTL_16MHZ | ESPDCTL_INTENB | ESPDCTL_DMAMOD | ESPDCTL_DMARD);
	} else {
		NCR_WRITE_REG(sc, ESP_DCTL,
		    ESPDCTL_16MHZ | ESPDCTL_INTENB | ESPDCTL_DMAMOD);
	}
	DPRINTF(("esp dctl is 0x%02x\n", NCR_READ_REG(sc, ESP_DCTL)));

	NDTRACEIF (if (esc->sc_begin_size) { *ndtracep++ = '1'; *ndtracep++ = 'A' + esc->sc_begin_size; });
	NDTRACEIF (if (esc->sc_main_size) { *ndtracep++ = '2'; *ndtracep++ = '0' + esc->sc_main_dmamap->dm_nsegs; });
	NDTRACEIF (if (esc->sc_tail_size) { *ndtracep++ = '3'; *ndtracep++ = 'A' + esc->sc_tail_size; });

	/* splx(s); */
}

void
esp_dma_stop(sc)
	struct ncr53c9x_softc *sc;
{
	struct esp_softc *esc = (struct esp_softc *)sc;

	nextdma_print(esc->sc_dma);
#ifdef ESP_DEBUG
	esp_dma_print(sc);
#endif
#if 1
	panic("%s: stop not yet implemented", sc->sc_dev.dv_xname);
#endif
}

int
esp_dma_isactive(sc)
	struct ncr53c9x_softc *sc;
{
	struct esp_softc *esc = (struct esp_softc *)sc;
	int r = (esc->sc_dmaaddr != NULL);	/* !nextdma_finished(esc->sc_dma); */

	DPRINTF(("esp_dma_isactive = %d\n", r));
	return (r);
}

/****************************************************************/
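/*
 * Added note (not in the original source): esp_dma_int() appears to be a
 * SCSI-specific variant of the generic nextdma interrupt handler; it drives
 * the channel registers (DD_CSR, DD_NEXT, DD_LIMIT, DD_START, DD_STOP)
 * directly and mirrors the buffer-rotation logic of the nextdma code,
 * updating the same nextdma_status bookkeeping.
 */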
int esp_dma_int __P((void *));
int
esp_dma_int(arg)
	void *arg;
{
	void nextdma_rotate __P((struct nextdma_softc *));
	void nextdma_setup_curr_regs __P((struct nextdma_softc *));
	void nextdma_setup_cont_regs __P((struct nextdma_softc *));

	struct ncr53c9x_softc *sc = (struct ncr53c9x_softc *)arg;
	struct esp_softc *esc = (struct esp_softc *)sc;
	struct nextdma_softc *nsc = esc->sc_dma;
	struct nextdma_status *stat = &nsc->sc_stat;
	unsigned int state;

	NDTRACEIF (*ndtracep++ = 'E');

	state = nd_bsr4 (DD_CSR);

#if 1
	NDTRACEIF (
		if (state & DMACSR_COMPLETE) *ndtracep++ = 'c';
		if (state & DMACSR_ENABLE) *ndtracep++ = 'e';
		if (state & DMACSR_BUSEXC) *ndtracep++ = 'b';
		if (state & DMACSR_READ) *ndtracep++ = 'r';
		if (state & DMACSR_SUPDATE) *ndtracep++ = 's';
	);

	NDTRACEIF (*ndtracep++ = 'E');

#ifdef ESP_DEBUG
	if (0) if ((state & DMACSR_BUSEXC) && (state & DMACSR_ENABLE)) ndtraceshow++;
	if (0) if ((state & DMACSR_SUPDATE)) ndtraceshow++;
#endif
#endif

	if ((stat->nd_exception == 0) &&
	    (state & DMACSR_COMPLETE) && (state & DMACSR_ENABLE)) {
		stat->nd_map->dm_xfer_len += stat->nd_map->dm_segs[stat->nd_idx].ds_len;
	}

	if ((stat->nd_idx+1) == stat->nd_map->dm_nsegs) {
		if (nsc->sc_conf.nd_completed_cb)
			(*nsc->sc_conf.nd_completed_cb)(stat->nd_map, nsc->sc_conf.nd_cb_arg);
	}
	nextdma_rotate(nsc);

	if ((state & DMACSR_COMPLETE) && (state & DMACSR_ENABLE)) {
#if 0
		int l = nd_bsr4 (DD_LIMIT) & 0x7FFFFFFF;
		int s = nd_bsr4 (DD_STOP);
#endif
		/* nextdma_setup_cont_regs(nsc); */
		if (stat->nd_map_cont) {
			nd_bsw4 (DD_START, stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr);
			nd_bsw4 (DD_STOP, (stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
			    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len));
		}

		nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE |
		    (state & DMACSR_READ ? DMACSR_SETREAD : DMACSR_SETWRITE) |
		    (stat->nd_map_cont ? DMACSR_SETSUPDATE : 0));

#if 0
#ifdef ESP_DEBUG
		if (state & DMACSR_BUSEXC) {
			sprintf (ndtracep, "CE/BUSEXC: %08lX %08X %08X\n",
			    (stat->nd_map->dm_segs[stat->nd_idx].ds_addr +
			     stat->nd_map->dm_segs[stat->nd_idx].ds_len),
			    l, s);
			ndtracep += strlen (ndtracep);
		}
#endif
#endif
	} else {
#if 0
		if (state & DMACSR_BUSEXC) {
			while (nd_bsr4 (DD_NEXT) !=
			    (nd_bsr4 (DD_LIMIT) & 0x7FFFFFFF))
				printf ("Y"); /* DELAY(50); */
			state = nd_bsr4 (DD_CSR);
		}
#endif

		if (!(state & DMACSR_SUPDATE)) {
			nextdma_rotate(nsc);
		} else {
			nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE |
			    DMACSR_INITBUF | DMACSR_RESET |
			    (state & DMACSR_READ ? DMACSR_SETREAD : DMACSR_SETWRITE));

			nd_bsw4 (DD_NEXT, stat->nd_map->dm_segs[stat->nd_idx].ds_addr);
			nd_bsw4 (DD_LIMIT,
			    (stat->nd_map->dm_segs[stat->nd_idx].ds_addr +
			     stat->nd_map->dm_segs[stat->nd_idx].ds_len) | 0/* x80000000 */);
			if (stat->nd_map_cont) {
				nd_bsw4 (DD_START,
				    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr);
				nd_bsw4 (DD_STOP,
				    (stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
				     stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len) | 0/* x80000000 */);
			}
			nd_bsw4 (DD_CSR, DMACSR_SETENABLE | DMACSR_CLRCOMPLETE |
			    (state & DMACSR_READ ? DMACSR_SETREAD : DMACSR_SETWRITE) |
			    (stat->nd_map_cont ? DMACSR_SETSUPDATE : 0));
#if 1
#ifdef ESP_DEBUG
			sprintf (ndtracep, "supdate ");
			ndtracep += strlen (ndtracep);
			sprintf (ndtracep, "%08X %08X %08X %08X ",
			    nd_bsr4 (DD_NEXT),
			    nd_bsr4 (DD_LIMIT) & 0x7FFFFFFF,
			    nd_bsr4 (DD_START),
			    nd_bsr4 (DD_STOP) & 0x7FFFFFFF);
			ndtracep += strlen (ndtracep);
#endif
#endif
			stat->nd_exception++;
			return (1);
			/* NCR_WRITE_REG(sc, ESP_DCTL, ctl); */
			goto restart;
		}

		if (stat->nd_map) {
#if 1
#ifdef ESP_DEBUG
			sprintf (ndtracep, "%08X %08X %08X %08X ",
			    nd_bsr4 (DD_NEXT),
			    nd_bsr4 (DD_LIMIT) & 0x7FFFFFFF,
			    nd_bsr4 (DD_START),
			    nd_bsr4 (DD_STOP) & 0x7FFFFFFF);
			ndtracep += strlen (ndtracep);
#endif
#endif

#if 0
			nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);

			nd_bsw4 (DD_CSR, 0);
#endif
#if 1
			/* 6/2 */
			nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE |
			    DMACSR_INITBUF | DMACSR_RESET |
			    (state & DMACSR_READ ? DMACSR_SETREAD : DMACSR_SETWRITE));

			/* nextdma_setup_curr_regs(nsc); */
			nd_bsw4 (DD_NEXT, stat->nd_map->dm_segs[stat->nd_idx].ds_addr);
			nd_bsw4 (DD_LIMIT,
			    (stat->nd_map->dm_segs[stat->nd_idx].ds_addr +
			     stat->nd_map->dm_segs[stat->nd_idx].ds_len) | 0/* x80000000 */);
			/* nextdma_setup_cont_regs(nsc); */
			if (stat->nd_map_cont) {
				nd_bsw4 (DD_START,
				    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr);
				nd_bsw4 (DD_STOP,
				    (stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
				     stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len) | 0/* x80000000 */);
			}

			nd_bsw4 (DD_CSR, DMACSR_SETENABLE |
			    (stat->nd_map_cont ? DMACSR_SETSUPDATE : 0) |
			    (state & DMACSR_READ ? DMACSR_SETREAD : DMACSR_SETWRITE));
#ifdef ESP_DEBUG
			/* ndtraceshow++; */
#endif
			stat->nd_exception++;
			return (1);
#endif
			/* NCR_WRITE_REG(sc, ESP_DCTL, ctl); */
			goto restart;

		restart:
#if 1
#ifdef ESP_DEBUG
			sprintf (ndtracep, "restart %08lX %08lX\n",
			    stat->nd_map->dm_segs[stat->nd_idx].ds_addr,
			    stat->nd_map->dm_segs[stat->nd_idx].ds_addr +
			    stat->nd_map->dm_segs[stat->nd_idx].ds_len);
			if (stat->nd_map_cont) {
				sprintf (ndtracep + strlen(ndtracep) - 1, " %08lX %08lX\n",
				    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr,
				    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
				    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
			}
			ndtracep += strlen (ndtracep);
#endif
#endif
			nextdma_print(nsc);
			NCR_WRITE_REG(sc, ESP_DCTL, ESPDCTL_16MHZ | ESPDCTL_INTENB);
			printf ("ff:%02x tcm:%d tcl:%d esp_dstat:%02x state:%02x step: %02x intr:%02x state:%08X\n",
			    NCR_READ_REG(sc, NCR_FFLAG),
			    NCR_READ_REG((sc), NCR_TCM), NCR_READ_REG((sc), NCR_TCL),
			    NCR_READ_REG(sc, ESP_DSTAT),
			    NCR_READ_REG(sc, NCR_STAT), NCR_READ_REG(sc, NCR_STEP),
			    NCR_READ_REG(sc, NCR_INTR), state);
#ifdef ESP_DEBUG
			*ndtracep = '\0';
			printf ("ndtrace: %s\n", ndtrace);
#endif
			panic("%s: busexc/supdate occurred.  Please email this output to chris@pin.lu.",
			    sc->sc_dev.dv_xname);
#ifdef ESP_DEBUG
			ndtraceshow++;
#endif
		} else {
			nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);
			if (nsc->sc_conf.nd_shutdown_cb)
				(*nsc->sc_conf.nd_shutdown_cb)(nsc->sc_conf.nd_cb_arg);
		}
	}
	return (1);
}

/* Internal DMA callback routines */
bus_dmamap_t
esp_dmacb_continue(arg)
	void *arg;
{
	struct ncr53c9x_softc *sc = (struct ncr53c9x_softc *)arg;
	struct esp_softc *esc = (struct esp_softc *)sc;

	NDTRACEIF (*ndtracep++ = 'x');
	DPRINTF(("%s: DMA continue\n", sc->sc_dev.dv_xname));

#ifdef DIAGNOSTIC
	if ((esc->sc_datain < 0) || (esc->sc_datain > 1)) {
		panic("%s: map not loaded in DMA continue callback, datain = %d",
		    sc->sc_dev.dv_xname, esc->sc_datain);
	}
#endif

	if ((!(esc->sc_loaded & ESP_LOADED_MAIN)) &&
	    (esc->sc_main_dmamap->dm_mapsize)) {
		DPRINTF(("%s: Loading main map\n", sc->sc_dev.dv_xname));
#if 0
		bus_dmamap_sync(esc->sc_dma->sc_dmat, esc->sc_main_dmamap,
		    0, esc->sc_main_dmamap->dm_mapsize,
		    (esc->sc_datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
		esc->sc_main_dmamap->dm_xfer_len = 0;
#endif
		esc->sc_loaded |= ESP_LOADED_MAIN;
		return (esc->sc_main_dmamap);
	}

	if ((!(esc->sc_loaded & ESP_LOADED_TAIL)) &&
	    (esc->sc_tail_dmamap->dm_mapsize)) {
		DPRINTF(("%s: Loading tail map\n", sc->sc_dev.dv_xname));
#if 0
		bus_dmamap_sync(esc->sc_dma->sc_dmat, esc->sc_tail_dmamap,
		    0, esc->sc_tail_dmamap->dm_mapsize,
		    (esc->sc_datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
		esc->sc_tail_dmamap->dm_xfer_len = 0;
#endif
		esc->sc_loaded |= ESP_LOADED_TAIL;
		return (esc->sc_tail_dmamap);
	}
	DPRINTF(("%s: not loading map\n", sc->sc_dev.dv_xname));
	return (0);
}


void
esp_dmacb_completed(map, arg)
	bus_dmamap_t map;
	void *arg;
{
	struct ncr53c9x_softc *sc = (struct ncr53c9x_softc *)arg;
	struct esp_softc *esc = (struct esp_softc *)sc;

	NDTRACEIF (*ndtracep++ = 'X');
	DPRINTF(("%s: DMA completed\n", sc->sc_dev.dv_xname));

#ifdef DIAGNOSTIC
	if ((esc->sc_datain < 0) || (esc->sc_datain > 1)) {
		panic("%s: invalid DMA direction in completed callback, datain = %d",
		    sc->sc_dev.dv_xname, esc->sc_datain);
	}
#endif

#if defined(DIAGNOSTIC) && 0
	{
		int i;
		for (i = 0; i < map->dm_nsegs; i++) {
			if (map->dm_xfer_len != map->dm_mapsize) {
				printf("%s: map->dm_mapsize = %d\n",
				    sc->sc_dev.dv_xname, map->dm_mapsize);
				printf("%s: map->dm_nsegs = %d\n",
				    sc->sc_dev.dv_xname, map->dm_nsegs);
				printf("%s: map->dm_xfer_len = %d\n",
				    sc->sc_dev.dv_xname, map->dm_xfer_len);
				for (i = 0; i < map->dm_nsegs; i++) {
					printf("%s: map->dm_segs[%d].ds_addr = 0x%08lx\n",
					    sc->sc_dev.dv_xname, i, map->dm_segs[i].ds_addr);
					printf("%s: map->dm_segs[%d].ds_len = %d\n",
					    sc->sc_dev.dv_xname, i, map->dm_segs[i].ds_len);
				}
				panic("%s: incomplete DMA transfer", sc->sc_dev.dv_xname);
			}
		}
	}
#endif

	if (map == esc->sc_main_dmamap) {
#ifdef DIAGNOSTIC
		if ((esc->sc_loaded & ESP_UNLOADED_MAIN) ||
		    !(esc->sc_loaded & ESP_LOADED_MAIN)) {
			panic("%s: unexpected completed call for main map",
			    sc->sc_dev.dv_xname);
		}
#endif
		esc->sc_loaded |= ESP_UNLOADED_MAIN;
	} else if (map == esc->sc_tail_dmamap) {
#ifdef DIAGNOSTIC
		if ((esc->sc_loaded & ESP_UNLOADED_TAIL) ||
		    !(esc->sc_loaded & ESP_LOADED_TAIL)) {
			panic("%s: unexpected completed call for tail map",
			    sc->sc_dev.dv_xname);
		}
#endif
		esc->sc_loaded |= ESP_UNLOADED_TAIL;
	}
#ifdef DIAGNOSTIC
	else {
		panic("%s: unexpected completed map", sc->sc_dev.dv_xname);
	}
#endif

#ifdef ESP_DEBUG
	if (esp_debug) {
		if (map == esc->sc_main_dmamap) {
			printf("%s: completed main map\n", sc->sc_dev.dv_xname);
		} else if (map == esc->sc_tail_dmamap) {
			printf("%s: completed tail map\n", sc->sc_dev.dv_xname);
		}
	}
#endif

#if 0
	if ((map == esc->sc_tail_dmamap) ||
	    ((esc->sc_tail_size == 0) && (map == esc->sc_main_dmamap))) {

		/* Clear the DMAMOD bit in the DCTL register to give control
		 * back to the scsi chip.
		 */
		if (esc->sc_datain) {
			NCR_WRITE_REG(sc, ESP_DCTL,
			    ESPDCTL_16MHZ | ESPDCTL_INTENB | ESPDCTL_DMARD);
		} else {
			NCR_WRITE_REG(sc, ESP_DCTL,
			    ESPDCTL_16MHZ | ESPDCTL_INTENB);
		}
		DPRINTF(("esp dctl is 0x%02x\n", NCR_READ_REG(sc, ESP_DCTL)));
	}
#endif


#if 0
	bus_dmamap_sync(esc->sc_dma->sc_dmat, map,
	    0, map->dm_mapsize,
	    (esc->sc_datain ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
#endif

}
void
esp_dmacb_shutdown(arg)
	void *arg;
{
	struct ncr53c9x_softc *sc = (struct ncr53c9x_softc *)arg;
	struct esp_softc *esc = (struct esp_softc *)sc;

	NDTRACEIF (*ndtracep++ = 'S');
	DPRINTF(("%s: DMA shutdown\n", sc->sc_dev.dv_xname));

	if (esc->sc_loaded == 0)
		return;

#if 0
	{
		/* Clear the DMAMOD bit in the DCTL register to give control
		 * back to the scsi chip.
		 */
		if (esc->sc_datain) {
			NCR_WRITE_REG(sc, ESP_DCTL,
			    ESPDCTL_16MHZ | ESPDCTL_INTENB | ESPDCTL_DMARD);
		} else {
			NCR_WRITE_REG(sc, ESP_DCTL,
			    ESPDCTL_16MHZ | ESPDCTL_INTENB);
		}
		DPRINTF(("esp dctl is 0x%02x\n", NCR_READ_REG(sc, ESP_DCTL)));
	}
#endif

	DPRINTF(("%s: esp_dma_nest == %d\n", sc->sc_dev.dv_xname, esp_dma_nest));

	/* Stuff the end slop into fifo */

#ifdef ESP_DEBUG
	if (esp_debug) {
		int n = NCR_READ_REG(sc, NCR_FFLAG);

		DPRINTF(("%s: fifo size = %d, seq = 0x%x\n",
		    sc->sc_dev.dv_xname, n & NCRFIFO_FF, (n & NCRFIFO_SS)>>5));
	}
#endif

	if (esc->sc_main_dmamap->dm_mapsize) {
		if (!esc->sc_datain) {
			/* unpatch the DMA map for write overrun */
			esc->sc_main_dmamap->dm_mapsize -= ESP_DMA_OVERRUN;
			esc->sc_main_dmamap->dm_segs[esc->sc_main_dmamap->dm_nsegs - 1].ds_len -=
			    ESP_DMA_OVERRUN;
		}
		bus_dmamap_sync(esc->sc_dma->sc_dmat, esc->sc_main_dmamap,
		    0, esc->sc_main_dmamap->dm_mapsize,
		    (esc->sc_datain ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
		bus_dmamap_unload(esc->sc_dma->sc_dmat, esc->sc_main_dmamap);
		NDTRACEIF (
			sprintf (ndtracep, "m%ld", esc->sc_main_dmamap->dm_xfer_len);
			ndtracep += strlen (ndtracep);
		);
	}

	if (esc->sc_tail_dmamap->dm_mapsize) {
		bus_dmamap_sync(esc->sc_dma->sc_dmat, esc->sc_tail_dmamap,
		    0, esc->sc_tail_dmamap->dm_mapsize,
		    (esc->sc_datain ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
		bus_dmamap_unload(esc->sc_dma->sc_dmat, esc->sc_tail_dmamap);
		/* copy the tail DMA buffer data for read transfers */
		if (esc->sc_datain) {
			memcpy(*esc->sc_dmaaddr+esc->sc_begin_size+esc->sc_main_size,
			    esc->sc_tail,
			    esc->sc_dmasize-(esc->sc_begin_size+esc->sc_main_size));
		}
		NDTRACEIF (
			sprintf (ndtracep, "t%ld", esc->sc_tail_dmamap->dm_xfer_len);
			ndtracep += strlen (ndtracep);
		);
	}

#ifdef ESP_DEBUG
	if (esp_debug) {
		printf("%s: dma_shutdown: addr=%p,len=0x%08x,size=0x%08x\n",
		    sc->sc_dev.dv_xname,
		    *esc->sc_dmaaddr, *esc->sc_dmalen, esc->sc_dmasize);
		if (esp_debug > 10) {
			esp_hex_dump(*(esc->sc_dmaaddr), esc->sc_dmasize);
			printf("%s: tail=%p,tailbuf=%p,tail_size=0x%08x\n",
			    sc->sc_dev.dv_xname,
			    esc->sc_tail, &(esc->sc_tailbuf[0]), esc->sc_tail_size);
			esp_hex_dump(&(esc->sc_tailbuf[0]), sizeof(esc->sc_tailbuf));
		}
	}
#endif

	esc->sc_main = 0;
	esc->sc_main_size = 0;
	esc->sc_tail = 0;
	esc->sc_tail_size = 0;

	esc->sc_datain = -1;
	/* esc->sc_dmaaddr = 0; */
	/* esc->sc_dmalen = 0; */
	/* esc->sc_dmasize = 0; */

	esc->sc_loaded = 0;

	esc->sc_begin = 0;
	esc->sc_begin_size = 0;

#ifdef ESP_DEBUG
	if (esp_debug) {
		char sbuf[256];

		bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRSTAT)),
		    NEXT_INTR_BITS, sbuf, sizeof(sbuf));
		printf("  *intrstat = 0x%s\n", sbuf);

		bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRMASK)),
		    NEXT_INTR_BITS, sbuf, sizeof(sbuf));
		printf("  *intrmask = 0x%s\n", sbuf);
	}
#endif
}