/*	$NetBSD: esp.c,v 1.44 2003/05/03 18:10:54 wiz Exp $	*/

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994 Peter Galbavy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by Peter Galbavy
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Based on aic6360 by Jarle Greipsland
 *
 * Acknowledgements: Many of the algorithms used in this driver are
 * inspired by the work of Julian Elischer (julian@tfs.com) and
 * Charles Hannum (mycroft@duality.gnu.ai.mit.edu).  Thanks a million!
 */

/*
 * Grabbed from the sparc port at revision 1.73 for the NeXT.
 * Darrin B. Jewell <dbj@netbsd.org>  Sat Jul 4 15:41:32 1998
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>
#include <dev/scsipi/scsi_message.h>

#include <machine/bus.h>
#include <machine/autoconf.h>
#include <machine/cpu.h>

#include <dev/ic/ncr53c9xreg.h>
#include <dev/ic/ncr53c9xvar.h>

#include <next68k/next68k/isr.h>

#include <next68k/dev/intiovar.h>
#include <next68k/dev/nextdmareg.h>
#include <next68k/dev/nextdmavar.h>

#include <next68k/dev/espreg.h>
#include <next68k/dev/espvar.h>

#ifdef DEBUG
#undef ESP_DEBUG
#endif

#ifdef ESP_DEBUG
int esp_debug = 0;
#define DPRINTF(x) if (esp_debug) printf x;
extern char *ndtracep;
extern char ndtrace[];
extern int ndtraceshow;
#define NDTRACEIF(x) if (10 && ndtracep < (ndtrace + 8192)) do {x;} while (0)
#else
#define DPRINTF(x)
#define NDTRACEIF(x)
#endif
#define PRINTF(x) printf x;


void	espattach_intio __P((struct device *, struct device *, void *));
int	espmatch_intio __P((struct device *, struct cfdata *, void *));

/* DMA callbacks */
bus_dmamap_t esp_dmacb_continue __P((void *arg));
void esp_dmacb_completed __P((bus_dmamap_t map, void *arg));
void esp_dmacb_shutdown __P((void *arg));

static void findchannel_defer __P((struct device *));

#ifdef ESP_DEBUG
char esp_dma_dump[5*1024] = "";
struct ncr53c9x_softc *esp_debug_sc = 0;
void esp_dma_store __P((struct ncr53c9x_softc *sc));
void esp_dma_print __P((struct ncr53c9x_softc *sc));
int esp_dma_nest = 0;
#endif


/* Linkup to the rest of the kernel */
CFATTACH_DECL(esp, sizeof(struct esp_softc),
    espmatch_intio, espattach_intio, NULL, NULL);

static int attached = 0;

/*
 * Functions and the switch for the MI code.
 */
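/*
 * Informal overview: the routines below are the machine-dependent glue the
 * MI ncr53c9x code calls through the esp_glue switch.  esp_read_reg() and
 * esp_write_reg() do byte-wide bus_space accesses to the chip registers,
 * and the esp_dma_* entries drive the NeXT DMA engine via the nextdma
 * callbacks further down in this file.
 */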
u_char	esp_read_reg __P((struct ncr53c9x_softc *, int));
void	esp_write_reg __P((struct ncr53c9x_softc *, int, u_char));
int	esp_dma_isintr __P((struct ncr53c9x_softc *));
void	esp_dma_reset __P((struct ncr53c9x_softc *));
int	esp_dma_intr __P((struct ncr53c9x_softc *));
int	esp_dma_setup __P((struct ncr53c9x_softc *, caddr_t *,
		size_t *, int, size_t *));
void	esp_dma_go __P((struct ncr53c9x_softc *));
void	esp_dma_stop __P((struct ncr53c9x_softc *));
int	esp_dma_isactive __P((struct ncr53c9x_softc *));

struct ncr53c9x_glue esp_glue = {
	esp_read_reg,
	esp_write_reg,
	esp_dma_isintr,
	esp_dma_reset,
	esp_dma_intr,
	esp_dma_setup,
	esp_dma_go,
	esp_dma_stop,
	esp_dma_isactive,
	NULL,			/* gl_clear_latched_intr */
};

#ifdef ESP_DEBUG
#define XCHR(x) "0123456789abcdef"[(x) & 0xf]
static void
esp_hex_dump(unsigned char *pkt, size_t len)
{
	size_t i, j;

	printf("00000000 ");
	for(i=0; i<len; i++) {
		printf("%c%c ", XCHR(pkt[i]>>4), XCHR(pkt[i]));
		if ((i+1) % 16 == 8) {
			printf(" ");
		}
		if ((i+1) % 16 == 0) {
			printf(" %c", '|');
			for(j=0; j<16; j++) {
				printf("%c", pkt[i-15+j]>=32 && pkt[i-15+j]<127?pkt[i-15+j]:'.');
			}
			printf("%c\n%c%c%c%c%c%c%c%c ", '|',
			    XCHR((i+1)>>28),XCHR((i+1)>>24),XCHR((i+1)>>20),XCHR((i+1)>>16),
			    XCHR((i+1)>>12), XCHR((i+1)>>8), XCHR((i+1)>>4), XCHR(i+1));
		}
	}
	printf("\n");
}
#endif

int
espmatch_intio(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;

	if (attached)
		return (0);

	ia->ia_addr = (void *)NEXT_P_SCSI;

	return(1);
}

static void
findchannel_defer(self)
	struct device *self;
{
	struct esp_softc *esc = (void *)self;
	struct ncr53c9x_softc *sc = &esc->sc_ncr53c9x;
	int error;

	if (!esc->sc_dma) {
		printf("%s", sc->sc_dev.dv_xname);
		esc->sc_dma = nextdma_findchannel("scsi");
		if (!esc->sc_dma)
			panic("%s: can't find DMA channel",
			    sc->sc_dev.dv_xname);
	}

	nextdma_setconf(esc->sc_dma, shutdown_cb, &esp_dmacb_shutdown);
	nextdma_setconf(esc->sc_dma, continue_cb, &esp_dmacb_continue);
	nextdma_setconf(esc->sc_dma, completed_cb, &esp_dmacb_completed);
	nextdma_setconf(esc->sc_dma, cb_arg, sc);

	error = bus_dmamap_create(esc->sc_dma->sc_dmat,
	    sc->sc_maxxfer,
	    sc->sc_maxxfer/PAGE_SIZE+1, sc->sc_maxxfer,
	    0, BUS_DMA_ALLOCNOW, &esc->sc_main_dmamap);
	if (error) {
		panic("%s: can't create main i/o DMA map, error = %d",
		    sc->sc_dev.dv_xname, error);
	}

	error = bus_dmamap_create(esc->sc_dma->sc_dmat,
	    ESP_DMA_TAILBUFSIZE, 1, ESP_DMA_TAILBUFSIZE,
	    0, BUS_DMA_ALLOCNOW, &esc->sc_tail_dmamap);
	if (error) {
		panic("%s: can't create tail i/o DMA map, error = %d",
		    sc->sc_dev.dv_xname, error);
	}
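	/*
	 * Informal note on the map geometry above: the main map appears to
	 * be sized for the worst case, where a transfer of up to sc_maxxfer
	 * bytes that is not page aligned can touch sc_maxxfer/PAGE_SIZE + 1
	 * pages (sc_maxxfer is a multiple of PAGE_SIZE for the chip variants
	 * handled below), hence that segment count.  The tail bounce buffer
	 * is small and contiguous, so a single segment is enough for it.
	 */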
#if 0
	/* Turn on target selection using the `DMA' method */
	sc->sc_features |= NCR_F_DMASELECT;
#endif

	/* Do the common parts of attachment. */
	sc->sc_adapter.adapt_minphys = minphys;
	sc->sc_adapter.adapt_request = ncr53c9x_scsipi_request;
	ncr53c9x_attach(sc);

	/* Establish interrupt channel */
	isrlink_autovec(ncr53c9x_intr, sc, NEXT_I_IPL(NEXT_I_SCSI), 0, NULL);
	INTR_ENABLE(NEXT_I_SCSI);

	/* register interrupt stats */
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
	    sc->sc_dev.dv_xname, "intr");

	printf("%s: using DMA channel %s\n", sc->sc_dev.dv_xname,
	    esc->sc_dma->sc_dev.dv_xname);
}

void
espattach_intio(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct esp_softc *esc = (void *)self;
	struct ncr53c9x_softc *sc = &esc->sc_ncr53c9x;
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;

#ifdef ESP_DEBUG
	esp_debug_sc = sc;
#endif

	esc->sc_bst = ia->ia_bst;
	if (bus_space_map(esc->sc_bst, NEXT_P_SCSI,
	    ESP_DEVICE_SIZE, 0, &esc->sc_bsh)) {
		panic("\n%s: can't map ncr53c90 registers",
		    sc->sc_dev.dv_xname);
	}

	sc->sc_id = 7;
	sc->sc_freq = 20;	/* MHz */

	/*
	 * Set up glue for MI code early; we use some of it here.
	 */
	sc->sc_glue = &esp_glue;

	/*
	 * XXX More of this should be in ncr53c9x_attach(), but
	 * XXX should we really poke around the chip that much in
	 * XXX the MI code?  Think about this more...
	 */

	/*
	 * It is necessary to try to load the 2nd config register here,
	 * to find out what rev the esp chip is, else the ncr53c9x_reset
	 * will not set up the defaults correctly.
	 */
	sc->sc_cfg1 = sc->sc_id | NCRCFG1_PARENB;
	sc->sc_cfg2 = NCRCFG2_SCSI2 | NCRCFG2_RPE;
	sc->sc_cfg3 = NCRCFG3_CDB;
	NCR_WRITE_REG(sc, NCR_CFG2, sc->sc_cfg2);

	if ((NCR_READ_REG(sc, NCR_CFG2) & ~NCRCFG2_RSVD) !=
	    (NCRCFG2_SCSI2 | NCRCFG2_RPE)) {
		sc->sc_rev = NCR_VARIANT_ESP100;
	} else {
		sc->sc_cfg2 = NCRCFG2_SCSI2;
		NCR_WRITE_REG(sc, NCR_CFG2, sc->sc_cfg2);
		sc->sc_cfg3 = 0;
		NCR_WRITE_REG(sc, NCR_CFG3, sc->sc_cfg3);
		sc->sc_cfg3 = (NCRCFG3_CDB | NCRCFG3_FCLK);
		NCR_WRITE_REG(sc, NCR_CFG3, sc->sc_cfg3);
		if (NCR_READ_REG(sc, NCR_CFG3) !=
		    (NCRCFG3_CDB | NCRCFG3_FCLK)) {
			sc->sc_rev = NCR_VARIANT_ESP100A;
		} else {
			/* NCRCFG2_FE enables > 64K transfers */
			sc->sc_cfg2 |= NCRCFG2_FE;
			sc->sc_cfg3 = 0;
			NCR_WRITE_REG(sc, NCR_CFG3, sc->sc_cfg3);
			sc->sc_rev = NCR_VARIANT_ESP200;
		}
	}

	/*
	 * XXX minsync and maxxfer _should_ be set up in MI code,
	 * XXX but it appears to have some dependency on what sort
	 * XXX of DMA we're hooked up to, etc.
	 */

	/*
	 * This is the value used to start sync negotiations.
	 * Note that the NCR register "SYNCTP" is programmed
	 * in "clocks per byte", and has a minimum value of 4.
	 * The SCSI period used in negotiation is one-fourth
	 * of the time (in nanoseconds) needed to transfer one byte.
	 * Since the chip's clock is given in MHz, we have the following
	 * formula: 4 * period = (1000 / freq) * 4
	 */
	sc->sc_minsync = /* 1000 / sc->sc_freq */ 0;
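	/*
	 * Worked example (informal): with sc_freq = 20 MHz one chip clock is
	 * 1000/20 = 50ns, so the minimum 4 clocks per byte takes 200ns and
	 * the negotiated period would be 200/4 = 50, i.e. 1000/freq.  The MI
	 * code could also derive the ESP100A value from clocks per byte, cf.
	 * the commented-out ncr53c9x_cpb2stp() call below; as written, the
	 * driver simply leaves sc_minsync at 0 (no sync negotiation).
	 */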
	/*
	 * Alas, we must now modify the value a bit, because it's
	 * only valid when we can switch on FASTCLK and FASTSCSI bits
	 * in config register 3...
	 */
	switch (sc->sc_rev) {
	case NCR_VARIANT_ESP100:
		sc->sc_maxxfer = 64 * 1024;
		sc->sc_minsync = 0;	/* No synch on old chip? */
		break;

	case NCR_VARIANT_ESP100A:
		sc->sc_maxxfer = 64 * 1024;
		/* Min clocks/byte is 5 */
		sc->sc_minsync = /* ncr53c9x_cpb2stp(sc, 5) */ 0;
		break;

	case NCR_VARIANT_ESP200:
		sc->sc_maxxfer = 16 * 1024 * 1024;
		/* XXX - do actually set FAST* bits */
		break;
	}

	/* @@@ Some ESP_DCTL bits probably need setting */
	NCR_WRITE_REG(sc, ESP_DCTL,
	    ESPDCTL_16MHZ | ESPDCTL_INTENB | ESPDCTL_RESET);
	DELAY(10);
	DPRINTF(("esp dctl is 0x%02x\n",NCR_READ_REG(sc,ESP_DCTL)));
	NCR_WRITE_REG(sc, ESP_DCTL, ESPDCTL_16MHZ | ESPDCTL_INTENB);
	DELAY(10);
	DPRINTF(("esp dctl is 0x%02x\n",NCR_READ_REG(sc,ESP_DCTL)));

	esc->sc_dma = nextdma_findchannel("scsi");
	if (esc->sc_dma) {
		findchannel_defer(self);
	} else {
		printf("\n");
		config_defer(self, findchannel_defer);
	}

	attached = 1;
}

/*
 * Glue functions.
 */

u_char
esp_read_reg(sc, reg)
	struct ncr53c9x_softc *sc;
	int reg;
{
	struct esp_softc *esc = (struct esp_softc *)sc;

	return(bus_space_read_1(esc->sc_bst, esc->sc_bsh, reg));
}

void
esp_write_reg(sc, reg, val)
	struct ncr53c9x_softc *sc;
	int reg;
	u_char val;
{
	struct esp_softc *esc = (struct esp_softc *)sc;

	bus_space_write_1(esc->sc_bst, esc->sc_bsh, reg, val);
}

volatile u_int32_t save1;

#define xADDR 0x0211a000
int doze __P((volatile int));
int
doze(c)
	volatile int c;
{
	/* static int tmp1; */
	u_int32_t tmp1;
	volatile u_int8_t tmp2;
	volatile u_int8_t *reg = (volatile u_int8_t *)IIOV(xADDR);

	if (c > 244) return (0);
	if (c == 0) return (0);
	/* ((*(volatile u_long *)IIOV(NEXT_P_INTRMASK))&=(~NEXT_I_BIT(x))) */
	(*reg) = 0;
	(*reg) = 0;
	do {
		save1 = (*reg);
		tmp2 = *(reg + 3);
		tmp1 = tmp2;
	} while (tmp1 <= c);
	return (0);
}

int
esp_dma_isintr(sc)
	struct ncr53c9x_softc *sc;
{
	struct esp_softc *esc = (struct esp_softc *)sc;

	if (INTR_OCCURRED(NEXT_I_SCSI)) {
		NDTRACEIF (*ndtracep++ = 'i');
		NCR_WRITE_REG(sc, ESP_DCTL, ESPDCTL_16MHZ | ESPDCTL_INTENB |
		    (esc->sc_datain ? ESPDCTL_DMARD : 0));
		return (1);
	} else {
		return (0);
	}
}

#define nd_bsr4(reg) bus_space_read_4(nsc->sc_bst, nsc->sc_bsh, (reg))
#define nd_bsw4(reg,val) bus_space_write_4(nsc->sc_bst, nsc->sc_bsh, (reg), (val))

int
esp_dma_intr(sc)
	struct ncr53c9x_softc *sc;
{
	struct esp_softc *esc = (struct esp_softc *)sc;
	struct nextdma_softc *nsc = esc->sc_dma;
	struct nextdma_status *stat = &nsc->sc_stat;
	int r = (INTR_OCCURRED(NEXT_I_SCSI));
	int flushcount;

	r = 1;

	NDTRACEIF (*ndtracep++ = 'I');
	if (r) {
		/* printf ("esp_dma_isintr start\n"); */
		{
			int s = spldma();
			void *ndmap = stat->nd_map;
			int ndidx = stat->nd_idx;
			splx(s);

			flushcount = 0;

#ifdef ESP_DEBUG
			/* esp_dma_nest++; */

			if (esp_debug) {
				char sbuf[256];

				bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRSTAT)),
				    NEXT_INTR_BITS, sbuf, sizeof(sbuf));
				printf("esp_dma_isintr = 0x%s\n", sbuf);
			}
#endif

			while (!nextdma_finished(nsc)) { /* esp_dma_isactive(sc)) { */
				NDTRACEIF (*ndtracep++ = 'w');
				NDTRACEIF (
					sprintf(ndtracep, "f%dm%dl%dw",
					    NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF,
					    NCR_READ_REG((sc), NCR_TCM), NCR_READ_REG((sc), NCR_TCL));
					ndtracep += strlen(ndtracep);
				);
				if (NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF)
					flushcount = 5;
				NCR_WRITE_REG(sc, ESP_DCTL,
				    ESPDCTL_16MHZ | ESPDCTL_INTENB | ESPDCTL_DMAMOD |
				    (esc->sc_datain ? ESPDCTL_DMARD : 0));

				s = spldma();
				while (ndmap == stat->nd_map && ndidx == stat->nd_idx &&
				    !(nd_bsr4(DD_CSR) & 0x08000000) &&
				    ++flushcount < 5) {
					splx(s);
					NDTRACEIF (*ndtracep++ = 'F');
					NCR_WRITE_REG(sc, ESP_DCTL, ESPDCTL_FLUSH |
					    ESPDCTL_16MHZ | ESPDCTL_INTENB | ESPDCTL_DMAMOD |
					    (esc->sc_datain ? ESPDCTL_DMARD : 0));
					doze(0x32);
					NCR_WRITE_REG(sc, ESP_DCTL,
					    ESPDCTL_16MHZ | ESPDCTL_INTENB | ESPDCTL_DMAMOD |
					    (esc->sc_datain ? ESPDCTL_DMARD : 0));
					doze(0x32);
					s = spldma();
				}
				NDTRACEIF (*ndtracep++ = '0' + flushcount);
				if (flushcount > 4) {
					int next;
					int onext = 0;

					splx(s);
					DPRINTF(("DMA reset\n"));
					while (((next = nd_bsr4(DD_NEXT)) !=
					    (nd_bsr4(DD_LIMIT) & 0x7FFFFFFF)) &&
					    onext != next) {
						onext = next;
						DELAY(50);
					}
					NDTRACEIF (*ndtracep++ = 'R');
					NCR_WRITE_REG(sc, ESP_DCTL, ESPDCTL_16MHZ | ESPDCTL_INTENB);
					NDTRACEIF (
						sprintf(ndtracep, "ff:%d tcm:%d tcl:%d ",
						    NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF,
						    NCR_READ_REG((sc), NCR_TCM),
						    NCR_READ_REG((sc), NCR_TCL));
						ndtracep += strlen(ndtracep);
					);
					s = spldma();
					nextdma_reset(nsc);
					splx(s);
					goto out;
				}
				splx(s);

#ifdef DIAGNOSTIC
				if (flushcount > 4) {
					NDTRACEIF (*ndtracep++ = '+');
					printf("%s: unexpected flushcount %d on %s\n",
					    sc->sc_dev.dv_xname, flushcount,
					    esc->sc_datain ? "read" : "write");
				}
#endif

				if (!nextdma_finished(nsc)) { /* esp_dma_isactive(sc)) { */
					NDTRACEIF (*ndtracep++ = '1');
				}
				flushcount = 0;
				s = spldma();
				ndmap = stat->nd_map;
				ndidx = stat->nd_idx;
				splx(s);

				goto loop;

			loop:
				;
			}
			goto out;

		out:
			;
#ifdef ESP_DEBUG
			/* esp_dma_nest--; */
#endif
		}

		doze(0x32);
		NCR_WRITE_REG(sc, ESP_DCTL, ESPDCTL_16MHZ | ESPDCTL_INTENB |
		    (esc->sc_datain ? ESPDCTL_DMARD : 0));
		NDTRACEIF (*ndtracep++ = 'b');

		while (esc->sc_datain != -1)
			DELAY(50);

		if (esc->sc_dmaaddr) {
			bus_size_t xfer_len = 0;
			int resid;

			NCR_WRITE_REG(sc, ESP_DCTL, ESPDCTL_16MHZ | ESPDCTL_INTENB);
			if (stat->nd_exception == 0) {
				resid = NCR_READ_REG((sc), NCR_TCL) +
				    (NCR_READ_REG((sc), NCR_TCM) << 8);
				if (resid) {
					resid += (NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF);
#ifdef ESP_DEBUG
					if (NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF)
						if ((NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF) != 16 ||
						    NCR_READ_REG((sc), NCR_TCL) != 240)
							ndtraceshow++;
#endif
				}
				xfer_len = esc->sc_dmasize - resid;
			} else {
				/*static*/ void ncr53c9x_abort(struct ncr53c9x_softc *,
				    struct ncr53c9x_ecb *);
#define ncr53c9x_sched_msgout(m) \
	do {							\
		NCR_MISC(("ncr53c9x_sched_msgout %x %d", m, __LINE__)); \
		NCRCMD(sc, NCRCMD_SETATN);			\
		sc->sc_flags |= NCR_ATN;			\
		sc->sc_msgpriq |= (m);				\
	} while (0)
				int i;

				xfer_len = 0;
				if (esc->sc_begin)
					xfer_len += esc->sc_begin_size;
				if (esc->sc_main_dmamap)
					xfer_len += esc->sc_main_dmamap->dm_xfer_len;
				if (esc->sc_tail_dmamap)
					xfer_len += esc->sc_tail_dmamap->dm_xfer_len;
				resid = 0;
				printf("X\n");
				for (i = 0; i < 16; i++) {
					NCR_WRITE_REG(sc, ESP_DCTL, ESPDCTL_FLUSH |
					    ESPDCTL_16MHZ | ESPDCTL_INTENB |
					    (esc->sc_datain ? ESPDCTL_DMARD : 0));
					NCR_WRITE_REG(sc, ESP_DCTL,
					    ESPDCTL_16MHZ | ESPDCTL_INTENB |
					    (esc->sc_datain ? ESPDCTL_DMARD : 0));
				}
#if 0
				printf("ff:%02x tcm:%d tcl:%d esp_dstat:%02x stat:%02x step: %02x intr:%02x new stat:%02X\n",
				    NCR_READ_REG(sc, NCR_FFLAG),
				    NCR_READ_REG((sc), NCR_TCM), NCR_READ_REG((sc), NCR_TCL),
				    NCR_READ_REG(sc, ESP_DSTAT),
				    sc->sc_espstat, sc->sc_espstep,
				    sc->sc_espintr, NCR_READ_REG(sc, NCR_STAT));
				printf("sc->sc_state: %x sc->sc_phase: %x sc->sc_espstep:%x sc->sc_prevphase:%x sc->sc_flags:%x\n",
				    sc->sc_state, sc->sc_phase, sc->sc_espstep,
				    sc->sc_prevphase, sc->sc_flags);
#endif
				/* sc->sc_flags &= ~NCR_ICCS; */
				sc->sc_nexus->flags |= ECB_ABORT;
				if (sc->sc_phase == MESSAGE_IN_PHASE) {
					/* ncr53c9x_sched_msgout(SEND_ABORT); */
					ncr53c9x_abort(sc, sc->sc_nexus);
				} else if (sc->sc_phase != STATUS_PHASE) {
					printf("ATTENTION!!! not message/status phase: %d\n",
					    sc->sc_phase);
				}
			}

			NDTRACEIF (
				sprintf(ndtracep, "f%dm%dl%ds%dx%dr%dS",
				    NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF,
				    NCR_READ_REG((sc), NCR_TCM), NCR_READ_REG((sc), NCR_TCL),
				    esc->sc_dmasize, (int)xfer_len, resid);
				ndtracep += strlen(ndtracep);
			);

			*(esc->sc_dmaaddr) += xfer_len;
			*(esc->sc_dmalen) -= xfer_len;
			esc->sc_dmaaddr = 0;
			esc->sc_dmalen = 0;
			esc->sc_dmasize = 0;
		}

		NDTRACEIF (*ndtracep++ = 'B');
		sc->sc_espstat = NCR_READ_REG(sc, NCR_STAT) |
		    (sc->sc_espstat & NCRSTAT_INT);

		DPRINTF(("esp dctl is 0x%02x\n",NCR_READ_REG(sc,ESP_DCTL)));
		/* printf ("esp_dma_isintr DONE\n"); */
	}

	return (r);
}

void
esp_dma_reset(sc)
	struct ncr53c9x_softc *sc;
{
	struct esp_softc *esc = (struct esp_softc *)sc;

	DPRINTF(("esp DMA reset\n"));

#ifdef ESP_DEBUG
	if (esp_debug) {
		char sbuf[256];

		bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRSTAT)),
		    NEXT_INTR_BITS, sbuf, sizeof(sbuf));
		printf("  *intrstat = 0x%s\n", sbuf);

		bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRMASK)),
		    NEXT_INTR_BITS, sbuf, sizeof(sbuf));
		printf("  *intrmask = 0x%s\n", sbuf);
	}
#endif

#if 0
	/* Clear the DMAMOD bit in the DCTL register: */
	NCR_WRITE_REG(sc, ESP_DCTL,
	    ESPDCTL_16MHZ | ESPDCTL_INTENB);
	DPRINTF(("esp dctl is 0x%02x\n",NCR_READ_REG(sc,ESP_DCTL)));
#endif

	nextdma_reset(esc->sc_dma);
	nextdma_init(esc->sc_dma);

	esc->sc_datain = -1;
	esc->sc_dmaaddr = 0;
	esc->sc_dmalen = 0;
	esc->sc_dmasize = 0;

	esc->sc_loaded = 0;

	esc->sc_begin = 0;
	esc->sc_begin_size = 0;

	if (esc->sc_main_dmamap->dm_mapsize) {
		bus_dmamap_unload(esc->sc_dma->sc_dmat, esc->sc_main_dmamap);
	}
	esc->sc_main = 0;
	esc->sc_main_size = 0;

	if (esc->sc_tail_dmamap->dm_mapsize) {
		bus_dmamap_unload(esc->sc_dma->sc_dmat, esc->sc_tail_dmamap);
	}
	esc->sc_tail = 0;
	esc->sc_tail_size = 0;
}

/*
 * It appears that:
 * The addr and len arguments to this need to be kept up to date
 * with the status of the transfer.
 * The dmasize of this is the actual length of the transfer
 * request, which is guaranteed to be less than maxxfer.
 * (len may be > maxxfer)
 */

int
esp_dma_setup(sc, addr, len, datain, dmasize)
	struct ncr53c9x_softc *sc;
	caddr_t *addr;
	size_t *len;
	int datain;
	size_t *dmasize;
{
	struct esp_softc *esc = (struct esp_softc *)sc;

	NDTRACEIF (*ndtracep++ = 'h');
#ifdef DIAGNOSTIC
#ifdef ESP_DEBUG
	/* If this is a read DMA, pre-fill the buffer with 0xdeadbeef
	 * to identify bogus reads
	 */
	if (datain) {
		int *v = (int *)(*addr);
		int i;
		for(i=0;i<((*len)/4);i++) v[i] = 0xdeadbeef;
		v = (int *)(&(esc->sc_tailbuf[0]));
		for(i=0;i<((sizeof(esc->sc_tailbuf)/4));i++) v[i] = 0xdeafbeef;
	} else {
		int *v;
		int i;
		v = (int *)(&(esc->sc_tailbuf[0]));
		for(i=0;i<((sizeof(esc->sc_tailbuf)/4));i++) v[i] = 0xfeeb1eed;
	}
#endif
#endif

	DPRINTF(("esp_dma_setup(%p,0x%08x,0x%08x)\n",*addr,*len,*dmasize));

#if 0
#ifdef DIAGNOSTIC
	/* @@@ this is ok sometimes.  Verify that we handle it ok
	 * and then remove this check.
	 */
	if (*len != *dmasize) {
		panic("esp dmalen 0x%lx != size 0x%lx",*len,*dmasize);
	}
#endif
#endif

#ifdef DIAGNOSTIC
	if ((esc->sc_datain != -1) ||
	    (esc->sc_main_dmamap->dm_mapsize != 0) ||
	    (esc->sc_tail_dmamap->dm_mapsize != 0) ||
	    (esc->sc_dmasize != 0)) {
		panic("%s: map already loaded in esp_dma_setup"
		    "\tdatain = %d\n\tmain_mapsize=%ld\n\ttail_mapsize=%ld\n\tdmasize = %d",
		    sc->sc_dev.dv_xname, esc->sc_datain,
		    esc->sc_main_dmamap->dm_mapsize,
		    esc->sc_tail_dmamap->dm_mapsize,
		    esc->sc_dmasize);
	}
#endif

	/* we are sometimes asked to DMA zero bytes, that's easy */
	if (*dmasize <= 0) {
		return(0);
	}

	if (*dmasize > ESP_MAX_DMASIZE)
		*dmasize = ESP_MAX_DMASIZE;

	/* Save these in case we have to abort DMA */
	esc->sc_datain = datain;
	esc->sc_dmaaddr = addr;
	esc->sc_dmalen = len;
	esc->sc_dmasize = *dmasize;

	esc->sc_loaded = 0;

#define DMA_SCSI_ALIGNMENT 16
#define DMA_SCSI_ALIGN(type, addr) \
	((type)(((unsigned)(addr)+DMA_SCSI_ALIGNMENT-1) \
		&~(DMA_SCSI_ALIGNMENT-1)))
#define DMA_SCSI_ALIGNED(addr) \
	(((unsigned)(addr)&(DMA_SCSI_ALIGNMENT-1))==0)

	{
		size_t slop_bgn_size; /* # bytes to be fifo'd at beginning */
		size_t slop_end_size; /* # bytes to be transferred in tail buffer */

		{
			u_long bgn = (u_long)(*esc->sc_dmaaddr);
			u_long end = (u_long)(*esc->sc_dmaaddr+esc->sc_dmasize);

			slop_bgn_size = DMA_SCSI_ALIGNMENT-(bgn % DMA_SCSI_ALIGNMENT);
			if (slop_bgn_size == DMA_SCSI_ALIGNMENT) slop_bgn_size = 0;
			slop_end_size = (end % DMA_ENDALIGNMENT);
		}
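		/*
		 * Illustrative example of the split (informal; assumes
		 * DMA_ENDALIGNMENT equals the 16-byte DMA_SCSI_ALIGNMENT,
		 * which the KASSERTs below check): a transfer of 0x100 bytes
		 * at address ...1004 gives bgn % 16 = 4, so 12 "begin" slop
		 * bytes go through the chip FIFO by hand, the aligned middle
		 * goes through the main DMA map, and end % 16 = 4 trailing
		 * bytes are bounced through the tail buffer.
		 */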
858 */ 859 if (!esc->sc_datain) { 860 #if 0 861 slop_end_size += ESP_DMA_MAXTAIL; 862 #else 863 slop_end_size += 0x10; 864 #endif 865 } 866 867 /* Check to make sure we haven't counted extra slop 868 * as would happen for a very short DMA buffer, also 869 * for short buffers, just stuff the entire thing in the tail 870 */ 871 if ((slop_bgn_size+slop_end_size >= esc->sc_dmasize) 872 #if 0 873 || (esc->sc_dmasize <= ESP_DMA_MAXTAIL) 874 #endif 875 ) 876 { 877 slop_bgn_size = 0; 878 slop_end_size = esc->sc_dmasize; 879 } 880 881 /* initialize the fifo buffer */ 882 if (slop_bgn_size) { 883 esc->sc_begin = *esc->sc_dmaaddr; 884 esc->sc_begin_size = slop_bgn_size; 885 } else { 886 esc->sc_begin = 0; 887 esc->sc_begin_size = 0; 888 } 889 890 #if 01 891 /* Load the normal DMA map */ 892 { 893 esc->sc_main = *esc->sc_dmaaddr+slop_bgn_size; 894 esc->sc_main_size = (esc->sc_dmasize)-(slop_end_size+slop_bgn_size); 895 896 if (esc->sc_main_size) { 897 int error; 898 899 if (!esc->sc_datain || DMA_ENDALIGNED(esc->sc_main_size + slop_end_size)) { 900 KASSERT(DMA_SCSI_ALIGNMENT == DMA_ENDALIGNMENT); 901 KASSERT(DMA_BEGINALIGNMENT == DMA_ENDALIGNMENT); 902 esc->sc_main_size += slop_end_size; 903 slop_end_size = 0; 904 if (!esc->sc_datain) { 905 esc->sc_main_size = DMA_ENDALIGN(caddr_t,esc->sc_main+esc->sc_main_size)-esc->sc_main; 906 } 907 } 908 909 error = bus_dmamap_load(esc->sc_dma->sc_dmat, 910 esc->sc_main_dmamap, 911 esc->sc_main, esc->sc_main_size, 912 NULL, BUS_DMA_NOWAIT); 913 if (error) { 914 #ifdef ESP_DEBUG 915 printf("%s: esc->sc_main_dmamap->_dm_size = %ld\n", 916 sc->sc_dev.dv_xname,esc->sc_main_dmamap->_dm_size); 917 printf("%s: esc->sc_main_dmamap->_dm_segcnt = %d\n", 918 sc->sc_dev.dv_xname,esc->sc_main_dmamap->_dm_segcnt); 919 printf("%s: esc->sc_main_dmamap->_dm_maxsegsz = %ld\n", 920 sc->sc_dev.dv_xname,esc->sc_main_dmamap->_dm_maxsegsz); 921 printf("%s: esc->sc_main_dmamap->_dm_boundary = %ld\n", 922 sc->sc_dev.dv_xname,esc->sc_main_dmamap->_dm_boundary); 923 esp_dma_print(sc); 924 #endif 925 panic("%s: can't load main DMA map. error = %d, addr=%p, size=0x%08x", 926 sc->sc_dev.dv_xname, error,esc->sc_main,esc->sc_main_size); 927 } 928 if (!esc->sc_datain) { /* patch the DMA map for write overrun */ 929 esc->sc_main_dmamap->dm_mapsize += ESP_DMA_OVERRUN; 930 esc->sc_main_dmamap->dm_segs[esc->sc_main_dmamap->dm_nsegs - 1].ds_len += 931 ESP_DMA_OVERRUN; 932 } 933 #if 0 934 bus_dmamap_sync(esc->sc_dma->sc_dmat, esc->sc_main_dmamap, 935 0, esc->sc_main_dmamap->dm_mapsize, 936 (esc->sc_datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)); 937 esc->sc_main_dmamap->dm_xfer_len = 0; 938 #endif 939 } else { 940 esc->sc_main = 0; 941 } 942 } 943 944 /* Load the tail DMA map */ 945 if (slop_end_size) { 946 esc->sc_tail = DMA_ENDALIGN(caddr_t,esc->sc_tailbuf+slop_end_size)-slop_end_size; 947 /* If the beginning of the tail is not correctly aligned, 948 * we have no choice but to align the start, which might then unalign the end. 949 */ 950 esc->sc_tail = DMA_SCSI_ALIGN(caddr_t,esc->sc_tail); 951 /* So therefore, we change the tail size to be end aligned again. */ 952 esc->sc_tail_size = DMA_ENDALIGN(caddr_t,esc->sc_tail+slop_end_size)-esc->sc_tail; 953 954 /* @@@ next DMA overrun lossage */ 955 if (!esc->sc_datain) { 956 esc->sc_tail_size += ESP_DMA_OVERRUN; 957 } 958 959 { 960 int error; 961 error = bus_dmamap_load(esc->sc_dma->sc_dmat, 962 esc->sc_tail_dmamap, 963 esc->sc_tail, esc->sc_tail_size, 964 NULL, BUS_DMA_NOWAIT); 965 if (error) { 966 panic("%s: can't load tail DMA map. 
				}
#if 0
				bus_dmamap_sync(esc->sc_dma->sc_dmat, esc->sc_tail_dmamap,
				    0, esc->sc_tail_dmamap->dm_mapsize,
				    (esc->sc_datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
				esc->sc_tail_dmamap->dm_xfer_len = 0;
#endif
			}
		}
#else

		esc->sc_begin = *esc->sc_dmaaddr;
		slop_bgn_size = DMA_SCSI_ALIGNMENT -
		    ((ulong)esc->sc_begin % DMA_SCSI_ALIGNMENT);
		if (slop_bgn_size == DMA_SCSI_ALIGNMENT) slop_bgn_size = 0;
		slop_end_size = esc->sc_dmasize - slop_bgn_size;

		if (slop_bgn_size < esc->sc_dmasize) {
			int error;

			esc->sc_tail = 0;
			esc->sc_tail_size = 0;

			esc->sc_begin_size = slop_bgn_size;
			esc->sc_main = *esc->sc_dmaaddr+slop_bgn_size;
			esc->sc_main_size = DMA_ENDALIGN(caddr_t,
			    esc->sc_main+esc->sc_dmasize-slop_bgn_size) - esc->sc_main;

			if (!esc->sc_datain) {
				esc->sc_main_size += ESP_DMA_OVERRUN;
			}
			error = bus_dmamap_load(esc->sc_dma->sc_dmat,
			    esc->sc_main_dmamap,
			    esc->sc_main, esc->sc_main_size,
			    NULL, BUS_DMA_NOWAIT);
			if (error) {
				panic("%s: can't load main DMA map.  error = %d, addr=%p, size=0x%08x",
				    sc->sc_dev.dv_xname, error,esc->sc_main,esc->sc_main_size);
			}
		} else {
			esc->sc_begin = 0;
			esc->sc_begin_size = 0;
			esc->sc_main = 0;
			esc->sc_main_size = 0;

#if 0
			esc->sc_tail = DMA_ENDALIGN(caddr_t,
			    esc->sc_tailbuf+slop_bgn_size) - slop_bgn_size;
			/* If the beginning of the tail is not correctly
			 * aligned, we have no choice but to align the start,
			 * which might then unalign the end.
			 */
#endif
			esc->sc_tail = DMA_SCSI_ALIGN(caddr_t,esc->sc_tailbuf);
			/* Therefore, we change the tail size to be
			 * end-aligned again.
			 */
			esc->sc_tail_size = DMA_ENDALIGN(caddr_t,
			    esc->sc_tail+esc->sc_dmasize) - esc->sc_tail;

			/* @@@ next DMA overrun lossage */
			if (!esc->sc_datain) {
				esc->sc_tail_size += ESP_DMA_OVERRUN;
			}

			{
				int error;
				error = bus_dmamap_load(esc->sc_dma->sc_dmat,
				    esc->sc_tail_dmamap,
				    esc->sc_tail, esc->sc_tail_size,
				    NULL, BUS_DMA_NOWAIT);
				if (error) {
					panic("%s: can't load tail DMA map.  error = %d, addr=%p, size=0x%08x",
					    sc->sc_dev.dv_xname, error,esc->sc_tail,esc->sc_tail_size);
				}
			}
		}
#endif

		DPRINTF(("%s: setup: %8p %d %8p %d %8p %d %8p %d\n",
		    sc->sc_dev.dv_xname,
		    *esc->sc_dmaaddr, esc->sc_dmasize, esc->sc_begin,
		    esc->sc_begin_size, esc->sc_main, esc->sc_main_size,
		    esc->sc_tail, esc->sc_tail_size));
	}

	return (0);
}

#ifdef ESP_DEBUG
/* For debugging */
void
esp_dma_store(sc)
	struct ncr53c9x_softc *sc;
{
	struct esp_softc *esc = (struct esp_softc *)sc;
	char *p = &esp_dma_dump[0];

	p += sprintf(p,"%s: sc_datain=%d\n",sc->sc_dev.dv_xname,esc->sc_datain);
	p += sprintf(p,"%s: sc_loaded=0x%08x\n",sc->sc_dev.dv_xname,esc->sc_loaded);

	if (esc->sc_dmaaddr) {
		p += sprintf(p,"%s: sc_dmaaddr=%p\n",sc->sc_dev.dv_xname,*esc->sc_dmaaddr);
	} else {
		p += sprintf(p,"%s: sc_dmaaddr=NULL\n",sc->sc_dev.dv_xname);
	}
	if (esc->sc_dmalen) {
		p += sprintf(p,"%s: sc_dmalen=0x%08x\n",sc->sc_dev.dv_xname,*esc->sc_dmalen);
	} else {
		p += sprintf(p,"%s: sc_dmalen=NULL\n",sc->sc_dev.dv_xname);
	}
	p += sprintf(p,"%s: sc_dmasize=0x%08x\n",sc->sc_dev.dv_xname,esc->sc_dmasize);

	p += sprintf(p,"%s: sc_begin = %p, sc_begin_size = 0x%08x\n",
	    sc->sc_dev.dv_xname, esc->sc_begin, esc->sc_begin_size);
	p += sprintf(p,"%s: sc_main = %p, sc_main_size = 0x%08x\n",
	    sc->sc_dev.dv_xname, esc->sc_main, esc->sc_main_size);
	/* if (esc->sc_main) */ {
		int i;
		bus_dmamap_t map = esc->sc_main_dmamap;

		p += sprintf(p,"%s: sc_main_dmamap.  mapsize = 0x%08lx, nsegs = %d\n",
		    sc->sc_dev.dv_xname, map->dm_mapsize, map->dm_nsegs);
		for(i=0;i<map->dm_nsegs;i++) {
			p += sprintf(p,"%s: map->dm_segs[%d].ds_addr = 0x%08lx, len = 0x%08lx\n",
			    sc->sc_dev.dv_xname, i, map->dm_segs[i].ds_addr, map->dm_segs[i].ds_len);
		}
	}
	p += sprintf(p,"%s: sc_tail = %p, sc_tail_size = 0x%08x\n",
	    sc->sc_dev.dv_xname, esc->sc_tail, esc->sc_tail_size);
	/* if (esc->sc_tail) */ {
		int i;
		bus_dmamap_t map = esc->sc_tail_dmamap;

		p += sprintf(p,"%s: sc_tail_dmamap.  mapsize = 0x%08lx, nsegs = %d\n",
		    sc->sc_dev.dv_xname, map->dm_mapsize, map->dm_nsegs);
		for(i=0;i<map->dm_nsegs;i++) {
			p += sprintf(p,"%s: map->dm_segs[%d].ds_addr = 0x%08lx, len = 0x%08lx\n",
			    sc->sc_dev.dv_xname, i, map->dm_segs[i].ds_addr, map->dm_segs[i].ds_len);
		}
	}
}

void
esp_dma_print(sc)
	struct ncr53c9x_softc *sc;
{
	esp_dma_store(sc);
	printf("%s",esp_dma_dump);
}
#endif
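/*
 * esp_dma_go: kick off a transfer prepared by esp_dma_setup().  Roughly:
 * the unaligned head ("begin" slop) is pushed through the chip FIFO by
 * hand, the main and tail DMA maps are synced and handed to the nextdma
 * engine, and ESP_DCTL is switched into DMA mode (ESPDCTL_DMAMOD, plus
 * ESPDCTL_DMARD for reads).
 */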
void
esp_dma_go(sc)
	struct ncr53c9x_softc *sc;
{
	struct esp_softc *esc = (struct esp_softc *)sc;
	struct nextdma_softc *nsc = esc->sc_dma;
	struct nextdma_status *stat = &nsc->sc_stat;
	/* int s = spldma(); */

#ifdef ESP_DEBUG
	if (ndtracep != ndtrace) {
		if (ndtraceshow) {
			*ndtracep = '\0';
			printf("esp ndtrace: %s\n", ndtrace);
			ndtraceshow = 0;
		} else {
			DPRINTF(("X"));
		}
		ndtracep = ndtrace;
	}
#endif

	DPRINTF(("%s: esp_dma_go(datain = %d)\n",
	    sc->sc_dev.dv_xname, esc->sc_datain));

#ifdef ESP_DEBUG
	if (esp_debug) esp_dma_print(sc);
	else esp_dma_store(sc);
#endif

#ifdef ESP_DEBUG
	{
		int n = NCR_READ_REG(sc, NCR_FFLAG);
		DPRINTF(("%s: fifo size = %d, seq = 0x%x\n",
		    sc->sc_dev.dv_xname,
		    n & NCRFIFO_FF, (n & NCRFIFO_SS)>>5));
	}
#endif

	/* zero length DMA transfers are boring */
	if (esc->sc_dmasize == 0) {
		/* splx(s); */
		return;
	}

#if defined(DIAGNOSTIC)
	if ((esc->sc_begin_size == 0) &&
	    (esc->sc_main_dmamap->dm_mapsize == 0) &&
	    (esc->sc_tail_dmamap->dm_mapsize == 0)) {
#ifdef ESP_DEBUG
		esp_dma_print(sc);
#endif
		panic("%s: No DMA requested!",sc->sc_dev.dv_xname);
	}
#endif

	/* Stuff the fifo with the begin buffer */
	if (esc->sc_datain) {
		int i;
		DPRINTF(("%s: FIFO read of %d bytes:",
		    sc->sc_dev.dv_xname,esc->sc_begin_size));
		for(i=0;i<esc->sc_begin_size;i++) {
			esc->sc_begin[i]=NCR_READ_REG(sc, NCR_FIFO);
			DPRINTF((" %02x",esc->sc_begin[i]&0xff));
		}
		DPRINTF(("\n"));
	} else {
		int i;
		DPRINTF(("%s: FIFO write of %d bytes:",
		    sc->sc_dev.dv_xname,esc->sc_begin_size));
		for(i=0;i<esc->sc_begin_size;i++) {
			NCR_WRITE_REG(sc, NCR_FIFO, esc->sc_begin[i]);
			DPRINTF((" %02x",esc->sc_begin[i]&0xff));
		}
		DPRINTF(("\n"));
	}

	if (esc->sc_main_dmamap->dm_mapsize) {
		bus_dmamap_sync(esc->sc_dma->sc_dmat, esc->sc_main_dmamap,
		    0, esc->sc_main_dmamap->dm_mapsize,
		    (esc->sc_datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
		esc->sc_main_dmamap->dm_xfer_len = 0;
	}

	if (esc->sc_tail_dmamap->dm_mapsize) {
		/* if we are a DMA write cycle, copy the end slop */
		if (!esc->sc_datain) {
			memcpy(esc->sc_tail,
			    *esc->sc_dmaaddr+esc->sc_begin_size+esc->sc_main_size,
			    esc->sc_dmasize-(esc->sc_begin_size+esc->sc_main_size));
		}
		bus_dmamap_sync(esc->sc_dma->sc_dmat, esc->sc_tail_dmamap,
		    0, esc->sc_tail_dmamap->dm_mapsize,
		    (esc->sc_datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
		esc->sc_tail_dmamap->dm_xfer_len = 0;
	}

	stat->nd_exception = 0;
	nextdma_start(nsc, (esc->sc_datain ? DMACSR_SETREAD : DMACSR_SETWRITE));

	if (esc->sc_datain) {
		NCR_WRITE_REG(sc, ESP_DCTL,
		    ESPDCTL_16MHZ | ESPDCTL_INTENB | ESPDCTL_DMAMOD | ESPDCTL_DMARD);
	} else {
		NCR_WRITE_REG(sc, ESP_DCTL,
		    ESPDCTL_16MHZ | ESPDCTL_INTENB | ESPDCTL_DMAMOD);
	}
	DPRINTF(("esp dctl is 0x%02x\n",NCR_READ_REG(sc,ESP_DCTL)));

	NDTRACEIF (if (esc->sc_begin_size) { *ndtracep++ = '1'; *ndtracep++ = 'A' + esc->sc_begin_size; });
	NDTRACEIF (if (esc->sc_main_size) { *ndtracep++ = '2'; *ndtracep++ = '0' + esc->sc_main_dmamap->dm_nsegs; });
	NDTRACEIF (if (esc->sc_tail_size) { *ndtracep++ = '3'; *ndtracep++ = 'A' + esc->sc_tail_size; });

	/* splx(s); */
}

void
esp_dma_stop(sc)
	struct ncr53c9x_softc *sc;
{
	struct esp_softc *esc = (struct esp_softc *)sc;

	nextdma_print(esc->sc_dma);
#ifdef ESP_DEBUG
	esp_dma_print(sc);
#endif
#if 1
	panic("%s: stop not yet implemented",sc->sc_dev.dv_xname);
#endif
}

int
esp_dma_isactive(sc)
	struct ncr53c9x_softc *sc;
{
	struct esp_softc *esc = (struct esp_softc *)sc;
	int r = (esc->sc_dmaaddr != NULL); /* !nextdma_finished(esc->sc_dma); */

	DPRINTF(("esp_dma_isactive = %d\n",r));
	return(r);
}

/****************************************************************/
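/*
 * esp_dma_int: low-level handler that appears to be used on the nextdma
 * interrupt path while this driver is active.  In outline (descriptive of
 * the code below, not a specification): it reads DD_CSR, credits dm_xfer_len
 * on a normal completion, runs the completed callback at the end of a map,
 * rotates to the next map, and either reprograms DD_NEXT/DD_LIMIT and
 * DD_START/DD_STOP to keep the chain going or, when nothing remains, resets
 * the channel and calls the shutdown callback.  The SUPDATE/bus-exception
 * branches try to restart the channel by hand and still panic in the
 * unrecoverable "restart" case.
 */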
int esp_dma_int __P((void *));

int
esp_dma_int(arg)
	void *arg;
{
	void nextdma_rotate __P((struct nextdma_softc *));
	void nextdma_setup_curr_regs __P((struct nextdma_softc *));
	void nextdma_setup_cont_regs __P((struct nextdma_softc *));

	struct ncr53c9x_softc *sc = (struct ncr53c9x_softc *)arg;
	struct esp_softc *esc = (struct esp_softc *)sc;
	struct nextdma_softc *nsc = esc->sc_dma;
	struct nextdma_status *stat = &nsc->sc_stat;
	unsigned int state;

	NDTRACEIF (*ndtracep++ = 'E');

	state = nd_bsr4(DD_CSR);

#if 1
	NDTRACEIF (
		if (state & DMACSR_COMPLETE) *ndtracep++ = 'c';
		if (state & DMACSR_ENABLE) *ndtracep++ = 'e';
		if (state & DMACSR_BUSEXC) *ndtracep++ = 'b';
		if (state & DMACSR_READ) *ndtracep++ = 'r';
		if (state & DMACSR_SUPDATE) *ndtracep++ = 's';
	);

	NDTRACEIF (*ndtracep++ = 'E');

#ifdef ESP_DEBUG
	if (0) if ((state & DMACSR_BUSEXC) && (state & DMACSR_ENABLE)) ndtraceshow++;
	if (0) if ((state & DMACSR_SUPDATE)) ndtraceshow++;
#endif
#endif

	if ((stat->nd_exception == 0) && (state & DMACSR_COMPLETE) &&
	    (state & DMACSR_ENABLE)) {
		stat->nd_map->dm_xfer_len += stat->nd_map->dm_segs[stat->nd_idx].ds_len;
	}

	if ((stat->nd_idx+1) == stat->nd_map->dm_nsegs) {
		if (nsc->sc_conf.nd_completed_cb)
			(*nsc->sc_conf.nd_completed_cb)(stat->nd_map,
			    nsc->sc_conf.nd_cb_arg);
	}
	nextdma_rotate(nsc);

	if ((state & DMACSR_COMPLETE) && (state & DMACSR_ENABLE)) {
#if 0
		int l = nd_bsr4(DD_LIMIT) & 0x7FFFFFFF;
		int s = nd_bsr4(DD_STOP);
#endif
		/* nextdma_setup_cont_regs(nsc); */
		if (stat->nd_map_cont) {
			nd_bsw4(DD_START, stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr);
			nd_bsw4(DD_STOP, (stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
			    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len));
		}

		nd_bsw4(DD_CSR, DMACSR_CLRCOMPLETE |
		    (state & DMACSR_READ ? DMACSR_SETREAD : DMACSR_SETWRITE) |
		    (stat->nd_map_cont ? DMACSR_SETSUPDATE : 0));
#if 0
#ifdef ESP_DEBUG
		if (state & DMACSR_BUSEXC) {
			sprintf(ndtracep, "CE/BUSEXC: %08lX %08X %08X\n",
			    (stat->nd_map->dm_segs[stat->nd_idx].ds_addr +
			    stat->nd_map->dm_segs[stat->nd_idx].ds_len),
			    l, s);
			ndtracep += strlen(ndtracep);
		}
#endif
#endif
	} else {
#if 0
		if (state & DMACSR_BUSEXC) {
			while (nd_bsr4(DD_NEXT) !=
			    (nd_bsr4(DD_LIMIT) & 0x7FFFFFFF))
				printf("Y"); /* DELAY(50); */
			state = nd_bsr4(DD_CSR);
		}
#endif

		if (!(state & DMACSR_SUPDATE)) {
			nextdma_rotate(nsc);
		} else {
			nd_bsw4(DD_CSR, DMACSR_CLRCOMPLETE |
			    DMACSR_INITBUF | DMACSR_RESET |
			    (state & DMACSR_READ ? DMACSR_SETREAD : DMACSR_SETWRITE));

			nd_bsw4(DD_NEXT, stat->nd_map->dm_segs[stat->nd_idx].ds_addr);
			nd_bsw4(DD_LIMIT,
			    (stat->nd_map->dm_segs[stat->nd_idx].ds_addr +
			    stat->nd_map->dm_segs[stat->nd_idx].ds_len) | 0/* x80000000 */);
			if (stat->nd_map_cont) {
				nd_bsw4(DD_START,
				    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr);
				nd_bsw4(DD_STOP,
				    (stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
				    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len) | 0/* x80000000 */);
			}
			nd_bsw4(DD_CSR, DMACSR_SETENABLE |
			    DMACSR_CLRCOMPLETE |
			    (state & DMACSR_READ ? DMACSR_SETREAD : DMACSR_SETWRITE) |
			    (stat->nd_map_cont ? DMACSR_SETSUPDATE : 0));
#if 1
#ifdef ESP_DEBUG
			sprintf(ndtracep, "supdate ");
			ndtracep += strlen(ndtracep);
			sprintf(ndtracep, "%08X %08X %08X %08X ",
			    nd_bsr4(DD_NEXT),
			    nd_bsr4(DD_LIMIT) & 0x7FFFFFFF,
			    nd_bsr4(DD_START),
			    nd_bsr4(DD_STOP) & 0x7FFFFFFF);
			ndtracep += strlen(ndtracep);
#endif
#endif
			stat->nd_exception++;
			return(1);
			/* NCR_WRITE_REG(sc, ESP_DCTL, ctl); */
			goto restart;
		}

		if (stat->nd_map) {
#if 1
#ifdef ESP_DEBUG
			sprintf(ndtracep, "%08X %08X %08X %08X ",
			    nd_bsr4(DD_NEXT),
			    nd_bsr4(DD_LIMIT) & 0x7FFFFFFF,
			    nd_bsr4(DD_START),
			    nd_bsr4(DD_STOP) & 0x7FFFFFFF);
			ndtracep += strlen(ndtracep);
#endif
#endif

#if 0
			nd_bsw4(DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);

			nd_bsw4(DD_CSR, 0);
#endif
#if 1
			/* 6/2 */
			nd_bsw4(DD_CSR, DMACSR_CLRCOMPLETE |
			    DMACSR_INITBUF | DMACSR_RESET |
			    (state & DMACSR_READ ? DMACSR_SETREAD : DMACSR_SETWRITE));

			/* nextdma_setup_curr_regs(nsc); */
			nd_bsw4(DD_NEXT, stat->nd_map->dm_segs[stat->nd_idx].ds_addr);
			nd_bsw4(DD_LIMIT,
			    (stat->nd_map->dm_segs[stat->nd_idx].ds_addr +
			    stat->nd_map->dm_segs[stat->nd_idx].ds_len) | 0/* x80000000 */);
			/* nextdma_setup_cont_regs(nsc); */
			if (stat->nd_map_cont) {
				nd_bsw4(DD_START,
				    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr);
				nd_bsw4(DD_STOP,
				    (stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
				    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len) | 0/* x80000000 */);
			}

			nd_bsw4(DD_CSR,
			    DMACSR_SETENABLE | (stat->nd_map_cont ? DMACSR_SETSUPDATE : 0) |
			    (state & DMACSR_READ ? DMACSR_SETREAD : DMACSR_SETWRITE));
#ifdef ESP_DEBUG
			/* ndtraceshow++; */
#endif
			stat->nd_exception++;
			return(1);
#endif
			/* NCR_WRITE_REG(sc, ESP_DCTL, ctl); */
			goto restart;

		restart:
#if 1
#ifdef ESP_DEBUG
			sprintf(ndtracep, "restart %08lX %08lX\n",
			    stat->nd_map->dm_segs[stat->nd_idx].ds_addr,
			    stat->nd_map->dm_segs[stat->nd_idx].ds_addr +
			    stat->nd_map->dm_segs[stat->nd_idx].ds_len);
			if (stat->nd_map_cont) {
				sprintf(ndtracep + strlen(ndtracep) - 1, " %08lX %08lX\n",
				    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr,
				    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
				    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
			}
			ndtracep += strlen(ndtracep);
#endif
#endif
			nextdma_print(nsc);
			NCR_WRITE_REG(sc, ESP_DCTL, ESPDCTL_16MHZ | ESPDCTL_INTENB);
			printf("ff:%02x tcm:%d tcl:%d esp_dstat:%02x state:%02x step: %02x intr:%02x state:%08X\n",
			    NCR_READ_REG(sc, NCR_FFLAG),
			    NCR_READ_REG((sc), NCR_TCM), NCR_READ_REG((sc), NCR_TCL),
			    NCR_READ_REG(sc, ESP_DSTAT),
			    NCR_READ_REG(sc, NCR_STAT), NCR_READ_REG(sc, NCR_STEP),
			    NCR_READ_REG(sc, NCR_INTR), state);
#ifdef ESP_DEBUG
			*ndtracep = '\0';
			printf("ndtrace: %s\n", ndtrace);
#endif
			panic("%s: busexc/supdate occurred.  Please email this output to chris@pin.lu.",
			    sc->sc_dev.dv_xname);
#ifdef ESP_DEBUG
			ndtraceshow++;
#endif
		} else {
			nd_bsw4(DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);
			if (nsc->sc_conf.nd_shutdown_cb)
				(*nsc->sc_conf.nd_shutdown_cb)(nsc->sc_conf.nd_cb_arg);
		}
	}
	return (1);
}

/* Internal DMA callback routines */

bus_dmamap_t
esp_dmacb_continue(arg)
	void *arg;
{
	struct ncr53c9x_softc *sc = (struct ncr53c9x_softc *)arg;
	struct esp_softc *esc = (struct esp_softc *)sc;

	NDTRACEIF (*ndtracep++ = 'x');
	DPRINTF(("%s: DMA continue\n",sc->sc_dev.dv_xname));

#ifdef DIAGNOSTIC
	if ((esc->sc_datain < 0) || (esc->sc_datain > 1)) {
		panic("%s: map not loaded in DMA continue callback, datain = %d",
		    sc->sc_dev.dv_xname,esc->sc_datain);
	}
#endif

	if ((!(esc->sc_loaded & ESP_LOADED_MAIN)) &&
	    (esc->sc_main_dmamap->dm_mapsize)) {
		DPRINTF(("%s: Loading main map\n",sc->sc_dev.dv_xname));
#if 0
		bus_dmamap_sync(esc->sc_dma->sc_dmat, esc->sc_main_dmamap,
		    0, esc->sc_main_dmamap->dm_mapsize,
		    (esc->sc_datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
		esc->sc_main_dmamap->dm_xfer_len = 0;
#endif
		esc->sc_loaded |= ESP_LOADED_MAIN;
		return(esc->sc_main_dmamap);
	}

	if ((!(esc->sc_loaded & ESP_LOADED_TAIL)) &&
	    (esc->sc_tail_dmamap->dm_mapsize)) {
		DPRINTF(("%s: Loading tail map\n",sc->sc_dev.dv_xname));
#if 0
		bus_dmamap_sync(esc->sc_dma->sc_dmat, esc->sc_tail_dmamap,
		    0, esc->sc_tail_dmamap->dm_mapsize,
		    (esc->sc_datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
		esc->sc_tail_dmamap->dm_xfer_len = 0;
#endif
		esc->sc_loaded |= ESP_LOADED_TAIL;
		return(esc->sc_tail_dmamap);
	}

	DPRINTF(("%s: not loading map\n",sc->sc_dev.dv_xname));
	return(0);
}


void
esp_dmacb_completed(map, arg)
	bus_dmamap_t map;
	void *arg;
{
	struct ncr53c9x_softc *sc = (struct ncr53c9x_softc *)arg;
	struct esp_softc *esc = (struct esp_softc *)sc;

	NDTRACEIF (*ndtracep++ = 'X');
	DPRINTF(("%s: DMA completed\n",sc->sc_dev.dv_xname));

#ifdef DIAGNOSTIC
	if ((esc->sc_datain < 0) || (esc->sc_datain > 1)) {
		panic("%s: invalid DMA direction in completed callback, datain = %d",
		    sc->sc_dev.dv_xname,esc->sc_datain);
	}
#endif

#if defined(DIAGNOSTIC) && 0
	{
		int i;
		for(i=0;i<map->dm_nsegs;i++) {
			if (map->dm_xfer_len != map->dm_mapsize) {
				printf("%s: map->dm_mapsize = %d\n",
				    sc->sc_dev.dv_xname,map->dm_mapsize);
				printf("%s: map->dm_nsegs = %d\n",
				    sc->sc_dev.dv_xname,map->dm_nsegs);
				printf("%s: map->dm_xfer_len = %d\n",
				    sc->sc_dev.dv_xname,map->dm_xfer_len);
				for(i=0;i<map->dm_nsegs;i++) {
					printf("%s: map->dm_segs[%d].ds_addr = 0x%08lx\n",
					    sc->sc_dev.dv_xname,i,map->dm_segs[i].ds_addr);
					printf("%s: map->dm_segs[%d].ds_len = %d\n",
					    sc->sc_dev.dv_xname,i,map->dm_segs[i].ds_len);
				}
				panic("%s: incomplete DMA transfer",sc->sc_dev.dv_xname);
			}
		}
	}
#endif

	if (map == esc->sc_main_dmamap) {
#ifdef DIAGNOSTIC
		if ((esc->sc_loaded & ESP_UNLOADED_MAIN) ||
		    !(esc->sc_loaded & ESP_LOADED_MAIN)) {
			panic("%s: unexpected completed call for main map",sc->sc_dev.dv_xname);
		}
#endif
		esc->sc_loaded |= ESP_UNLOADED_MAIN;
	} else if (map == esc->sc_tail_dmamap) {
#ifdef DIAGNOSTIC
		if ((esc->sc_loaded & ESP_UNLOADED_TAIL) ||
		    !(esc->sc_loaded & ESP_LOADED_TAIL)) {
			panic("%s: unexpected completed call for tail map",sc->sc_dev.dv_xname);
		}
#endif
		esc->sc_loaded |= ESP_UNLOADED_TAIL;
	}
#ifdef DIAGNOSTIC
	else {
		panic("%s: unexpected completed map", sc->sc_dev.dv_xname);
	}
#endif

#ifdef ESP_DEBUG
	if (esp_debug) {
		if (map == esc->sc_main_dmamap) {
			printf("%s: completed main map\n",sc->sc_dev.dv_xname);
		} else if (map == esc->sc_tail_dmamap) {
			printf("%s: completed tail map\n",sc->sc_dev.dv_xname);
		}
	}
#endif

#if 0
	if ((map == esc->sc_tail_dmamap) ||
	    ((esc->sc_tail_size == 0) && (map == esc->sc_main_dmamap))) {

		/* Clear the DMAMOD bit in the DCTL register to give control
		 * back to the scsi chip.
		 */
		if (esc->sc_datain) {
			NCR_WRITE_REG(sc, ESP_DCTL,
			    ESPDCTL_16MHZ | ESPDCTL_INTENB | ESPDCTL_DMARD);
		} else {
			NCR_WRITE_REG(sc, ESP_DCTL,
			    ESPDCTL_16MHZ | ESPDCTL_INTENB);
		}
		DPRINTF(("esp dctl is 0x%02x\n",NCR_READ_REG(sc,ESP_DCTL)));
	}
#endif


#if 0
	bus_dmamap_sync(esc->sc_dma->sc_dmat, map,
	    0, map->dm_mapsize,
	    (esc->sc_datain ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
#endif

}
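/*
 * Shutdown callback, run when the nextdma engine is done with the whole
 * chain.  In outline: undo the write-overrun padding on the main map, sync
 * and unload both maps, copy the tail bounce buffer back into the caller's
 * buffer for reads, and clear the per-transfer state (sc_datain, sc_loaded,
 * and the begin/main/tail pointers).
 */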
void
esp_dmacb_shutdown(arg)
	void *arg;
{
	struct ncr53c9x_softc *sc = (struct ncr53c9x_softc *)arg;
	struct esp_softc *esc = (struct esp_softc *)sc;

	NDTRACEIF (*ndtracep++ = 'S');
	DPRINTF(("%s: DMA shutdown\n",sc->sc_dev.dv_xname));

	if (esc->sc_loaded == 0)
		return;

#if 0
	{
		/* Clear the DMAMOD bit in the DCTL register to give control
		 * back to the scsi chip.
		 */
		if (esc->sc_datain) {
			NCR_WRITE_REG(sc, ESP_DCTL,
			    ESPDCTL_16MHZ | ESPDCTL_INTENB | ESPDCTL_DMARD);
		} else {
			NCR_WRITE_REG(sc, ESP_DCTL,
			    ESPDCTL_16MHZ | ESPDCTL_INTENB);
		}
		DPRINTF(("esp dctl is 0x%02x\n",NCR_READ_REG(sc,ESP_DCTL)));
	}
#endif

	DPRINTF(("%s: esp_dma_nest == %d\n",sc->sc_dev.dv_xname,esp_dma_nest));

	/* Stuff the end slop into fifo */

#ifdef ESP_DEBUG
	if (esp_debug) {
		int n = NCR_READ_REG(sc, NCR_FFLAG);

		DPRINTF(("%s: fifo size = %d, seq = 0x%x\n",
		    sc->sc_dev.dv_xname,n & NCRFIFO_FF, (n & NCRFIFO_SS)>>5));
	}
#endif

	if (esc->sc_main_dmamap->dm_mapsize) {
		if (!esc->sc_datain) {
			/* unpatch the DMA map for write overrun */
			esc->sc_main_dmamap->dm_mapsize -= ESP_DMA_OVERRUN;
			esc->sc_main_dmamap->dm_segs[esc->sc_main_dmamap->dm_nsegs - 1].ds_len -=
			    ESP_DMA_OVERRUN;
		}
		bus_dmamap_sync(esc->sc_dma->sc_dmat, esc->sc_main_dmamap,
		    0, esc->sc_main_dmamap->dm_mapsize,
		    (esc->sc_datain ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
		bus_dmamap_unload(esc->sc_dma->sc_dmat, esc->sc_main_dmamap);
		NDTRACEIF (
			sprintf(ndtracep, "m%ld", esc->sc_main_dmamap->dm_xfer_len);
			ndtracep += strlen(ndtracep);
		);
	}

	if (esc->sc_tail_dmamap->dm_mapsize) {
		bus_dmamap_sync(esc->sc_dma->sc_dmat, esc->sc_tail_dmamap,
		    0, esc->sc_tail_dmamap->dm_mapsize,
		    (esc->sc_datain ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
		bus_dmamap_unload(esc->sc_dma->sc_dmat, esc->sc_tail_dmamap);
		/* copy the tail DMA buffer data for read transfers */
		if (esc->sc_datain) {
			memcpy(*esc->sc_dmaaddr+esc->sc_begin_size+esc->sc_main_size,
			    esc->sc_tail,
			    esc->sc_dmasize-(esc->sc_begin_size+esc->sc_main_size));
		}
		NDTRACEIF (
			sprintf(ndtracep, "t%ld", esc->sc_tail_dmamap->dm_xfer_len);
			ndtracep += strlen(ndtracep);
		);
	}

#ifdef ESP_DEBUG
	if (esp_debug) {
		printf("%s: dma_shutdown: addr=%p,len=0x%08x,size=0x%08x\n",
		    sc->sc_dev.dv_xname,
		    *esc->sc_dmaaddr, *esc->sc_dmalen, esc->sc_dmasize);
		if (esp_debug > 10) {
			esp_hex_dump(*(esc->sc_dmaaddr),esc->sc_dmasize);
			printf("%s: tail=%p,tailbuf=%p,tail_size=0x%08x\n",
			    sc->sc_dev.dv_xname,
			    esc->sc_tail, &(esc->sc_tailbuf[0]), esc->sc_tail_size);
			esp_hex_dump(&(esc->sc_tailbuf[0]),sizeof(esc->sc_tailbuf));
		}
	}
#endif

	esc->sc_main = 0;
	esc->sc_main_size = 0;
	esc->sc_tail = 0;
	esc->sc_tail_size = 0;

	esc->sc_datain = -1;
	/* esc->sc_dmaaddr = 0; */
	/* esc->sc_dmalen = 0; */
	/* esc->sc_dmasize = 0; */

	esc->sc_loaded = 0;

	esc->sc_begin = 0;
	esc->sc_begin_size = 0;

#ifdef ESP_DEBUG
	if (esp_debug) {
		char sbuf[256];

		bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRSTAT)),
		    NEXT_INTR_BITS, sbuf, sizeof(sbuf));
		printf("  *intrstat = 0x%s\n", sbuf);

		bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRMASK)),
		    NEXT_INTR_BITS, sbuf, sizeof(sbuf));
		printf("  *intrmask = 0x%s\n", sbuf);
	}
#endif
}