/*	$NetBSD: sw.c,v 1.17 2006/03/29 04:16:47 thorpej Exp $	*/

/*-
 * Copyright (c) 1996 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Adam Glass, David Jones, Gordon W. Ross, and Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains only the machine-dependent parts of the
 * Sun4 SCSI driver.  (Autoconfig stuff and DMA functions.)
 * The machine-independent parts are in ncr5380sbc.c
 *
 * Supported hardware includes:
 * Sun "SCSI Weird" on OBIO (sw: Sun 4/100-series)
 * Sun SCSI-3 on VME (si: Sun 4/200-series, others)
 *
 * The VME variant has a bit to enable or disable the DMA engine,
 * but that bit also gates the interrupt line from the NCR5380!
 * Therefore, in order to get any interrupt from the 5380, (i.e.
 * for reselect) one must clear the DMA engine transfer count and
 * then enable DMA.  This has the further complication that you
 * CAN NOT touch the NCR5380 while the DMA enable bit is set, so
 * we have to turn DMA back off before we even look at the 5380.
 *
 * What wonderfully whacky hardware this is!
 *
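 * In outline, the dance described above looks something like this
 * (a sketch only; see sw_intr_on() and sw_intr_off() below for what
 * this driver actually does on the OBIO variant):
 *
 *	to get 5380 interrupts:	zero the DMA count, then set DMA enable
 *	to touch the 5380:	clear DMA enable first, then access it
 *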
 * David Jones wrote the initial version of this module for NetBSD/sun3,
 * which included support for the VME adapter only. (no reselection).
 *
 * Gordon Ross added support for the Sun 3 OBIO adapter, and re-worked
 * both the VME and OBIO code to support disconnect/reselect.
 * (Required figuring out the hardware "features" noted above.)
 *
 * The autoconfiguration boilerplate came from Adam Glass.
 *
 * Jason R. Thorpe ported the autoconfiguration and VME portions to
 * NetBSD/sparc, and added initial support for the 4/100 "SCSI Weird",
 * a wacky OBIO variant of the VME SCSI-3.  Many thanks to Chuck Cranor
 * for lots of helpful tips and suggestions.  Thanks also to Paul Kranenburg
 * and Chris Torek for bits of insight needed along the way.  Thanks to
 * David Gilbert and Andrew Gillham who risked filesystem life-and-limb
 * for the sake of testing.  Andrew Gillham helped work out the bugs
 * in the 4/100 DMA code.
 */

/*
 * NOTE: support for the 4/100 "SCSI Weird" is not complete!  DMA
 * works, but interrupts (and, thus, reselection) don't.  I don't know
 * why, and I don't have a machine to test this on further.
 *
 * DMA, DMA completion interrupts, and reselection work fine on my
 * 4/260 with modern SCSI-II disks attached.  I've had reports of
 * reselection failing on Sun Shoebox-type configurations where
 * there are multiple non-SCSI devices behind Emulex or Adaptec
 * bridges.  These devices pre-date the SCSI-I spec, and might not
 * behave the way the 5380 code expects.  For this reason, only
 * DMA is enabled by default in this driver.
 *
 *	Jason R. Thorpe <thorpej@NetBSD.org>
 *	December 8, 1995
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sw.c,v 1.17 2006/03/29 04:16:47 thorpej Exp $");

#include "opt_ddb.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/buf.h>

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/autoconf.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_debug.h>
#include <dev/scsipi/scsiconf.h>

#ifndef DDB
#define	Debugger()
#endif

#ifndef DEBUG
#define	DEBUG	XXX
#endif

#define	COUNT_SW_LEFTOVERS	XXX	/* See sw DMA completion code */

#include <dev/ic/ncr5380reg.h>
#include <dev/ic/ncr5380var.h>

#include <sparc/dev/swreg.h>

/*
 * Transfers smaller than this are done using PIO
 * (on the assumption that they're not worth the DMA overhead)
 */
#define	MIN_DMA_LEN 128

/*
 * Transfers larger than 65535 bytes need to be split up.
 * (Some of the FIFO logic has only 16-bit counters.)
 * Make the size an integer multiple of the page size
 * to avoid buf/cluster remap problems.  (paranoid?)
 */
#define	MAX_DMA_LEN 0xE000
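
/*
 * (Arithmetic check: 0xE000 is 57344 bytes, which is below the 16-bit
 * limit of 65535 and is a whole number of pages for both 4KB and 8KB
 * page sizes: 57344 = 14 * 4096 = 7 * 8192, so the split size satisfies
 * both constraints mentioned above.)
 */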

#ifdef	DEBUG
int sw_debug = 0;
#endif

/*
 * This structure is used to keep track of mapped DMA requests.
 */
struct sw_dma_handle {
	int		dh_flags;
#define	SIDH_BUSY	0x01		/* This DH is in use */
#define	SIDH_OUT	0x02		/* DMA does data out (write) */
	u_char		*dh_addr;	/* KVA of start of buffer */
	int		dh_maplen;	/* Original data length */
	long		dh_startingpa;	/* PA of buffer; for "sw" */
	bus_dmamap_t	dh_dmamap;
#define	dh_dvma	dh_dmamap->dm_segs[0].ds_addr /* VA of buffer in DVMA space */
};

/*
 * The first structure member has to be the ncr5380_softc
 * so we can just cast to go back and forth between them.
 */
struct sw_softc {
	struct ncr5380_softc	ncr_sc;
	bus_space_tag_t		sc_bustag;	/* bus tags */
	bus_dma_tag_t		sc_dmatag;

	struct sw_dma_handle	*sc_dma;
	int	sc_xlen;	/* length of current DMA segment. */
	int	sc_options;	/* options for this instance. */
};

/*
 * Options.  By default, DMA is enabled and DMA completion interrupts
 * and reselect are disabled.  You may enable additional features
 * via the `flags' directive in your kernel's configuration file.
 *
 * Alternatively, you can patch your kernel with DDB or some other
 * mechanism.  The sc_options member of the softc is OR'd with
 * the value in sw_options.
 *
 * On the "sw", interrupts (and thus reselection) don't work, so they're
 * disabled by default.  DMA is still a little dangerous, too.
 *
 * Note, there's a separate global, sw_options, to make life easier.
 */
#define	SW_ENABLE_DMA	0x01	/* Use DMA (maybe polled) */
#define	SW_DMA_INTR	0x02	/* DMA completion interrupts */
#define	SW_DO_RESELECT	0x04	/* Allow disconnect/reselect */
#define	SW_OPTIONS_MASK	(SW_ENABLE_DMA|SW_DMA_INTR|SW_DO_RESELECT)
#define	SW_OPTIONS_BITS	"\10\3RESELECT\2DMA_INTR\1DMA"
int sw_options = SW_ENABLE_DMA;
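
/*
 * (Illustrative example; the exact attachment line depends on your
 * kernel configuration file.)  A config entry along the lines of
 *
 *	sw0 at obio0 ... flags 0x07
 *
 * would request DMA, DMA completion interrupts, and disconnect/reselect
 * (SW_ENABLE_DMA | SW_DMA_INTR | SW_DO_RESELECT), while the compiled-in
 * default above asks for DMA only (0x01).
 */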

static int	sw_match(struct device *, struct cfdata *, void *);
static void	sw_attach(struct device *, struct device *, void *);
static int	sw_intr(void *);
static void	sw_reset_adapter(struct ncr5380_softc *);
static void	sw_minphys(struct buf *);

void	sw_dma_alloc(struct ncr5380_softc *);
void	sw_dma_free(struct ncr5380_softc *);
void	sw_dma_poll(struct ncr5380_softc *);

void	sw_dma_setup(struct ncr5380_softc *);
void	sw_dma_start(struct ncr5380_softc *);
void	sw_dma_eop(struct ncr5380_softc *);
void	sw_dma_stop(struct ncr5380_softc *);

void	sw_intr_on(struct ncr5380_softc *);
void	sw_intr_off(struct ncr5380_softc *);

/* Shorthand bus space access */
#define	SWREG_READ(sc, index) \
	bus_space_read_4((sc)->sc_regt, (sc)->sc_regh, index)
#define	SWREG_WRITE(sc, index, v) \
	bus_space_write_4((sc)->sc_regt, (sc)->sc_regh, index, v)


/* The Sun "SCSI Weird" 4/100 obio controller. */
CFATTACH_DECL(sw, sizeof(struct sw_softc),
    sw_match, sw_attach, NULL, NULL);

static int
sw_match(struct device *parent, struct cfdata *cf, void *aux)
{
	union obio_attach_args *uoba = aux;
	struct obio4_attach_args *oba;

	/* Nothing but a Sun 4/100 is going to have these devices. */
	if (cpuinfo.cpu_type != CPUTYP_4_100)
		return (0);

	if (uoba->uoba_isobio4 == 0)
		return (0);

	/* Make sure there is something there... */
	oba = &uoba->uoba_oba4;
	return (bus_space_probe(oba->oba_bustag, oba->oba_paddr,
				1,	/* probe size */
				1,	/* offset */
				0,	/* flags */
				NULL, NULL));
}

static void
sw_attach(struct device *parent, struct device *self, void *aux)
{
	struct sw_softc *sc = (struct sw_softc *) self;
	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
	union obio_attach_args *uoba = aux;
	struct obio4_attach_args *oba = &uoba->uoba_oba4;
	bus_space_handle_t bh;
	char bits[64];
	int i;

	sc->sc_dmatag = oba->oba_dmatag;

	/* Map the controller registers. */
	if (bus_space_map(oba->oba_bustag, oba->oba_paddr,
			  SWREG_BANK_SZ,
			  BUS_SPACE_MAP_LINEAR,
			  &bh) != 0) {
		printf("%s: cannot map registers\n", self->dv_xname);
		return;
	}

	ncr_sc->sc_regt = oba->oba_bustag;
	ncr_sc->sc_regh = bh;

	sc->sc_options = sw_options;

	ncr_sc->sc_dma_setup = sw_dma_setup;
	ncr_sc->sc_dma_start = sw_dma_start;
	ncr_sc->sc_dma_eop = sw_dma_stop;
	ncr_sc->sc_dma_stop = sw_dma_stop;
	ncr_sc->sc_intr_on = sw_intr_on;
	ncr_sc->sc_intr_off = sw_intr_off;

	/*
	 * Establish interrupt channel.
	 * The default interrupt priority is always 3.  At least, that's
	 * what my board seems to be at.  --thorpej
	 */
	if (oba->oba_pri == -1)
		oba->oba_pri = 3;

	(void)bus_intr_establish(oba->oba_bustag, oba->oba_pri, IPL_BIO,
	    sw_intr, sc);

	printf(" pri %d\n", oba->oba_pri);


	/*
	 * Pull in the options flags.  Allow the user to completely
	 * override the default values.
	 */
	if ((device_cfdata(&ncr_sc->sc_dev)->cf_flags & SW_OPTIONS_MASK) != 0)
		sc->sc_options =
		    device_cfdata(&ncr_sc->sc_dev)->cf_flags & SW_OPTIONS_MASK;

	/*
	 * Initialize fields used by the MI code
	 */

	/* NCR5380 register bank offsets */
	ncr_sc->sci_r0 = 0;
	ncr_sc->sci_r1 = 1;
	ncr_sc->sci_r2 = 2;
	ncr_sc->sci_r3 = 3;
	ncr_sc->sci_r4 = 4;
	ncr_sc->sci_r5 = 5;
	ncr_sc->sci_r6 = 6;
	ncr_sc->sci_r7 = 7;

	ncr_sc->sc_rev = NCR_VARIANT_NCR5380;

	/*
	 * MD function pointers used by the MI code.
	 */
	ncr_sc->sc_pio_out = ncr5380_pio_out;
	ncr_sc->sc_pio_in = ncr5380_pio_in;
	ncr_sc->sc_dma_alloc = sw_dma_alloc;
	ncr_sc->sc_dma_free = sw_dma_free;
	ncr_sc->sc_dma_poll = sw_dma_poll;

	ncr_sc->sc_flags = 0;
	if ((sc->sc_options & SW_DO_RESELECT) == 0)
		ncr_sc->sc_no_disconnect = 0xFF;
	if ((sc->sc_options & SW_DMA_INTR) == 0)
		ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;
	ncr_sc->sc_min_dma_len = MIN_DMA_LEN;


	/*
	 * Allocate DMA handles.
	 */
	i = SCI_OPENINGS * sizeof(struct sw_dma_handle);
	sc->sc_dma = (struct sw_dma_handle *)malloc(i, M_DEVBUF, M_NOWAIT);
	if (sc->sc_dma == NULL)
		panic("sw: DMA handle malloc failed");

	for (i = 0; i < SCI_OPENINGS; i++) {
		sc->sc_dma[i].dh_flags = 0;

		/* Allocate a DMA handle */
		if (bus_dmamap_create(
				sc->sc_dmatag,	/* tag */
				MAXPHYS,	/* size */
				1,		/* nsegments */
				MAXPHYS,	/* maxsegsz */
				0,		/* boundary */
				BUS_DMA_NOWAIT,
				&sc->sc_dma[i].dh_dmamap) != 0) {

			printf("%s: DMA buffer map create error\n",
			    ncr_sc->sc_dev.dv_xname);
			return;
		}
	}

	if (sc->sc_options) {
		printf("%s: options=%s\n", ncr_sc->sc_dev.dv_xname,
		    bitmask_snprintf(sc->sc_options, SW_OPTIONS_BITS,
		    bits, sizeof(bits)));
	}

	ncr_sc->sc_channel.chan_id = 7;
	ncr_sc->sc_adapter.adapt_minphys = sw_minphys;

	/* Initialize sw board */
	sw_reset_adapter(ncr_sc);

	/* Attach the ncr5380 chip driver */
	ncr5380_attach(ncr_sc);
}

static void
sw_minphys(struct buf *bp)
{
	if (bp->b_bcount > MAX_DMA_LEN) {
#ifdef DEBUG
		if (sw_debug) {
			printf("sw_minphys len = 0x%x.\n", MAX_DMA_LEN);
			Debugger();
		}
#endif
		bp->b_bcount = MAX_DMA_LEN;
	}
	minphys(bp);
}

#define	CSR_WANT (SW_CSR_SBC_IP | SW_CSR_DMA_IP | \
	SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR)

static int
sw_intr(void *arg)
{
	struct sw_softc *sc = arg;
	struct ncr5380_softc *ncr_sc = (struct ncr5380_softc *)arg;
	int dma_error, claimed;
	u_short csr;

	claimed = 0;
	dma_error = 0;

	/* SBC interrupt? DMA interrupt? */
	csr = SWREG_READ(ncr_sc, SWREG_CSR);

	NCR_TRACE("sw_intr: csr=0x%x\n", csr);

	if (csr & SW_CSR_DMA_CONFLICT) {
		dma_error |= SW_CSR_DMA_CONFLICT;
		printf("sw_intr: DMA conflict\n");
	}
	if (csr & SW_CSR_DMA_BUS_ERR) {
		dma_error |= SW_CSR_DMA_BUS_ERR;
		printf("sw_intr: DMA bus error\n");
	}
	if (dma_error) {
		if (sc->ncr_sc.sc_state & NCR_DOINGDMA)
			sc->ncr_sc.sc_state |= NCR_ABORTING;
		/* Make sure we will call the main isr. */
		csr |= SW_CSR_DMA_IP;
	}

	if (csr & (SW_CSR_SBC_IP | SW_CSR_DMA_IP)) {
		claimed = ncr5380_intr(&sc->ncr_sc);
#ifdef DEBUG
		if (!claimed) {
			printf("sw_intr: spurious from SBC\n");
			if (sw_debug & 4) {
				Debugger();	/* XXX */
			}
		}
#endif
	}

	return (claimed);
}


static void
sw_reset_adapter(struct ncr5380_softc *ncr_sc)
{

#ifdef DEBUG
	if (sw_debug) {
		printf("sw_reset_adapter\n");
	}
#endif

	/*
	 * The reset bits in the CSR are active low.
	 */
	SWREG_WRITE(ncr_sc, SWREG_CSR, 0);
	delay(10);
	SWREG_WRITE(ncr_sc, SWREG_CSR, SW_CSR_SCSI_RES);

	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);
	delay(10);
	SWREG_WRITE(ncr_sc, SWREG_CSR, SW_CSR_SCSI_RES | SW_CSR_INTR_EN);

	SCI_CLR_INTR(ncr_sc);
}


/*****************************************************************
 * Common functions for DMA
 ****************************************************************/

/*
 * Allocate a DMA handle and put it in sc->sc_dma.  Prepare
 * for DMA transfer.  On the Sun4, this means mapping the buffer
 * into DVMA space.
 */
void
sw_dma_alloc(struct ncr5380_softc *ncr_sc)
{
	struct sw_softc *sc = (struct sw_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct scsipi_xfer *xs = sr->sr_xs;
	struct sw_dma_handle *dh;
	int i, xlen;
	u_long addr;

#ifdef DIAGNOSTIC
	if (sr->sr_dma_hand != NULL)
		panic("sw_dma_alloc: already have DMA handle");
#endif

#if 1	/* XXX - Temporary */
	/* XXX - In case we think DMA is completely broken... */
	if ((sc->sc_options & SW_ENABLE_DMA) == 0)
		return;
#endif

	addr = (u_long) ncr_sc->sc_dataptr;
	xlen = ncr_sc->sc_datalen;

	/* If the DMA start addr is misaligned then do PIO */
	if ((addr & 1) || (xlen & 1)) {
		printf("sw_dma_alloc: misaligned.\n");
		return;
	}

	/* Make sure our caller checked sc_min_dma_len. */
	if (xlen < MIN_DMA_LEN)
		panic("sw_dma_alloc: xlen=0x%x", xlen);

	/*
	 * Find a free DMA handle.  Guaranteed to find one since
	 * we have as many DMA handles as the adapter has openings.
	 */
	for (i = 0; i < SCI_OPENINGS; i++) {
		if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
			goto found;
	}
	panic("sw: no free DMA handles.");

found:
	dh = &sc->sc_dma[i];
	dh->dh_flags = SIDH_BUSY;
	dh->dh_addr = (u_char *)addr;
	dh->dh_maplen = xlen;

	/* Copy the "write" flag for convenience. */
	if ((xs->xs_control & XS_CTL_DATA_OUT) != 0)
		dh->dh_flags |= SIDH_OUT;

	/*
	 * Double-map the buffer into DVMA space.  If we can't re-map
	 * the buffer, we print a warning and fall back to PIO mode.
	 *
	 * NOTE: it is not safe to sleep here!
	 */
	if (bus_dmamap_load(sc->sc_dmatag, dh->dh_dmamap,
			    (caddr_t)addr, xlen, NULL, BUS_DMA_NOWAIT) != 0) {
		/* Can't remap segment */
		printf("sw_dma_alloc: can't remap 0x%lx/0x%x, doing PIO\n",
		    addr, dh->dh_maplen);
		dh->dh_flags = 0;
		return;
	}
	bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap, addr, xlen,
			(dh->dh_flags & SIDH_OUT)
				? BUS_DMASYNC_PREWRITE
				: BUS_DMASYNC_PREREAD);

	/* success */
	sr->sr_dma_hand = dh;

	return;
}


void
sw_dma_free(struct ncr5380_softc *ncr_sc)
{
	struct sw_softc *sc = (struct sw_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct sw_dma_handle *dh = sr->sr_dma_hand;

#ifdef DIAGNOSTIC
	if (dh == NULL)
		panic("sw_dma_free: no DMA handle");
#endif

	if (ncr_sc->sc_state & NCR_DOINGDMA)
		panic("sw_dma_free: free while in progress");

	if (dh->dh_flags & SIDH_BUSY) {
		/* Give back the DVMA space. */
		bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap,
				dh->dh_dvma, dh->dh_maplen,
				(dh->dh_flags & SIDH_OUT)
					? BUS_DMASYNC_POSTWRITE
					: BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmatag, dh->dh_dmamap);
		dh->dh_flags = 0;
	}
	sr->sr_dma_hand = NULL;
}


/*
 * Poll (spin-wait) for DMA completion.
 * Called right after xx_dma_start(), and
 * xx_dma_stop() will be called next.
 * Same for either VME or OBIO.
 */
void
sw_dma_poll(struct ncr5380_softc *ncr_sc)
{
	struct sci_req *sr = ncr_sc->sc_current;
	int tmo, csr_mask, csr;

	/* Make sure DMA started successfully. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		return;

	csr_mask = SW_CSR_SBC_IP | SW_CSR_DMA_IP |
	    SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR;

	tmo = 50000;	/* 50000 iterations * 100us delay = 5 sec. */
	for (;;) {
		csr = SWREG_READ(ncr_sc, SWREG_CSR);
		if (csr & csr_mask)
			break;
		if (--tmo <= 0) {
			printf("%s: DMA timeout (while polling)\n",
			    ncr_sc->sc_dev.dv_xname);
			/* Indicate timeout as MI code would. */
			sr->sr_flags |= SR_OVERDUE;
			break;
		}
		delay(100);
	}

#ifdef DEBUG
	if (sw_debug) {
		printf("sw_dma_poll: done, csr=0x%x\n", csr);
	}
#endif
}


/*
 * This is called when the bus is going idle,
 * so we want to enable the SBC interrupts.
 * That is controlled by the DMA enable!
 * Who would have guessed!
 * What a NASTY trick!
 *
 * XXX THIS MIGHT NOT WORK RIGHT!
 */
void
sw_intr_on(struct ncr5380_softc *ncr_sc)
{
	uint32_t csr;

	sw_dma_setup(ncr_sc);
	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	csr |= SW_CSR_DMA_EN;	/* XXX - this bit is for vme only?! */
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
}

/*
 * This is called when the bus is idle and we are
 * about to start playing with the SBC chip.
 *
 * XXX THIS MIGHT NOT WORK RIGHT!
 */
void
sw_intr_off(struct ncr5380_softc *ncr_sc)
{
	uint32_t csr;

	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	csr &= ~SW_CSR_DMA_EN;
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
}


/*
 * This function is called during the COMMAND or MSG_IN phase
 * that precedes a DATA_IN or DATA_OUT phase, in case we need
 * to setup the DMA engine before the bus enters a DATA phase.
 *
 * On the OBIO version we just clear the DMA count and address
 * here (to make sure it stays idle) and do the real setup
 * later, in dma_start.
 */
void
sw_dma_setup(struct ncr5380_softc *ncr_sc)
{
	uint32_t csr;

	/* No FIFO to reset on "sw". */

	/* Set direction (assume recv here) */
	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	csr &= ~SW_CSR_SEND;
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);
}


void
sw_dma_start(struct ncr5380_softc *ncr_sc)
{
	struct sw_softc *sc = (struct sw_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct sw_dma_handle *dh = sr->sr_dma_hand;
	u_long dva;
	int xlen, adj, adjlen;
	u_int mode;
	uint32_t csr;

	/*
	 * Get the DVMA mapping for this segment.
	 */
	dva = (u_long)(dh->dh_dvma);
	if (dva & 1)
		panic("sw_dma_start: bad dva=0x%lx", dva);

	xlen = ncr_sc->sc_datalen;
	xlen &= ~1;
	sc->sc_xlen = xlen;	/* XXX: or less... */

#ifdef DEBUG
	if (sw_debug & 2) {
		printf("sw_dma_start: dh=%p, dva=0x%lx, xlen=%d\n",
		    dh, dva, xlen);
	}
#endif

	/*
	 * Set up the DMA controller.
	 * Note that dh->dh_maplen may be smaller than sc_datalen.
	 */

	/* Set direction (send/recv) */
	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	if (dh->dh_flags & SIDH_OUT) {
		csr |= SW_CSR_SEND;
	} else {
		csr &= ~SW_CSR_SEND;
	}
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

	/*
	 * The "sw" needs longword-aligned transfers.  We
	 * detect a shortword-aligned transfer here, and adjust the
	 * DMA transfer by 2 bytes.  These two bytes are read/written
	 * in PIO mode just before the DMA is started.
	 */
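	/*
	 * (Worked example.)  If the DVMA address ends in 2 (shortword-
	 * but not longword-aligned), adj becomes 2 below: the first two
	 * bytes are moved by PIO, and the DMA engine is handed the
	 * address advanced by 2 with the count reduced by 2, so the
	 * engine itself only ever sees a longword-aligned transfer.
	 */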
	adj = 0;
	if (dva & 2) {
		adj = 2;
#ifdef DEBUG
		if (sw_debug & 2)
			printf("sw_dma_start: adjusted up %d bytes\n", adj);
#endif
	}

	/* We have to frob the address on the "sw". */
	dh->dh_startingpa = (dva | 0xF00000);
	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, (u_int)(dh->dh_startingpa + adj));
	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, xlen - adj);

	/*
	 * Acknowledge the phase change.  (After DMA setup!)
	 * Put the SBIC into DMA mode, and start the transfer.
	 */
	if (dh->dh_flags & SIDH_OUT) {
		NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_OUT);
		if (adj) {
			adjlen = ncr5380_pio_out(ncr_sc, PHASE_DATA_OUT,
			    adj, dh->dh_addr);
			if (adjlen != adj)
				printf("%s: bad outgoing adj, %d != %d\n",
				    ncr_sc->sc_dev.dv_xname, adjlen, adj);
		}
		SCI_CLR_INTR(ncr_sc);
		NCR5380_WRITE(ncr_sc, sci_icmd, SCI_ICMD_DATA);
		mode = NCR5380_READ(ncr_sc, sci_mode);
		mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		NCR5380_WRITE(ncr_sc, sci_mode, mode);
		NCR5380_WRITE(ncr_sc, sci_dma_send, 0);	/* start it */
	} else {
		NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_IN);
		if (adj) {
			adjlen = ncr5380_pio_in(ncr_sc, PHASE_DATA_IN,
			    adj, dh->dh_addr);
			if (adjlen != adj)
				printf("%s: bad incoming adj, %d != %d\n",
				    ncr_sc->sc_dev.dv_xname, adjlen, adj);
		}
		SCI_CLR_INTR(ncr_sc);
		NCR5380_WRITE(ncr_sc, sci_icmd, 0);
		mode = NCR5380_READ(ncr_sc, sci_mode);
		mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		NCR5380_WRITE(ncr_sc, sci_mode, mode);
		NCR5380_WRITE(ncr_sc, sci_irecv, 0);	/* start it */
	}

	/* Let'er rip! */
	csr |= SW_CSR_DMA_EN;
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

	ncr_sc->sc_state |= NCR_DOINGDMA;

#ifdef DEBUG
	if (sw_debug & 2) {
		printf("sw_dma_start: started, flags=0x%x\n",
		    ncr_sc->sc_state);
	}
#endif
}


void
sw_dma_eop(struct ncr5380_softc *ncr_sc)
{

	/* Not needed - DMA was stopped prior to examining sci_csr */
}

#if (defined(DEBUG) || defined(DIAGNOSTIC)) && !defined(COUNT_SW_LEFTOVERS)
#define	COUNT_SW_LEFTOVERS
#endif
#ifdef	COUNT_SW_LEFTOVERS
/*
 * Let's find out how often these occur.  Read these with DDB from time
 * to time.
 */
int	sw_3_leftover = 0;
int	sw_2_leftover = 0;
int	sw_1_leftover = 0;
int	sw_0_leftover = 0;
#endif

void
sw_dma_stop(struct ncr5380_softc *ncr_sc)
{
	struct sci_req *sr = ncr_sc->sc_current;
	struct sw_dma_handle *dh = sr->sr_dma_hand;
	int ntrans = 0, dva;
	u_int mode;
	uint32_t csr;

	if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
#ifdef DEBUG
		printf("sw_dma_stop: DMA not running\n");
#endif
		return;
	}
	ncr_sc->sc_state &= ~NCR_DOINGDMA;

	/* First, halt the DMA engine. */
	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	csr &= ~SW_CSR_DMA_EN;
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

	/*
	 * XXX HARDWARE BUG!
	 * Apparently, some early 4/100 SCSI controllers had a hardware
	 * bug that caused the controller to do illegal memory access.
	 * We see this as SW_CSR_DMA_BUS_ERR (makes sense).  To work around
	 * this, we simply need to clean up after ourselves ... there will
	 * be as many as 3 bytes left over.  Since we clean up "left-over"
	 * bytes on every read anyway, we just continue to chug along
	 * if SW_CSR_DMA_BUS_ERR is asserted.  (This was probably worked
	 * around in hardware later with the "left-over byte" indicator
	 * in the VME controller.)
	 */
#if 0
	if (csr & (SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR))
#else
	if (csr & (SW_CSR_DMA_CONFLICT))
#endif
	{
		printf("sw: DMA error, csr=0x%x, reset\n", csr);
		sr->sr_xs->error = XS_DRIVER_STUFFUP;
		ncr_sc->sc_state |= NCR_ABORTING;
		sw_reset_adapter(ncr_sc);
	}

	/* Note that timeout may have set the error flag. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		goto out;

	/*
	 * Now try to figure out how much was actually transferred.
	 *
	 * The "sw" doesn't have a FIFO or a bcr, so we stored
	 * the starting PA of the transfer in the DMA handle
	 * and subtract it from the ending PA left in the dma_addr
	 * register.
	 */
	dva = SWREG_READ(ncr_sc, SWREG_DMA_ADDR);
	ntrans = (dva - dh->dh_startingpa);

#ifdef DEBUG
	if (sw_debug & 2) {
		printf("sw_dma_stop: ntrans=0x%x\n", ntrans);
	}
#endif

	if (ntrans > ncr_sc->sc_datalen)
		panic("sw_dma_stop: excess transfer");

	/* Adjust data pointer */
	ncr_sc->sc_dataptr += ntrans;
	ncr_sc->sc_datalen -= ntrans;

	/*
	 * After a read, we may need to clean up
	 * "left-over bytes" (yuck!).  The "sw" doesn't
	 * have a "left-over" indicator, so we have to do
	 * this after every read no matter what, keying off
	 * the low two bits of the ending DMA address.  Ick.
	 */
	if ((dh->dh_flags & SIDH_OUT) == 0) {
		char *cp = ncr_sc->sc_dataptr;
		uint32_t bpr;

		bpr = SWREG_READ(ncr_sc, SWREG_BPR);

		switch (dva & 3) {
		case 3:
			cp[0] = (bpr & 0xff000000) >> 24;
			cp[1] = (bpr & 0x00ff0000) >> 16;
			cp[2] = (bpr & 0x0000ff00) >> 8;
#ifdef COUNT_SW_LEFTOVERS
			++sw_3_leftover;
#endif
			break;

		case 2:
			cp[0] = (bpr & 0xff000000) >> 24;
			cp[1] = (bpr & 0x00ff0000) >> 16;
#ifdef COUNT_SW_LEFTOVERS
			++sw_2_leftover;
#endif
			break;

		case 1:
			cp[0] = (bpr & 0xff000000) >> 24;
#ifdef COUNT_SW_LEFTOVERS
			++sw_1_leftover;
#endif
			break;

#ifdef COUNT_SW_LEFTOVERS
		default:
			++sw_0_leftover;
			break;
#endif
		}
	}

out:
	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);

	/* Put SBIC back in PIO mode. */
	mode = NCR5380_READ(ncr_sc, sci_mode);
	mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
	NCR5380_WRITE(ncr_sc, sci_mode, mode);
	NCR5380_WRITE(ncr_sc, sci_icmd, 0);

#ifdef DEBUG
	if (sw_debug & 2) {
		printf("sw_dma_stop: ntrans=0x%x\n", ntrans);
	}
#endif
}