/*	$NetBSD: si_sebuf.c,v 1.5 1997/12/09 22:29:06 gwr Exp $	*/

/*-
 * Copyright (c) 1996 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Gordon W. Ross.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Sun3/E SCSI driver (machine-dependent portion).
 * The machine-independent parts are in ncr5380sbc.c
 *
 * XXX - Mostly from the si driver.  Merge?
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_debug.h>
#include <dev/scsipi/scsiconf.h>

#include <machine/autoconf.h>

/* #define DEBUG XXX */

#include <dev/ic/ncr5380reg.h>
#include <dev/ic/ncr5380var.h>

#include "sereg.h"
#include "sevar.h"

/*
 * Transfers smaller than this are done using PIO
 * (on the assumption that they're not worth the DMA overhead).
 */
#define	MIN_DMA_LEN 128

/*
 * Transfers larger than 65535 bytes need to be split up.
 * (Some of the FIFO logic has only 16-bit counters.)
 * Make the size an integer multiple of the page size
 * to avoid buf/cluster remap problems.  (paranoid?)
 */
#define	MAX_DMA_LEN 0xE000
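/*
 * Note on the value: 0xE000 is 57344 bytes, i.e. seven pages assuming
 * the usual 8 KB Sun3 page size, so a maximal transfer stays a whole
 * number of pages while remaining below the 0xFFFF ceiling of a
 * 16-bit count register.
 */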
/*
 * This structure is used to keep track of mapped DMA requests.
 */
struct se_dma_handle {
	int		dh_flags;
#define	SIDH_BUSY	1		/* This DH is in use */
#define	SIDH_OUT	2		/* DMA does data out (write) */
	u_char		*dh_addr;	/* KVA of start of buffer */
	int		dh_maplen;	/* Length of KVA mapping. */
	long		dh_dma;		/* Offset in DMA buffer. */
};

/*
 * The first structure member has to be the ncr5380_softc
 * so we can just cast to go back and forth between them.
 */
struct se_softc {
	struct ncr5380_softc	ncr_sc;
	volatile struct se_regs	*sc_regs;
	int		sc_adapter_type;
	int		sc_adapter_iv;	/* int. vec */
	int		sc_options;	/* options for this instance */
	int		sc_reqlen;	/* requested transfer length */
	struct se_dma_handle *sc_dma;
	/* DMA command block for the OBIO controller. */
	void		*sc_dmacmd;
};

/* Options for disconnect/reselect, DMA, and interrupts. */
#define	SE_NO_DISCONNECT	0xff
#define	SE_NO_PARITY_CHK	0xff00
#define	SE_FORCE_POLLING	0x10000
#define	SE_DISABLE_DMA		0x20000

void se_dma_alloc __P((struct ncr5380_softc *));
void se_dma_free __P((struct ncr5380_softc *));
void se_dma_poll __P((struct ncr5380_softc *));

void se_dma_setup __P((struct ncr5380_softc *));
void se_dma_start __P((struct ncr5380_softc *));
void se_dma_eop __P((struct ncr5380_softc *));
void se_dma_stop __P((struct ncr5380_softc *));

void se_intr_on __P((struct ncr5380_softc *));
void se_intr_off __P((struct ncr5380_softc *));

static int  se_intr __P((void *));
static void se_reset __P((struct ncr5380_softc *));

/*
 * New-style autoconfig attachment
 */

static int	se_match __P((struct device *, struct cfdata *, void *));
static void	se_attach __P((struct device *, struct device *, void *));

struct cfattach si_sebuf_ca = {
	sizeof(struct se_softc), se_match, se_attach
};

static void	se_minphys __P((struct buf *));
static struct scsipi_adapter	se_ops = {
	ncr5380_scsi_cmd,	/* scsi_cmd() */
	se_minphys,		/* scsi_minphys() */
	NULL,			/* open_target_lu() */
	NULL,			/* close_target_lu() */
};

/* This is copied from julian's bt driver */
/* "so we have a default dev struct for our link struct." */
static struct scsipi_device se_dev = {
	NULL,		/* Use default error handler. */
	NULL,		/* Use default start handler. */
	NULL,		/* Use default async handler. */
	NULL,		/* Use default "done" routine. */
};

/* Options for disconnect/reselect, DMA, and interrupts. */
int se_options = SE_DISABLE_DMA | SE_FORCE_POLLING | 0xff;
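/*
 * The per-instance config-file "flags" word uses the same layout
 * (this summary is just read off the option masks above):
 *	bits 0-7	per-target "no disconnect" mask   (SE_NO_DISCONNECT)
 *	bits 8-15	per-target "no parity check" mask (SE_NO_PARITY_CHK)
 *	bit  16		force polled transfers            (SE_FORCE_POLLING)
 *	bit  17		never use DMA                     (SE_DISABLE_DMA)
 * So the default above means: poll, no DMA, and no disconnect for
 * any target.
 */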
/* How long to wait for DMA before declaring an error. */
int se_dma_intr_timo = 500;	/* ticks (sec. X 100) */

int se_debug = 0;
#ifdef	DEBUG
static int se_link_flags = 0 /* | SDEV_DB2 */ ;
#endif


static int
se_match(parent, cf, args)
	struct device *parent;
	struct cfdata *cf;
	void *args;
{
	struct sebuf_attach_args *aa = args;

	/* Match by name. */
	if (strcmp(aa->name, "se"))
		return (0);

	/* Anything else to check? */

	return (1);
}

static void
se_attach(parent, self, args)
	struct device *parent, *self;
	void *args;
{
	struct se_softc *sc = (struct se_softc *) self;
	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
	struct cfdata *cf = self->dv_cfdata;
	struct sebuf_attach_args *aa = args;
	volatile struct se_regs *regs;
	int i;

	/* Get options from config flags if specified. */
	if (cf->cf_flags)
		sc->sc_options = cf->cf_flags;
	else
		sc->sc_options = se_options;

	printf(": options=0x%x\n", sc->sc_options);

	sc->sc_adapter_type = aa->ca.ca_bustype;
	sc->sc_adapter_iv = aa->ca.ca_intvec;
	sc->sc_regs = regs = aa->regs;

	/*
	 * MD function pointers used by the MI code.
	 */
	ncr_sc->sc_pio_out = ncr5380_pio_out;
	ncr_sc->sc_pio_in  = ncr5380_pio_in;

#if 0	/* XXX - not yet... */
	ncr_sc->sc_dma_alloc = se_dma_alloc;
	ncr_sc->sc_dma_free  = se_dma_free;
	ncr_sc->sc_dma_setup = se_dma_setup;
	ncr_sc->sc_dma_start = se_dma_start;
	ncr_sc->sc_dma_poll  = se_dma_poll;
	ncr_sc->sc_dma_eop   = se_dma_eop;
	ncr_sc->sc_dma_stop  = se_dma_stop;
	ncr_sc->sc_intr_on   = se_intr_on;
	ncr_sc->sc_intr_off  = se_intr_off;
#endif	/* XXX */

	/* Attach interrupt handler. */
	isr_add_vectored(se_intr, (void *)sc,
	    aa->ca.ca_intpri, aa->ca.ca_intvec);

	/* Reset the hardware. */
	se_reset(ncr_sc);

	/* Do the common attach stuff. */

	/*
	 * Support the "options" (config file flags).
	 * Disconnect/reselect is a per-target mask.
	 * Interrupts and DMA are per-controller.
	 */
	ncr_sc->sc_no_disconnect =
	    (sc->sc_options & SE_NO_DISCONNECT);
	ncr_sc->sc_parity_disable =
	    (sc->sc_options & SE_NO_PARITY_CHK) >> 8;
	if (sc->sc_options & SE_FORCE_POLLING)
		ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;

#if 1	/* XXX - Temporary */
	/* XXX - In case we think DMA is completely broken... */
	if (sc->sc_options & SE_DISABLE_DMA) {
		/* Override this function pointer. */
		ncr_sc->sc_dma_alloc = NULL;
	}
#endif
	ncr_sc->sc_min_dma_len = MIN_DMA_LEN;

	/*
	 * Fill in the prototype scsi_link.
	 */
	ncr_sc->sc_link.scsipi_scsi.channel = SCSI_CHANNEL_ONLY_ONE;
	ncr_sc->sc_link.adapter_softc = sc;
	ncr_sc->sc_link.scsipi_scsi.adapter_target = 7;
	ncr_sc->sc_link.adapter = &se_ops;
	ncr_sc->sc_link.device = &se_dev;
	ncr_sc->sc_link.type = BUS_SCSI;

#ifdef	DEBUG
	if (se_debug)
		printf("se: Set TheSoftC=%p TheRegs=%p\n", sc, regs);
	ncr_sc->sc_link.flags |= se_link_flags;
#endif

	/*
	 * Initialize fields used by the MI code
	 */
	ncr_sc->sci_r0 = &regs->ncrregs[0];
	ncr_sc->sci_r1 = &regs->ncrregs[1];
	ncr_sc->sci_r2 = &regs->ncrregs[2];
	ncr_sc->sci_r3 = &regs->ncrregs[3];
	ncr_sc->sci_r4 = &regs->ncrregs[4];
	ncr_sc->sci_r5 = &regs->ncrregs[5];
	ncr_sc->sci_r6 = &regs->ncrregs[6];
	ncr_sc->sci_r7 = &regs->ncrregs[7];

	/*
	 * Allocate DMA handles.
	 */
	i = SCI_OPENINGS * sizeof(struct se_dma_handle);
	sc->sc_dma = (struct se_dma_handle *)
	    malloc(i, M_DEVBUF, M_WAITOK);
	if (sc->sc_dma == NULL)
		panic("se: dma_malloc failed\n");
	for (i = 0; i < SCI_OPENINGS; i++)
		sc->sc_dma[i].dh_flags = 0;

	/*
	 * Initialize the se board itself.
	 */
	ncr5380_init(ncr_sc);
	ncr5380_reset_scsibus(ncr_sc);
	config_found(&(ncr_sc->sc_dev), &(ncr_sc->sc_link), scsiprint);
}

static void
se_reset(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	volatile struct se_regs *se = sc->sc_regs;

#ifdef	DEBUG
	if (se_debug) {
		printf("se_reset\n");
	}
#endif

	/* The reset bits in the CSR are active low. */
	se->se_csr = 0;
	delay(10);
	se->se_csr = SE_CSR_SCSI_RES /* | SE_CSR_INTR_EN */ ;
	delay(10);

	/* Make sure the DMA engine is stopped. */
	se->dma_addr = 0;
	se->dma_cntr = 0;
	se->se_ivec = sc->sc_adapter_iv;
}

/*
 * This is called when the bus is going idle,
 * so we want to enable the SBC interrupts.
 * That is controlled by the DMA enable!
 * Who would have guessed!
 * What a NASTY trick!
 */
void
se_intr_on(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	volatile struct se_regs *se = sc->sc_regs;

	/* receive mode should be safer */
	se->se_csr &= ~SE_CSR_SEND;

	/* Clear the count so nothing happens. */
	se->dma_cntr = 0;

	/* Clear the start address too. (paranoid?) */
	se->dma_addr = 0;

	/* Finally, enable the DMA engine. */
	se->se_csr |= SE_CSR_INTR_EN;
}

/*
 * This is called when the bus is idle and we are
 * about to start playing with the SBC chip.
 */
void
se_intr_off(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	volatile struct se_regs *se = sc->sc_regs;

	se->se_csr &= ~SE_CSR_INTR_EN;
}

/*
 * This function is called during the COMMAND or MSG_IN phase
 * that precedes a DATA_IN or DATA_OUT phase, in case we need
 * to set up the DMA engine before the bus enters a DATA phase.
 *
 * On the VME version, set up the start address, but clear the
 * count (to make sure it stays idle) and set that later.
 * XXX: The VME adapter appears to suppress SBC interrupts
 * when the FIFO is not empty or the FIFO count is non-zero!
 * XXX: Need to copy data into the DMA buffer...
 */
void
se_dma_setup(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;
	volatile struct se_regs *se = sc->sc_regs;
	long data_pa;
	int xlen;

	/*
	 * Get the DMA mapping for this segment.
	 * XXX - Should separate allocation and mapin.
	 */
	data_pa = 0;	/* XXX se_dma_kvtopa(dh->dh_dma); */
	data_pa += (ncr_sc->sc_dataptr - dh->dh_addr);
	if (data_pa & 1)
		panic("se_dma_start: bad pa=0x%lx", data_pa);
	xlen = ncr_sc->sc_datalen;
	xlen &= ~1;		/* XXX: necessary? */
	sc->sc_reqlen = xlen;	/* XXX: or less? */

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("se_dma_setup: dh=%p, pa=0x%lx, xlen=0x%x\n",
		    dh, data_pa, xlen);
	}
#endif

	/* Set direction (send/recv) */
	if (dh->dh_flags & SIDH_OUT) {
		se->se_csr |= SE_CSR_SEND;
	} else {
		se->se_csr &= ~SE_CSR_SEND;
	}

	/* Load the start address. */
	se->dma_addr = (ushort)(data_pa & 0xFFFF);

	/*
	 * Keep the count zero or it may start early!
	 */
	se->dma_cntr = 0;
}


void
se_dma_start(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;
	volatile struct se_regs *se = sc->sc_regs;
	int s, xlen;

	xlen = sc->sc_reqlen;

	/* This MAY be time critical (not sure). */
	s = splhigh();

	se->dma_cntr = (ushort)(xlen & 0xFFFF);

	/*
	 * Acknowledge the phase change.  (After DMA setup!)
	 * Put the SBIC into DMA mode, and start the transfer.
	 */
	if (dh->dh_flags & SIDH_OUT) {
		*ncr_sc->sci_tcmd = PHASE_DATA_OUT;
		SCI_CLR_INTR(ncr_sc);
		*ncr_sc->sci_icmd = SCI_ICMD_DATA;
		*ncr_sc->sci_mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		*ncr_sc->sci_dma_send = 0;	/* start it */
	} else {
		*ncr_sc->sci_tcmd = PHASE_DATA_IN;
		SCI_CLR_INTR(ncr_sc);
		*ncr_sc->sci_icmd = 0;
		*ncr_sc->sci_mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		*ncr_sc->sci_irecv = 0;		/* start it */
	}

	/* Let'er rip! */
	se->se_csr |= SE_CSR_INTR_EN;

	splx(s);
	ncr_sc->sc_state |= NCR_DOINGDMA;

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("se_dma_start: started, flags=0x%x\n",
		    ncr_sc->sc_state);
	}
#endif
}

void
se_dma_eop(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{

	/* Not needed - DMA was stopped prior to examining sci_csr */
}


void
se_dma_stop(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;
	volatile struct se_regs *se = sc->sc_regs;
	int resid, ntrans;

	if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
#ifdef	DEBUG
		printf("se_dma_stop: dma not running\n");
#endif
		return;
	}
	ncr_sc->sc_state &= ~NCR_DOINGDMA;

	/* First, halt the DMA engine. */
	se->se_csr &= ~SE_CSR_INTR_EN;	/* VME only */

	/* Set an impossible phase to prevent data movement? */
	*ncr_sc->sci_tcmd = PHASE_INVALID;

	/* Note that timeout may have set the error flag. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		goto out;

	/* XXX: Wait for DMA to actually finish? */

	/*
	 * Now try to figure out how much actually transferred
	 */
	resid = se->dma_cntr & 0xFFFF;
	if (dh->dh_flags & SIDH_OUT)
		if ((resid > 0) && (resid < sc->sc_reqlen))
			resid++;
	ntrans = sc->sc_reqlen - resid;

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("se_dma_stop: resid=0x%x ntrans=0x%x\n",
		    resid, ntrans);
	}
#endif

	if (ntrans < MIN_DMA_LEN) {
		printf("se: fifo count: 0x%x\n", resid);
		ncr_sc->sc_state |= NCR_ABORTING;
		goto out;
	}
	if (ntrans > ncr_sc->sc_datalen)
		panic("se_dma_stop: excess transfer");

	/* Adjust data pointer */
	ncr_sc->sc_dataptr += ntrans;
	ncr_sc->sc_datalen -= ntrans;

out:
	se->dma_addr = 0;
	se->dma_cntr = 0;

	/* Put SBIC back in PIO mode. */
	*ncr_sc->sci_mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
	*ncr_sc->sci_icmd = 0;
}

/*****************************************************************/

static void
se_minphys(struct buf *bp)
{
	if (bp->b_bcount > MAX_DMA_LEN) {
#ifdef	DEBUG
		if (se_debug) {
			printf("se_minphys len = 0x%x.\n", bp->b_bcount);
			Debugger();
		}
#endif
		bp->b_bcount = MAX_DMA_LEN;
	}
	return (minphys(bp));
}

int
se_intr(void *arg)
{
	struct se_softc *sc = arg;
	volatile struct se_regs *se = sc->sc_regs;
	int dma_error, claimed;
	u_short csr;

	claimed = 0;
	dma_error = 0;

	/* SBC interrupt? DMA interrupt? */
	csr = se->se_csr;
	NCR_TRACE("se_intr: csr=0x%x\n", csr);

	if (csr & SE_CSR_SBC_IP) {
		claimed = ncr5380_intr(&sc->ncr_sc);
#ifdef	DEBUG
		if (!claimed) {
			printf("se_intr: spurious from SBC\n");
			if (se_debug & 4) {
				Debugger();	/* XXX */
			}
		}
#endif
		/* Yes, we DID cause this interrupt. */
		claimed = 1;
	}

	return (claimed);
}


/*****************************************************************
 * Common functions for DMA
 ****************************************************************/

/*
 * Allocate a DMA handle and put it in sc->sc_dma.  Prepare
 * for DMA transfer.  On the Sun3/E, this means we have to
 * allocate space in the DMA buffer for this transfer.
 */
void
se_dma_alloc(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct scsipi_xfer *xs = sr->sr_xs;
	struct se_dma_handle *dh;
	int i, xlen;
	u_long addr;

#ifdef	DIAGNOSTIC
	if (sr->sr_dma_hand != NULL)
		panic("se_dma_alloc: already have DMA handle");
#endif

	addr = (u_long) ncr_sc->sc_dataptr;
	xlen = ncr_sc->sc_datalen;

	/* If the DMA start addr is misaligned then do PIO */
	if ((addr & 1) || (xlen & 1)) {
		printf("se_dma_alloc: misaligned.\n");
		return;
	}

	/* Make sure our caller checked sc_min_dma_len. */
	if (xlen < MIN_DMA_LEN)
		panic("se_dma_alloc: xlen=0x%x\n", xlen);

	/*
	 * Never attempt single transfers of more than 63k, because
	 * our count register may be only 16 bits (as on an OBIO adapter).
	 * This should never happen since we are already bounded by minphys().
	 * XXX - Should just segment these...
	 */
	if (xlen > MAX_DMA_LEN) {
		printf("se_dma_alloc: excessive xlen=0x%x\n", xlen);
		Debugger();
		ncr_sc->sc_datalen = xlen = MAX_DMA_LEN;
	}

	/* Find a free DMA handle.  Guaranteed to find one since we
	   have as many DMA handles as there are command openings. */
	for (i = 0; i < SCI_OPENINGS; i++) {
		if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
			goto found;
	}
	panic("se: no free DMA handles.");
found:

	dh = &sc->sc_dma[i];
	dh->dh_flags = SIDH_BUSY;

	/* Copy the "write" flag for convenience. */
	if (xs->flags & SCSI_DATA_OUT)
		dh->dh_flags |= SIDH_OUT;

	dh->dh_addr = (u_char*) addr;
	dh->dh_maplen = xlen;
	dh->dh_dma = 0;	/* XXX - Allocate space in DMA buffer. */
	/* XXX: dh->dh_dma = alloc(xlen) */
	if (!dh->dh_dma) {
		/* Can't remap segment */
		printf("se_dma_alloc: can't remap %p/0x%x\n",
		    dh->dh_addr, dh->dh_maplen);
		dh->dh_flags = 0;
		return;
	}

	/* success */
	sr->sr_dma_hand = dh;

	return;
}
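/*
 * XXX: Note that se_dma_alloc() above always bails out, because the
 * code that should find space in the on-board DMA buffer was never
 * written (dh_dma is left at zero).  One minimal sketch, purely an
 * assumption and not the original design: with exactly SCI_OPENINGS
 * handles and every transfer capped at MAX_DMA_LEN, handle "i" could
 * simply own a fixed slot in the buffer, e.g.
 *
 *	dh->dh_dma = 1 + (i * MAX_DMA_LEN);
 *
 * biased by one so that zero still means "no DMA space" for the
 * !dh->dh_dma test, with the bias subtracted wherever the offset is
 * actually used, and se_dma_free() clearing the field as it does now.
 */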
void
se_dma_free(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;

#ifdef	DIAGNOSTIC
	if (dh == NULL)
		panic("se_dma_free: no DMA handle");
#endif

	if (ncr_sc->sc_state & NCR_DOINGDMA)
		panic("se_dma_free: free while in progress");

	if (dh->dh_flags & SIDH_BUSY) {
		/* XXX: Should separate allocation and mapping. */
		/* XXX: Give back the DMA space. */
		/* XXX: free((caddr_t)dh->dh_dma, dh->dh_maplen); */
		dh->dh_dma = 0;
		dh->dh_flags = 0;
	}
	sr->sr_dma_hand = NULL;
}


#define	CSR_MASK	SE_CSR_SBC_IP
#define	POLL_TIMO	50000	/* X100 = 5 sec. */

/*
 * Poll (spin-wait) for DMA completion.
 * Called right after xx_dma_start(), and
 * xx_dma_stop() will be called next.
 * Same for either VME or OBIO.
 */
void
se_dma_poll(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	volatile struct se_regs *se = sc->sc_regs;
	int tmo;

	/* Make sure DMA started successfully. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		return;

	/*
	 * XXX: The Sun driver waits for ~SE_CSR_DMA_ACTIVE here
	 * XXX: (on obio) or even worse (on vme) a 10 ms delay!
	 * XXX: I really doubt that is necessary...
	 */

	/* Wait for any "dma complete" or error bits. */
	tmo = POLL_TIMO;
	for (;;) {
		if (se->se_csr & CSR_MASK)
			break;
		if (--tmo <= 0) {
			printf("se: DMA timeout (while polling)\n");
			/* Indicate timeout as MI code would. */
			sr->sr_flags |= SR_OVERDUE;
			break;
		}
		delay(100);
	}
	NCR_TRACE("se_dma_poll: waited %d\n",
	    POLL_TIMO - tmo);

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("se_dma_poll: done, csr=0x%x\n", se->se_csr);
	}
#endif
}