/*	$NetBSD: si_sebuf.c,v 1.24 2006/03/29 04:16:48 thorpej Exp $	*/

/*-
 * Copyright (c) 1996 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Gordon W. Ross.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Sun3/E SCSI driver (machine-dependent portion).
 * The machine-independent parts are in ncr5380sbc.c
 *
 * XXX - Mostly from the si driver.  Merge?
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: si_sebuf.c,v 1.24 2006/03/29 04:16:48 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_debug.h>
#include <dev/scsipi/scsiconf.h>

#include <machine/autoconf.h>

/* #define DEBUG XXX */

#include <dev/ic/ncr5380reg.h>
#include <dev/ic/ncr5380var.h>

#include "sereg.h"
#include "sevar.h"

/*
 * Transfers smaller than this are done using PIO
 * (on the assumption they're not worth the DMA overhead).
 */
#define MIN_DMA_LEN 128

/*
 * Transfers larger than 65535 bytes need to be split up.
 * (Some of the FIFO logic has only 16-bit counters.)
 * Make the size an integer multiple of the page size
 * to avoid buf/cluster remap problems.  (paranoid?)
 */
#define MAX_DMA_LEN 0xE000
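
/*
 * Informal example of the limits above: a 128 KB request is clamped by
 * se_minphys() below to MAX_DMA_LEN (0xE000 = 56 KB) per transfer, while
 * anything shorter than MIN_DMA_LEN (128 bytes) is left to the MI
 * ncr5380sbc code, which simply does such transfers with PIO (see the
 * sc_min_dma_len setting in se_attach()).
 */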

/*
 * This structure is used to keep track of mapped DMA requests.
 */
struct se_dma_handle {
	int		dh_flags;
#define	SIDH_BUSY	1		/* This DH is in use */
#define	SIDH_OUT	2		/* DMA does data out (write) */
	u_char		*dh_addr;	/* KVA of start of buffer */
	int		dh_maplen;	/* Length of KVA mapping. */
	long		dh_dma;		/* Offset in DMA buffer. */
};

/*
 * The first structure member has to be the ncr5380_softc
 * so we can just cast to go back and forth between them.
 */
struct se_softc {
	struct ncr5380_softc	ncr_sc;
	volatile struct se_regs	*sc_regs;
	int		sc_adapter_type;
	int		sc_adapter_iv;	/* int. vec */
	int		sc_options;	/* options for this instance */
	int		sc_reqlen;	/* requested transfer length */
	struct se_dma_handle *sc_dma;
	/* DMA command block for the OBIO controller. */
	void		*sc_dmacmd;
};

/* Options for disconnect/reselect, DMA, and interrupts. */
#define SE_NO_DISCONNECT	0xff
#define SE_NO_PARITY_CHK	0xff00
#define SE_FORCE_POLLING	0x10000
#define SE_DISABLE_DMA		0x20000

void se_dma_alloc(struct ncr5380_softc *);
void se_dma_free(struct ncr5380_softc *);
void se_dma_poll(struct ncr5380_softc *);

void se_dma_setup(struct ncr5380_softc *);
void se_dma_start(struct ncr5380_softc *);
void se_dma_eop(struct ncr5380_softc *);
void se_dma_stop(struct ncr5380_softc *);

void se_intr_on (struct ncr5380_softc *);
void se_intr_off(struct ncr5380_softc *);

static int  se_intr(void *);
static void se_reset(struct ncr5380_softc *);

/*
 * New-style autoconfig attachment
 */

static int	se_match(struct device *, struct cfdata *, void *);
static void	se_attach(struct device *, struct device *, void *);

CFATTACH_DECL(si_sebuf, sizeof(struct se_softc),
    se_match, se_attach, NULL, NULL);

static void	se_minphys(struct buf *);

/* Options for disconnect/reselect, DMA, and interrupts. */
int se_options = SE_DISABLE_DMA | SE_FORCE_POLLING | 0xff;

/* How long to wait for DMA before declaring an error. */
int se_dma_intr_timo = 500;	/* ticks (sec. X 100) */

int se_debug = 0;

static int
se_match(struct device *parent, struct cfdata *cf, void *args)
{
	struct sebuf_attach_args *aa = args;

	/* Match by name. */
	if (strcmp(aa->name, "se"))
		return (0);

	/* Anything else to check? */

	return (1);
}

static void
se_attach(struct device *parent, struct device *self, void *args)
{
	struct se_softc *sc = (struct se_softc *)self;
	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
	struct cfdata *cf = device_cfdata(self);
	struct sebuf_attach_args *aa = args;
	volatile struct se_regs *regs;
	int i;

	/* Get options from config flags if specified. */
	if (cf->cf_flags)
		sc->sc_options = cf->cf_flags;
	else
		sc->sc_options = se_options;

	printf(": options=0x%x\n", sc->sc_options);

	sc->sc_adapter_type = aa->ca.ca_bustype;
	sc->sc_adapter_iv = aa->ca.ca_intvec;
	sc->sc_regs = regs = aa->regs;

	/*
	 * MD function pointers used by the MI code.
	 */
	ncr_sc->sc_pio_out = ncr5380_pio_out;
	ncr_sc->sc_pio_in  = ncr5380_pio_in;
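
	/*
	 * Note: as long as the DMA entry points below stay inside the
	 * "#if 0" (so sc_dma_alloc is left NULL), the MI ncr5380sbc code
	 * never gets a DMA handle and does every transfer with PIO; the
	 * SE_DISABLE_DMA override further down appears to rely on that
	 * same behaviour.
	 */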
#if 0	/* XXX - not yet... */
	ncr_sc->sc_dma_alloc = se_dma_alloc;
	ncr_sc->sc_dma_free  = se_dma_free;
	ncr_sc->sc_dma_setup = se_dma_setup;
	ncr_sc->sc_dma_start = se_dma_start;
	ncr_sc->sc_dma_poll  = se_dma_poll;
	ncr_sc->sc_dma_eop   = se_dma_eop;
	ncr_sc->sc_dma_stop  = se_dma_stop;
	ncr_sc->sc_intr_on   = se_intr_on;
	ncr_sc->sc_intr_off  = se_intr_off;
#endif	/* XXX */

	/* Attach interrupt handler. */
	isr_add_vectored(se_intr, (void *)sc,
	    aa->ca.ca_intpri, aa->ca.ca_intvec);

	/* Reset the hardware. */
	se_reset(ncr_sc);

	/* Do the common attach stuff. */

	/*
	 * Support the "options" (config file flags).
	 * Disconnect/reselect is a per-target mask.
	 * Interrupts and DMA are per-controller.
	 */
	ncr_sc->sc_no_disconnect =
	    (sc->sc_options & SE_NO_DISCONNECT);
	ncr_sc->sc_parity_disable =
	    (sc->sc_options & SE_NO_PARITY_CHK) >> 8;
	if (sc->sc_options & SE_FORCE_POLLING)
		ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;

#if 1	/* XXX - Temporary */
	/* XXX - In case we think DMA is completely broken... */
	if (sc->sc_options & SE_DISABLE_DMA) {
		/* Override this function pointer. */
		ncr_sc->sc_dma_alloc = NULL;
	}
#endif
	ncr_sc->sc_min_dma_len = MIN_DMA_LEN;

	/*
	 * Initialize fields used by the MI code
	 */
	ncr_sc->sci_r0 = &regs->ncrregs[0];
	ncr_sc->sci_r1 = &regs->ncrregs[1];
	ncr_sc->sci_r2 = &regs->ncrregs[2];
	ncr_sc->sci_r3 = &regs->ncrregs[3];
	ncr_sc->sci_r4 = &regs->ncrregs[4];
	ncr_sc->sci_r5 = &regs->ncrregs[5];
	ncr_sc->sci_r6 = &regs->ncrregs[6];
	ncr_sc->sci_r7 = &regs->ncrregs[7];

	ncr_sc->sc_rev = NCR_VARIANT_NCR5380;

	/*
	 * Allocate DMA handles.
	 */
	i = SCI_OPENINGS * sizeof(struct se_dma_handle);
	sc->sc_dma = (struct se_dma_handle *)
	    malloc(i, M_DEVBUF, M_WAITOK);
	if (sc->sc_dma == NULL)
		panic("se: dma_malloc failed");
	for (i = 0; i < SCI_OPENINGS; i++)
		sc->sc_dma[i].dh_flags = 0;

	ncr_sc->sc_channel.chan_id = 7;
	ncr_sc->sc_adapter.adapt_minphys = se_minphys;

	/*
	 * Initialize se board itself.
	 */
	ncr5380_attach(ncr_sc);
}
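
/*
 * Example of the "options" handled above (hypothetical config fragment,
 * not taken from an actual kernel config): attaching with something like
 * "se0 at sebuf0 flags 0x10001" would land in cf_flags and thus in
 * sc_options, forcing polled operation (SE_FORCE_POLLING) and setting
 * bit 0 of the no-disconnect mask, i.e. no disconnect/reselect for
 * SCSI target 0.
 */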

static void
se_reset(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	volatile struct se_regs *se = sc->sc_regs;

#ifdef	DEBUG
	if (se_debug) {
		printf("se_reset\n");
	}
#endif

	/* The reset bits in the CSR are active low. */
	se->se_csr = 0;
	delay(10);
	se->se_csr = SE_CSR_SCSI_RES /* | SE_CSR_INTR_EN */ ;
	delay(10);

	/* Make sure the DMA engine is stopped. */
	se->dma_addr = 0;
	se->dma_cntr = 0;
	se->se_ivec = sc->sc_adapter_iv;
}

/*
 * This is called when the bus is going idle,
 * so we want to enable the SBC interrupts.
 * That is controlled by the DMA enable!
 * Who would have guessed!
 * What a NASTY trick!
 */
void
se_intr_on(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	volatile struct se_regs *se = sc->sc_regs;

	/* receive mode should be safer */
	se->se_csr &= ~SE_CSR_SEND;

	/* Clear the count so nothing happens. */
	se->dma_cntr = 0;

	/* Clear the start address too. (paranoid?) */
	se->dma_addr = 0;

	/* Finally, enable the DMA engine. */
	se->se_csr |= SE_CSR_INTR_EN;
}

/*
 * This is called when the bus is idle and we are
 * about to start playing with the SBC chip.
 */
void
se_intr_off(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	volatile struct se_regs *se = sc->sc_regs;

	se->se_csr &= ~SE_CSR_INTR_EN;
}

/*
 * This function is called during the COMMAND or MSG_IN phase
 * that precedes a DATA_IN or DATA_OUT phase, in case we need
 * to set up the DMA engine before the bus enters a DATA phase.
 *
 * On the VME version, set up the start address, but clear the
 * count (to make sure it stays idle) and set that later.
 * XXX: The VME adapter appears to suppress SBC interrupts
 * when the FIFO is not empty or the FIFO count is non-zero!
 * XXX: Need to copy data into the DMA buffer...
 */
void
se_dma_setup(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;
	volatile struct se_regs *se = sc->sc_regs;
	long data_pa;
	int xlen;

	/*
	 * Get the DMA mapping for this segment.
	 * XXX - Should separate allocation and mapin.
	 */
	data_pa = 0;	/* XXX se_dma_kvtopa(dh->dh_dma); */
	data_pa += (ncr_sc->sc_dataptr - dh->dh_addr);
	if (data_pa & 1)
		panic("se_dma_start: bad pa=0x%lx", data_pa);
	xlen = ncr_sc->sc_datalen;
	xlen &= ~1;		/* XXX: necessary? */
	sc->sc_reqlen = xlen;	/* XXX: or less? */

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("se_dma_setup: dh=%p, pa=0x%lx, xlen=0x%x\n",
		    dh, data_pa, xlen);
	}
#endif

	/* Set direction (send/recv) */
	if (dh->dh_flags & SIDH_OUT) {
		se->se_csr |= SE_CSR_SEND;
	} else {
		se->se_csr &= ~SE_CSR_SEND;
	}

	/* Load the start address. */
	se->dma_addr = (ushort)(data_pa & 0xFFFF);

	/*
	 * Keep the count zero or it may start early!
	 */
	se->dma_cntr = 0;
}


void
se_dma_start(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;
	volatile struct se_regs *se = sc->sc_regs;
	int s, xlen;

	xlen = sc->sc_reqlen;

	/* This MAY be time critical (not sure). */
	s = splhigh();

	se->dma_cntr = (ushort)(xlen & 0xFFFF);

	/*
	 * Acknowledge the phase change. (After DMA setup!)
	 * Put the SBIC into DMA mode, and start the transfer.
	 */
	if (dh->dh_flags & SIDH_OUT) {
		*ncr_sc->sci_tcmd = PHASE_DATA_OUT;
		SCI_CLR_INTR(ncr_sc);
		*ncr_sc->sci_icmd = SCI_ICMD_DATA;
		*ncr_sc->sci_mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		*ncr_sc->sci_dma_send = 0;	/* start it */
	} else {
		*ncr_sc->sci_tcmd = PHASE_DATA_IN;
		SCI_CLR_INTR(ncr_sc);
		*ncr_sc->sci_icmd = 0;
		*ncr_sc->sci_mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		*ncr_sc->sci_irecv = 0;	/* start it */
	}

	/* Let 'er rip! */
	se->se_csr |= SE_CSR_INTR_EN;

	splx(s);
	ncr_sc->sc_state |= NCR_DOINGDMA;

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("se_dma_start: started, flags=0x%x\n",
		    ncr_sc->sc_state);
	}
#endif
}


void
se_dma_eop(struct ncr5380_softc *ncr_sc)
{

	/* Not needed - DMA was stopped prior to examining sci_csr */
}
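
/*
 * Informal example of the residual bookkeeping done in se_dma_stop()
 * below: if se_dma_start() loaded dma_cntr with sc_reqlen = 0x2000 and
 * the counter reads back as 0x0800 when the engine is halted, then for
 * a data-in transfer ntrans = 0x2000 - 0x0800 = 0x1800 bytes are counted
 * as transferred and sc_dataptr/sc_datalen are adjusted by that amount.
 * For data-out, resid is bumped by one in that situation, apparently to
 * allow for a byte still sitting in the FIFO.
 */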

void
se_dma_stop(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;
	volatile struct se_regs *se = sc->sc_regs;
	int resid, ntrans;

	if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
#ifdef	DEBUG
		printf("se_dma_stop: DMA not running\n");
#endif
		return;
	}
	ncr_sc->sc_state &= ~NCR_DOINGDMA;

	/* First, halt the DMA engine. */
	se->se_csr &= ~SE_CSR_INTR_EN;	/* VME only */

	/* Set an impossible phase to prevent data movement? */
	*ncr_sc->sci_tcmd = PHASE_INVALID;

	/* Note that timeout may have set the error flag. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		goto out;

	/* XXX: Wait for DMA to actually finish? */

	/*
	 * Now try to figure out how much actually transferred
	 */
	resid = se->dma_cntr & 0xFFFF;
	if (dh->dh_flags & SIDH_OUT)
		if ((resid > 0) && (resid < sc->sc_reqlen))
			resid++;
	ntrans = sc->sc_reqlen - resid;

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("se_dma_stop: resid=0x%x ntrans=0x%x\n",
		    resid, ntrans);
	}
#endif

	if (ntrans < MIN_DMA_LEN) {
		printf("se: fifo count: 0x%x\n", resid);
		ncr_sc->sc_state |= NCR_ABORTING;
		goto out;
	}
	if (ntrans > ncr_sc->sc_datalen)
		panic("se_dma_stop: excess transfer");

	/* Adjust data pointer */
	ncr_sc->sc_dataptr += ntrans;
	ncr_sc->sc_datalen -= ntrans;

out:
	se->dma_addr = 0;
	se->dma_cntr = 0;

	/* Put SBIC back in PIO mode. */
	*ncr_sc->sci_mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
	*ncr_sc->sci_icmd = 0;
}

/*****************************************************************/

static void
se_minphys(struct buf *bp)
{

	if (bp->b_bcount > MAX_DMA_LEN)
		bp->b_bcount = MAX_DMA_LEN;

	minphys(bp);
}


int
se_intr(void *arg)
{
	struct se_softc *sc = arg;
	volatile struct se_regs *se = sc->sc_regs;
	int dma_error, claimed;
	u_short csr;

	claimed = 0;
	dma_error = 0;

	/* SBC interrupt? DMA interrupt? */
	csr = se->se_csr;
	NCR_TRACE("se_intr: csr=0x%x\n", csr);

	if (csr & SE_CSR_SBC_IP) {
		claimed = ncr5380_intr(&sc->ncr_sc);
#ifdef	DEBUG
		if (!claimed) {
			printf("se_intr: spurious from SBC\n");
		}
#endif
		/* Yes, we DID cause this interrupt. */
		claimed = 1;
	}

	return (claimed);
}

/*****************************************************************
 * Common functions for DMA
 ****************************************************************/

/*
 * Allocate a DMA handle and put it in sc->sc_dma.  Prepare
 * for DMA transfer.  On the Sun3/E, this means we have to
 * allocate space in the DMA buffer for this transfer.
 */
void
se_dma_alloc(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct scsipi_xfer *xs = sr->sr_xs;
	struct se_dma_handle *dh;
	int i, xlen;
	u_long addr;

#ifdef	DIAGNOSTIC
	if (sr->sr_dma_hand != NULL)
		panic("se_dma_alloc: already have DMA handle");
#endif

	addr = (u_long) ncr_sc->sc_dataptr;
	xlen = ncr_sc->sc_datalen;

	/* If the DMA start addr is misaligned then do PIO */
	if ((addr & 1) || (xlen & 1)) {
		printf("se_dma_alloc: misaligned.\n");
		return;
	}

	/* Make sure our caller checked sc_min_dma_len. */
	if (xlen < MIN_DMA_LEN)
		panic("se_dma_alloc: xlen=0x%x", xlen);

	/*
	 * Never attempt single transfers of more than 63k, because
	 * our count register may be only 16 bits (an OBIO adapter).
	 * This should never happen, since transfers are already
	 * bounded by minphys().
	 * XXX - Should just segment these...
	 */
	if (xlen > MAX_DMA_LEN) {
		printf("se_dma_alloc: excessive xlen=0x%x\n", xlen);
		ncr_sc->sc_datalen = xlen = MAX_DMA_LEN;
	}

	/*
	 * Find a free DMA handle.  Guaranteed to find one, since
	 * we have as many DMA handles as the adapter has openings.
	 */
	for (i = 0; i < SCI_OPENINGS; i++) {
		if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
			goto found;
	}
	panic("se: no free DMA handles.");
found:

	dh = &sc->sc_dma[i];
	dh->dh_flags = SIDH_BUSY;

	/* Copy the "write" flag for convenience. */
	if (xs->xs_control & XS_CTL_DATA_OUT)
		dh->dh_flags |= SIDH_OUT;

	dh->dh_addr = (u_char *)addr;
	dh->dh_maplen = xlen;
	dh->dh_dma = 0;	/* XXX - Allocate space in DMA buffer. */
	/* XXX: dh->dh_dma = alloc(xlen) */
	if (!dh->dh_dma) {
		/* Can't remap segment */
		printf("se_dma_alloc: can't remap %p/0x%x\n",
		    dh->dh_addr, dh->dh_maplen);
		dh->dh_flags = 0;
		return;
	}

	/* success */
	sr->sr_dma_hand = dh;

	return;
}


void
se_dma_free(struct ncr5380_softc *ncr_sc)
{
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;

#ifdef	DIAGNOSTIC
	if (dh == NULL)
		panic("se_dma_free: no DMA handle");
#endif

	if (ncr_sc->sc_state & NCR_DOINGDMA)
		panic("se_dma_free: free while in progress");

	if (dh->dh_flags & SIDH_BUSY) {
		/* XXX: Should separate allocation and mapping. */
		/* XXX: Give back the DMA space. */
		/* XXX: free((caddr_t)dh->dh_dma, dh->dh_maplen); */
		dh->dh_dma = 0;
		dh->dh_flags = 0;
	}
	sr->sr_dma_hand = NULL;
}
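
/*
 * The allocation step in se_dma_alloc() above is still a stub: dh_dma is
 * never set, so the function always bails out and the MI code falls back
 * to PIO.  Below is a minimal sketch of one way the XXX comments might be
 * filled in, kept inside #if 0 because it is an illustration only.  It
 * assumes (hypothetically) an on-board DMA buffer of at least
 * SCI_OPENINGS * MAX_DMA_LEN bytes reachable through a KVA pointer, and
 * that a copy in/out of that buffer is acceptable; sereg.h/sevar.h
 * promise no such layout.
 */
#if 0	/* hypothetical sketch, not compiled */
/* Hypothetical KVA of the on-board DMA buffer (not a real softc field). */
extern u_char *se_dmabuf_kva;

static long
se_dma_buf_alloc(struct se_softc *sc, struct se_dma_handle *dh)
{
	/* One fixed slot per DMA handle; the handle index gives the offset. */
	long off = (dh - sc->sc_dma) * MAX_DMA_LEN;

	/*
	 * For data-out, copy the caller's data into the board buffer up
	 * front; data-in would be copied back out when the handle is freed.
	 * Note that slot 0 yields offset 0, so the "if (!dh->dh_dma)"
	 * failure test in se_dma_alloc() would need a real error indicator.
	 */
	if (dh->dh_flags & SIDH_OUT)
		memcpy(se_dmabuf_kva + off, dh->dh_addr, dh->dh_maplen);

	return (off);
}
#endif	/* hypothetical sketch */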

#define	CSR_MASK	SE_CSR_SBC_IP
#define	POLL_TIMO	50000	/* X100 = 5 sec. */

/*
 * Poll (spin-wait) for DMA completion.
 * Called right after xx_dma_start(), and
 * xx_dma_stop() will be called next.
 * Same for either VME or OBIO.
 */
void
se_dma_poll(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	volatile struct se_regs *se = sc->sc_regs;
	int tmo;

	/* Make sure DMA started successfully. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		return;

	/*
	 * XXX: The Sun driver waits for ~SE_CSR_DMA_ACTIVE here
	 * XXX: (on obio) or, even worse (on vme), a 10 ms delay!
	 * XXX: I really doubt that is necessary...
	 */

	/* Wait for any "DMA complete" or error bits. */
	tmo = POLL_TIMO;
	for (;;) {
		if (se->se_csr & CSR_MASK)
			break;
		if (--tmo <= 0) {
			printf("se: DMA timeout (while polling)\n");
			/* Indicate timeout as MI code would. */
			sr->sr_flags |= SR_OVERDUE;
			break;
		}
		delay(100);
	}
	NCR_TRACE("se_dma_poll: waited %d\n",
	    POLL_TIMO - tmo);

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("se_dma_poll: done, csr=0x%x\n", se->se_csr);
	}
#endif
}