/*	$OpenBSD: adv.c,v 1.51 2020/09/22 19:32:52 krw Exp $	*/
/*	$NetBSD: adv.c,v 1.6 1998/10/28 20:39:45 dante Exp $	*/

/*
 * Generic driver for the Advanced Systems Inc. Narrow SCSI controllers
 *
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Author: Baldassare Dante Profeta <dante@mclink.it>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>

#include <dev/ic/adv.h>
#include <dev/ic/advlib.h>

/* #define ASC_DEBUG */

/******************************************************************************/


static int adv_alloc_ccbs(ASC_SOFTC *);
static int adv_create_ccbs(ASC_SOFTC *, ADV_CCB *, int);
void adv_ccb_free(void *, void *);
static void adv_reset_ccb(ADV_CCB *);
static int adv_init_ccb(ASC_SOFTC *, ADV_CCB *);
void *adv_ccb_alloc(void *);
static void adv_queue_ccb(ASC_SOFTC *, ADV_CCB *);
static void adv_start_ccbs(ASC_SOFTC *);

static u_int8_t *adv_alloc_overrunbuf(char *dvname, bus_dma_tag_t);

static void adv_scsi_cmd(struct scsi_xfer *);
static void adv_narrow_isr_callback(ASC_SOFTC *, ASC_QDONE_INFO *);

static int adv_poll(ASC_SOFTC *, struct scsi_xfer *, int);
static void adv_timeout(void *);
static void adv_watchdog(void *);


/******************************************************************************/


struct cfdriver adv_cd = {
	NULL, "adv", DV_DULL
};


struct scsi_adapter adv_switch = {
	adv_scsi_cmd, NULL, NULL, NULL, NULL
};


#define ADV_ABORT_TIMEOUT	2000	/* time to wait for abort (mSec) */
#define ADV_WATCH_TIMEOUT	1000	/* time to wait for watchdog (mSec) */


/******************************************************************************/
/*                          Control Blocks routines                           */
/******************************************************************************/


static int
adv_alloc_ccbs(sc)
	ASC_SOFTC *sc;
{
	bus_dma_segment_t seg;
	int error, rseg;

	/*
	 * Allocate the control blocks.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adv_control),
	    NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO)) != 0) {
		printf("%s: unable to allocate control structures,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct adv_control), (caddr_t *) & sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control structures, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	/*
	 * Create and load the DMA map used for the control blocks.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adv_control),
	    1, sizeof(struct adv_control), 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_control)) != 0) {
		printf("%s: unable to create control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
	    sc->sc_control, sizeof(struct adv_control), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	return (0);
}


/*
 * Create a set of ccbs and add them to the free list.  Called once
 * by adv_init().  We return the number of CCBs successfully created.
 * CCB data is already zeroed on allocation.
 */
static int
adv_create_ccbs(sc, ccbstore, count)
	ASC_SOFTC *sc;
	ADV_CCB *ccbstore;
	int count;
{
	ADV_CCB *ccb;
	int i, error;

	for (i = 0; i < count; i++) {
		ccb = &ccbstore[i];
		if ((error = adv_init_ccb(sc, ccb)) != 0) {
			printf("%s: unable to initialize ccb, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			return (i);
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
	}

	return (i);
}


/*
 * A ccb is put onto the free list.
 */
void
adv_ccb_free(xsc, xccb)
	void *xsc, *xccb;
{
	ASC_SOFTC *sc = xsc;
	ADV_CCB *ccb = xccb;

	adv_reset_ccb(ccb);

	mtx_enter(&sc->sc_ccb_mtx);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
	mtx_leave(&sc->sc_ccb_mtx);
}


static void
adv_reset_ccb(ccb)
	ADV_CCB *ccb;
{

	ccb->flags = 0;
}


static int
adv_init_ccb(sc, ccb)
	ASC_SOFTC *sc;
	ADV_CCB *ccb;
{
	int error;

	/*
	 * Create the DMA map for this CCB.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
	    (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
	    ASC_MAX_SG_LIST, (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
	if (error) {
		printf("%s: unable to create DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	adv_reset_ccb(ccb);
	return (0);
}


/*
 * Get a free ccb
 */
void *
adv_ccb_alloc(xsc)
	void *xsc;
{
	ASC_SOFTC *sc = xsc;
	ADV_CCB *ccb;

	mtx_enter(&sc->sc_ccb_mtx);
	ccb = TAILQ_FIRST(&sc->sc_free_ccb);
	if (ccb) {
		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
		ccb->flags |= CCB_ALLOC;
	}
	mtx_leave(&sc->sc_ccb_mtx);

	return (ccb);
}

/*
 * Queue a CCB to be sent to the controller, and send it if possible.
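 * The CCB is appended to the sc_waiting_ccb list; adv_start_ccbs() then
 * hands waiting CCBs to AscExeScsiQueue() until the chip reports ASC_BUSY.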
 */
static void
adv_queue_ccb(sc, ccb)
	ASC_SOFTC *sc;
	ADV_CCB *ccb;
{

	timeout_set(&ccb->xs->stimeout, adv_timeout, ccb);
	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

	adv_start_ccbs(sc);
}


static void
adv_start_ccbs(sc)
	ASC_SOFTC *sc;
{
	ADV_CCB *ccb;
	struct scsi_xfer *xs;

	while ((ccb = TAILQ_FIRST(&sc->sc_waiting_ccb)) != NULL) {

		xs = ccb->xs;
		if (ccb->flags & CCB_WATCHDOG)
			timeout_del(&xs->stimeout);

		if (AscExeScsiQueue(sc, &ccb->scsiq) == ASC_BUSY) {
			ccb->flags |= CCB_WATCHDOG;
			timeout_set(&xs->stimeout, adv_watchdog, ccb);
			timeout_add_msec(&xs->stimeout, ADV_WATCH_TIMEOUT);
			break;
		}
		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);

		if ((ccb->xs->flags & SCSI_POLL) == 0) {
			timeout_set(&xs->stimeout, adv_timeout, ccb);
			timeout_add_msec(&xs->stimeout, ccb->timeout);
		}
	}
}


/******************************************************************************/
/*                     DMA-able memory allocation routines                    */
/******************************************************************************/


/*
 * Allocate DMA-able memory for the overrun buffer.
 * This memory can be safely shared among all the AdvanSys boards.
 */
u_int8_t *
adv_alloc_overrunbuf(dvname, dmat)
	char *dvname;
	bus_dma_tag_t dmat;
{
	static u_int8_t *overrunbuf = NULL;

	bus_dmamap_t ovrbuf_dmamap;
	bus_dma_segment_t seg;
	int rseg, error;


	/*
	 * If an overrun buffer has already been allocated, don't allocate
	 * another one; instead return the address of the existing buffer.
	 */
	if (overrunbuf)
		return (overrunbuf);


	if ((error = bus_dmamem_alloc(dmat, ASC_OVERRUN_BSIZE,
	    NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate overrun buffer, error = %d\n",
		    dvname, error);
		return (0);
	}
	if ((error = bus_dmamem_map(dmat, &seg, rseg, ASC_OVERRUN_BSIZE,
	    (caddr_t *) & overrunbuf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map overrun buffer, error = %d\n",
		    dvname, error);

		bus_dmamem_free(dmat, &seg, 1);
		return (0);
	}
	if ((error = bus_dmamap_create(dmat, ASC_OVERRUN_BSIZE, 1,
	    ASC_OVERRUN_BSIZE, 0, BUS_DMA_NOWAIT, &ovrbuf_dmamap)) != 0) {
		printf("%s: unable to create overrun buffer DMA map,"
		    " error = %d\n", dvname, error);

		bus_dmamem_unmap(dmat, overrunbuf, ASC_OVERRUN_BSIZE);
		bus_dmamem_free(dmat, &seg, 1);
		return (0);
	}
	if ((error = bus_dmamap_load(dmat, ovrbuf_dmamap, overrunbuf,
	    ASC_OVERRUN_BSIZE, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load overrun buffer DMA map,"
		    " error = %d\n", dvname, error);

		bus_dmamap_destroy(dmat, ovrbuf_dmamap);
		bus_dmamem_unmap(dmat, overrunbuf, ASC_OVERRUN_BSIZE);
		bus_dmamem_free(dmat, &seg, 1);
		return (0);
	}
	return (overrunbuf);
}


/******************************************************************************/
/*                       SCSI layer interfacing routines                      */
/******************************************************************************/


int
adv_init(sc)
	ASC_SOFTC *sc;
{
	int warn;

	if (!AscFindSignature(sc->sc_iot, sc->sc_ioh))
		panic("adv_init: adv_find_signature failed");

	/*
	 * Read the board configuration
	 */
	AscInitASC_SOFTC(sc);
	warn = AscInitFromEEP(sc);
	if (warn) {
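		/* Report the configuration warning returned by AscInitFromEEP(). */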
printf("%s -get: ", sc->sc_dev.dv_xname); 375 switch (warn) { 376 case -1: 377 printf("Chip is not halted\n"); 378 break; 379 380 case -2: 381 printf("Couldn't get MicroCode Start" 382 " address\n"); 383 break; 384 385 case ASC_WARN_IO_PORT_ROTATE: 386 printf("I/O port address modified\n"); 387 break; 388 389 case ASC_WARN_AUTO_CONFIG: 390 printf("I/O port increment switch enabled\n"); 391 break; 392 393 case ASC_WARN_EEPROM_CHKSUM: 394 printf("EEPROM checksum error\n"); 395 break; 396 397 case ASC_WARN_IRQ_MODIFIED: 398 printf("IRQ modified\n"); 399 break; 400 401 case ASC_WARN_CMD_QNG_CONFLICT: 402 printf("tag queuing enabled w/o disconnects\n"); 403 break; 404 405 default: 406 printf("unknown warning %d\n", warn); 407 } 408 } 409 if (sc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT) 410 sc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT; 411 412 /* 413 * Modify the board configuration 414 */ 415 warn = AscInitFromASC_SOFTC(sc); 416 if (warn) { 417 printf("%s -set: ", sc->sc_dev.dv_xname); 418 switch (warn) { 419 case ASC_WARN_CMD_QNG_CONFLICT: 420 printf("tag queuing enabled w/o disconnects\n"); 421 break; 422 423 case ASC_WARN_AUTO_CONFIG: 424 printf("I/O port increment switch enabled\n"); 425 break; 426 427 default: 428 printf("unknown warning %d\n", warn); 429 } 430 } 431 sc->isr_callback = (ulong) adv_narrow_isr_callback; 432 433 if (!(sc->overrun_buf = adv_alloc_overrunbuf(sc->sc_dev.dv_xname, 434 sc->sc_dmat))) { 435 return (1); 436 } 437 438 return (0); 439 } 440 441 442 void 443 adv_attach(sc) 444 ASC_SOFTC *sc; 445 { 446 struct scsibus_attach_args saa; 447 int i, error; 448 449 /* 450 * Initialize board RISC chip and enable interrupts. 451 */ 452 switch (AscInitDriver(sc)) { 453 case 0: 454 /* AllOK */ 455 break; 456 457 case 1: 458 panic("%s: bad signature", sc->sc_dev.dv_xname); 459 break; 460 461 case 2: 462 panic("%s: unable to load MicroCode", 463 sc->sc_dev.dv_xname); 464 break; 465 466 case 3: 467 panic("%s: unable to initialize MicroCode", 468 sc->sc_dev.dv_xname); 469 break; 470 471 default: 472 panic("%s: unable to initialize board RISC chip", 473 sc->sc_dev.dv_xname); 474 } 475 476 TAILQ_INIT(&sc->sc_free_ccb); 477 TAILQ_INIT(&sc->sc_waiting_ccb); 478 479 mtx_init(&sc->sc_ccb_mtx, IPL_BIO); 480 scsi_iopool_init(&sc->sc_iopool, sc, adv_ccb_alloc, adv_ccb_free); 481 482 /* 483 * Allocate the Control Blocks. 484 */ 485 error = adv_alloc_ccbs(sc); 486 if (error) 487 return; /* (error) */ ; 488 489 /* 490 * Create and initialize the Control Blocks. 491 */ 492 i = adv_create_ccbs(sc, sc->sc_control->ccbs, ADV_MAX_CCB); 493 if (i == 0) { 494 printf("%s: unable to create control blocks\n", 495 sc->sc_dev.dv_xname); 496 return; /* (ENOMEM) */ ; 497 } else if (i != ADV_MAX_CCB) { 498 printf("%s: WARNING: only %d of %d control blocks created\n", 499 sc->sc_dev.dv_xname, i, ADV_MAX_CCB); 500 } 501 502 saa.saa_adapter_softc = sc; 503 saa.saa_adapter_target = sc->chip_scsi_id; 504 saa.saa_adapter = &adv_switch; 505 saa.saa_adapter_buswidth = 7; 506 saa.saa_luns = 8; 507 saa.saa_openings = 4; 508 saa.saa_pool = &sc->sc_iopool; 509 saa.saa_wwpn = saa.saa_wwnn = 0; 510 saa.saa_quirks = saa.saa_flags = 0; 511 512 config_found(&sc->sc_dev, &saa, scsiprint); 513 } 514 515 516 /* 517 * start a scsi operation given the command and the data address. Also needs 518 * the unit, target and lu. 
 */
static void
adv_scsi_cmd(xs)
	struct scsi_xfer *xs;
{
	struct scsi_link *sc_link = xs->sc_link;
	ASC_SOFTC *sc = sc_link->bus->sb_adapter_softc;
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADV_CCB *ccb;
	int s, flags, error, nsegs;

	/*
	 * Get a ccb to use.  If the transfer is from a buf (possibly from
	 * interrupt time) then we can't allow it to sleep.
	 */

	flags = xs->flags;
	ccb = xs->io;

	ccb->xs = xs;
	ccb->timeout = xs->timeout;

	/*
	 * Build up the request
	 */
	memset(&ccb->scsiq, 0, sizeof(ASC_SCSI_Q));

	ccb->scsiq.q2.ccb_ptr = (ulong) ccb;

	ccb->scsiq.cdbptr = (u_int8_t *)&xs->cmd;
	ccb->scsiq.q2.cdb_len = xs->cmdlen;
	ccb->scsiq.q1.target_id = ASC_TID_TO_TARGET_ID(sc_link->target);
	ccb->scsiq.q1.target_lun = sc_link->lun;
	ccb->scsiq.q2.target_ix = ASC_TIDLUN_TO_IX(sc_link->target,
	    sc_link->lun);
	ccb->scsiq.q1.sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADV_CCB_OFF(ccb) + offsetof(struct adv_ccb, scsi_sense);
	ccb->scsiq.q1.sense_len = sizeof(struct scsi_sense_data);

	/*
	 * If there are any outstanding requests for the current target,
	 * then send every 255th request as an ORDERED request.  This
	 * heuristic tries to retain the benefit of request sorting while
	 * preventing request starvation.  255 is the max number of tags or
	 * pending commands a device may have outstanding.
	 */
	sc->reqcnt[sc_link->target]++;
	if ((sc->reqcnt[sc_link->target] > 0) &&
	    (sc->reqcnt[sc_link->target] % 255) == 0) {
		ccb->scsiq.q2.tag_code = M2_QTAG_MSG_ORDERED;
	} else {
		ccb->scsiq.q2.tag_code = M2_QTAG_MSG_SIMPLE;
	}


	if (xs->datalen) {
		/*
		 * Map the DMA transfer.
		 */
		error = bus_dmamap_load(dmat,
		    ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
		    (flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);

		if (error) {
			if (error == EFBIG) {
				printf("%s: adv_scsi_cmd, more than %d dma"
				    " segments\n",
				    sc->sc_dev.dv_xname, ASC_MAX_SG_LIST);
			} else {
				printf("%s: adv_scsi_cmd, error %d loading"
				    " dma map\n",
				    sc->sc_dev.dv_xname, error);
			}

			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
			return;
		}
		bus_dmamap_sync(dmat, ccb->dmamap_xfer,
		    0, ccb->dmamap_xfer->dm_mapsize,
		    ((flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE));


		memset(&ccb->sghead, 0, sizeof(ASC_SG_HEAD));

		for (nsegs = 0; nsegs < ccb->dmamap_xfer->dm_nsegs; nsegs++) {

			ccb->sghead.sg_list[nsegs].addr =
			    ccb->dmamap_xfer->dm_segs[nsegs].ds_addr;
			ccb->sghead.sg_list[nsegs].bytes =
			    ccb->dmamap_xfer->dm_segs[nsegs].ds_len;
		}

		ccb->sghead.entry_cnt = ccb->scsiq.q1.sg_queue_cnt =
		    ccb->dmamap_xfer->dm_nsegs;

		ccb->scsiq.q1.cntl |= ASC_QC_SG_HEAD;
		ccb->scsiq.sg_head = &ccb->sghead;
		ccb->scsiq.q1.data_addr = 0;
		ccb->scsiq.q1.data_cnt = 0;
	} else {
		/*
		 * No data xfer, use non S/G values.
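		 * data_addr and data_cnt are left zero and ASC_QC_SG_HEAD
		 * stays clear.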
		 */
		ccb->scsiq.q1.data_addr = 0;
		ccb->scsiq.q1.data_cnt = 0;
	}

#ifdef ASC_DEBUG
	printf("id = %d, lun = %d, cmd = %d, ccb = 0x%lX \n",
	    sc_link->target,
	    sc_link->lun, xs->cmd.opcode,
	    (unsigned long)ccb);
#endif

	/*
	 * Queue the request; adv_start_ccbs() hands it to the controller.
	 */
	s = splbio();
	adv_queue_ccb(sc, ccb);
	splx(s);

	/*
	 * Usually return SUCCESSFULLY QUEUED
	 */
	if ((flags & SCSI_POLL) == 0)
		return;

	/*
	 * If we can't use interrupts, poll on completion
	 */
	if (adv_poll(sc, xs, ccb->timeout)) {
		adv_timeout(ccb);
		if (adv_poll(sc, xs, ccb->timeout))
			adv_timeout(ccb);
	}
}


int
adv_intr(arg)
	void *arg;
{
	ASC_SOFTC *sc = arg;

#ifdef ASC_DEBUG
	int int_pend = FALSE;

	if (ASC_IS_INT_PENDING(sc->sc_iot, sc->sc_ioh)) {
		int_pend = TRUE;
		printf("ISR - ");
	}
#endif
	AscISR(sc);
#ifdef ASC_DEBUG
	if (int_pend)
		printf("\n");
#endif

	return (1);
}


/*
 * Poll a particular unit, looking for a particular xs
 */
static int
adv_poll(sc, xs, count)
	ASC_SOFTC *sc;
	struct scsi_xfer *xs;
	int count;
{
	int s;

	/* timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		s = splbio();
		adv_intr(sc);
		splx(s);
		if (xs->flags & ITSDONE)
			return (0);
		delay(1000);	/* only happens in boot so ok */
		count--;
	}
	return (1);
}


static void
adv_timeout(arg)
	void *arg;
{
	ADV_CCB *ccb = arg;
	struct scsi_xfer *xs = ccb->xs;
	struct scsi_link *sc_link = xs->sc_link;
	ASC_SOFTC *sc = sc_link->bus->sb_adapter_softc;
	int s;

	sc_print_addr(sc_link);
	printf("timed out");

	s = splbio();

	/*
	 * If it has been through before, then a previous abort has failed;
	 * don't try to abort again, reset the bus instead.
	 */
	if (ccb->flags & CCB_ABORT) {
		/* abort timed out */
		printf(" AGAIN. Resetting Bus\n");
		/* Let's try resetting the bus! */
		if (AscResetBus(sc) == ASC_ERROR) {
			ccb->timeout = sc->scsi_reset_wait;
			adv_queue_ccb(sc, ccb);
		}
	} else {
		/* abort the operation that has timed out */
		printf("\n");
		AscAbortCCB(sc, (u_int32_t) ccb);
		ccb->xs->error = XS_TIMEOUT;
		ccb->timeout = ADV_ABORT_TIMEOUT;
		ccb->flags |= CCB_ABORT;
		adv_queue_ccb(sc, ccb);
	}

	splx(s);
}


static void
adv_watchdog(arg)
	void *arg;
{
	ADV_CCB *ccb = arg;
	struct scsi_xfer *xs = ccb->xs;
	struct scsi_link *sc_link = xs->sc_link;
	ASC_SOFTC *sc = sc_link->bus->sb_adapter_softc;
	int s;

	s = splbio();

	ccb->flags &= ~CCB_WATCHDOG;
	adv_start_ccbs(sc);

	splx(s);
}


/******************************************************************************/
/*                  NARROW and WIDE boards Interrupt callbacks                */
/******************************************************************************/


/*
 * adv_narrow_isr_callback() - Second Level Interrupt Handler called by AscISR()
 *
 * Interrupt callback function for the Narrow SCSI Asc Library.
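 * Translates the ASC_QDONE_INFO ending status into scsi_xfer error codes,
 * copies sense data on a check condition and completes the transfer with
 * scsi_done().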
 */
static void
adv_narrow_isr_callback(sc, qdonep)
	ASC_SOFTC *sc;
	ASC_QDONE_INFO *qdonep;
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADV_CCB *ccb = (ADV_CCB *) qdonep->d2.ccb_ptr;
	struct scsi_xfer *xs = ccb->xs;
	struct scsi_sense_data *s1, *s2;


#ifdef ASC_DEBUG
	printf(" - ccb=0x%lx, id=%d, lun=%d, cmd=%d, ",
	    (unsigned long)ccb,
	    xs->sc_link->target,
	    xs->sc_link->lun, xs->cmd.opcode);
#endif
	timeout_del(&xs->stimeout);

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer,
		    0, ccb->dmamap_xfer->dm_mapsize,
		    ((xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE));
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}
	if ((ccb->flags & CCB_ALLOC) == 0) {
		panic("%s: exiting ccb not allocated!", sc->sc_dev.dv_xname);
		return;
	}
	/*
	 * 'qdonep' contains the command's ending status.
	 */
#ifdef ASC_DEBUG
	printf("d_s=%d, h_s=%d", qdonep->d3.done_stat, qdonep->d3.host_stat);
#endif
	switch (qdonep->d3.done_stat) {
	case ASC_QD_NO_ERROR:
		switch (qdonep->d3.host_stat) {
		case ASC_QHSTA_NO_ERROR:
			xs->error = XS_NOERROR;
			xs->resid = 0;
			break;

		default:
			/* QHSTA error occurred */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}

		/*
		 * If an INQUIRY command completed successfully, then call
		 * the AscInquiryHandling() function to patch bugged boards.
		 */
		if ((xs->cmd.opcode == SCSICMD_Inquiry) &&
		    (xs->sc_link->lun == 0) &&
		    (xs->datalen - qdonep->remain_bytes) >= 8) {
			AscInquiryHandling(sc,
			    xs->sc_link->target & 0x7,
			    (ASC_SCSI_INQUIRY *) xs->data);
		}
		break;

	case ASC_QD_WITH_ERROR:
		switch (qdonep->d3.host_stat) {
		case ASC_QHSTA_NO_ERROR:
			if (qdonep->d3.scsi_stat == SS_CHK_CONDITION) {
				s1 = &ccb->scsi_sense;
				s2 = &xs->sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
			} else {
				xs->error = XS_DRIVER_STUFFUP;
			}
			break;

		default:
			/* QHSTA error occurred */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case ASC_QD_ABORTED_BY_HOST:
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	scsi_done(xs);
}