/*	$OpenBSD: adv.c,v 1.54 2024/09/20 02:00:46 jsg Exp $	*/
/*	$NetBSD: adv.c,v 1.6 1998/10/28 20:39:45 dante Exp $	*/

/*
 * Generic driver for the Advanced Systems Inc. Narrow SCSI controllers
 *
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Author: Baldassare Dante Profeta <dante@mclink.it>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>

#include <dev/ic/adv.h>
#include <dev/ic/advlib.h>

/* #define ASC_DEBUG */

/******************************************************************************/


static int adv_alloc_ccbs(ASC_SOFTC *);
static int adv_create_ccbs(ASC_SOFTC *, ADV_CCB *, int);
void adv_ccb_free(void *, void *);
static void adv_reset_ccb(ADV_CCB *);
static int adv_init_ccb(ASC_SOFTC *, ADV_CCB *);
void *adv_ccb_alloc(void *);
static void adv_queue_ccb(ASC_SOFTC *, ADV_CCB *);
static void adv_start_ccbs(ASC_SOFTC *);

static u_int8_t *adv_alloc_overrunbuf(char *dvname, bus_dma_tag_t);

static void adv_scsi_cmd(struct scsi_xfer *);
static void adv_narrow_isr_callback(ASC_SOFTC *, ASC_QDONE_INFO *);

static int adv_poll(ASC_SOFTC *, struct scsi_xfer *, int);
static void adv_timeout(void *);
static void adv_watchdog(void *);


/******************************************************************************/


struct cfdriver adv_cd = {
	NULL, "adv", DV_DULL
};


const struct scsi_adapter adv_switch = {
	adv_scsi_cmd, NULL, NULL, NULL, NULL
};


#define ADV_ABORT_TIMEOUT	2000	/* time to wait for abort (mSec) */
#define ADV_WATCH_TIMEOUT	1000	/* time to wait for watchdog (mSec) */


/******************************************************************************/
/*                           Control Blocks routines                          */
/******************************************************************************/


static int
adv_alloc_ccbs(ASC_SOFTC *sc)
{
	bus_dma_segment_t seg;
	int error, rseg;

	/*
	 * Allocate the control blocks.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adv_control),
	    NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO)) != 0) {
		printf("%s: unable to allocate control structures,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct adv_control), (caddr_t *)&sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control structures, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	/*
	 * Create and load the DMA map used for the control blocks.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adv_control),
	    1, sizeof(struct adv_control), 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_control)) != 0) {
		printf("%s: unable to create control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
	    sc->sc_control, sizeof(struct adv_control), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	return (0);
}


/*
 * Create a set of ccbs and add them to the free list.  Called once
 * by adv_init().  We return the number of CCBs successfully created.
 * CCB data is already zeroed on allocation.
 */
static int
adv_create_ccbs(ASC_SOFTC *sc, ADV_CCB *ccbstore, int count)
{
	ADV_CCB *ccb;
	int i, error;

	for (i = 0; i < count; i++) {
		ccb = &ccbstore[i];
		if ((error = adv_init_ccb(sc, ccb)) != 0) {
			printf("%s: unable to initialize ccb, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			return (i);
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
	}

	return (i);
}


/*
 * A ccb is put onto the free list.
 */
void
adv_ccb_free(void *xsc, void *xccb)
{
	ASC_SOFTC *sc = xsc;
	ADV_CCB *ccb = xccb;

	adv_reset_ccb(ccb);

	mtx_enter(&sc->sc_ccb_mtx);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
	mtx_leave(&sc->sc_ccb_mtx);
}


static void
adv_reset_ccb(ADV_CCB *ccb)
{

	ccb->flags = 0;
}


static int
adv_init_ccb(ASC_SOFTC *sc, ADV_CCB *ccb)
{
	int error;

	/*
	 * Create the DMA map for this CCB.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
	    (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
	    ASC_MAX_SG_LIST, (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
	if (error) {
		printf("%s: unable to create DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	adv_reset_ccb(ccb);
	return (0);
}


/*
 * Get a free ccb
 */
void *
adv_ccb_alloc(void *xsc)
{
	ASC_SOFTC *sc = xsc;
	ADV_CCB *ccb;

	mtx_enter(&sc->sc_ccb_mtx);
	ccb = TAILQ_FIRST(&sc->sc_free_ccb);
	if (ccb) {
		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
		ccb->flags |= CCB_ALLOC;
	}
	mtx_leave(&sc->sc_ccb_mtx);

	return (ccb);
}

/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 */
static void
adv_queue_ccb(ASC_SOFTC *sc, ADV_CCB *ccb)
{

	timeout_set(&ccb->xs->stimeout, adv_timeout, ccb);
	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

	adv_start_ccbs(sc);
}


static void
adv_start_ccbs(ASC_SOFTC *sc)
{
	ADV_CCB *ccb;
	struct scsi_xfer *xs;

	while ((ccb = TAILQ_FIRST(&sc->sc_waiting_ccb)) != NULL) {

		xs = ccb->xs;
		if (ccb->flags & CCB_WATCHDOG)
			timeout_del(&xs->stimeout);

		if (AscExeScsiQueue(sc, &ccb->scsiq) == ASC_BUSY) {
			ccb->flags |= CCB_WATCHDOG;
			timeout_set(&xs->stimeout, adv_watchdog, ccb);
			timeout_add_msec(&xs->stimeout, ADV_WATCH_TIMEOUT);
			break;
		}
		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);

		if ((ccb->xs->flags & SCSI_POLL) == 0) {
			timeout_set(&xs->stimeout, adv_timeout, ccb);
			timeout_add_msec(&xs->stimeout, ccb->timeout);
		}
	}
}


/******************************************************************************/
/*                     DMA able memory allocation routines                    */
/******************************************************************************/


/*
 * Allocate a DMA able memory for overrun_buffer.
 * This memory can be safely shared among all the AdvanSys boards.
 */
u_int8_t *
adv_alloc_overrunbuf(char *dvname, bus_dma_tag_t dmat)
{
	static u_int8_t *overrunbuf = NULL;

	bus_dmamap_t ovrbuf_dmamap;
	bus_dma_segment_t seg;
	int rseg, error;


	/*
	 * if an overrun buffer has been already allocated don't allocate it
	 * again. Instead return the address of the allocated buffer.
	 */
	if (overrunbuf)
		return (overrunbuf);


	if ((error = bus_dmamem_alloc(dmat, ASC_OVERRUN_BSIZE,
	    NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate overrun buffer, error = %d\n",
		    dvname, error);
		return (0);
	}
	if ((error = bus_dmamem_map(dmat, &seg, rseg, ASC_OVERRUN_BSIZE,
	    (caddr_t *)&overrunbuf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map overrun buffer, error = %d\n",
		    dvname, error);

		bus_dmamem_free(dmat, &seg, 1);
		return (0);
	}
	if ((error = bus_dmamap_create(dmat, ASC_OVERRUN_BSIZE, 1,
	    ASC_OVERRUN_BSIZE, 0, BUS_DMA_NOWAIT, &ovrbuf_dmamap)) != 0) {
		printf("%s: unable to create overrun buffer DMA map,"
		    " error = %d\n", dvname, error);

		bus_dmamem_unmap(dmat, overrunbuf, ASC_OVERRUN_BSIZE);
		bus_dmamem_free(dmat, &seg, 1);
		return (0);
	}
	if ((error = bus_dmamap_load(dmat, ovrbuf_dmamap, overrunbuf,
	    ASC_OVERRUN_BSIZE, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load overrun buffer DMA map,"
		    " error = %d\n", dvname, error);

		bus_dmamap_destroy(dmat, ovrbuf_dmamap);
		bus_dmamem_unmap(dmat, overrunbuf, ASC_OVERRUN_BSIZE);
		bus_dmamem_free(dmat, &seg, 1);
		return (0);
	}
	return (overrunbuf);
}


/******************************************************************************/
/*                       SCSI layer interfacing routines                      */
/******************************************************************************/


int
adv_init(ASC_SOFTC *sc)
{
	int warn;

	if (!AscFindSignature(sc->sc_iot, sc->sc_ioh))
		panic("adv_init: adv_find_signature failed");

	/*
	 * Read the board configuration
	 */
	AscInitASC_SOFTC(sc);
	warn = AscInitFromEEP(sc);
	if (warn) {
		printf("%s -get: ", sc->sc_dev.dv_xname);
		switch (warn) {
		case -1:
			printf("Chip is not halted\n");
			break;

		case -2:
			printf("Couldn't get MicroCode Start"
			    " address\n");
			break;

		case ASC_WARN_IO_PORT_ROTATE:
			printf("I/O port address modified\n");
			break;

		case ASC_WARN_AUTO_CONFIG:
			printf("I/O port increment switch enabled\n");
			break;

		case ASC_WARN_EEPROM_CHKSUM:
			printf("EEPROM checksum error\n");
			break;

		case ASC_WARN_IRQ_MODIFIED:
			printf("IRQ modified\n");
			break;

		case ASC_WARN_CMD_QNG_CONFLICT:
			printf("tag queuing enabled w/o disconnects\n");
			break;

		default:
			printf("unknown warning %d\n", warn);
		}
	}
	if (sc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT)
		sc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT;

	/*
	 * Modify the board configuration
	 */
	warn = AscInitFromASC_SOFTC(sc);
	if (warn) {
		printf("%s -set: ", sc->sc_dev.dv_xname);
		switch (warn) {
		case ASC_WARN_CMD_QNG_CONFLICT:
			printf("tag queuing enabled w/o disconnects\n");
			break;

		case ASC_WARN_AUTO_CONFIG:
			printf("I/O port increment switch enabled\n");
			break;

		default:
			printf("unknown warning %d\n", warn);
		}
	}
	sc->isr_callback = (ulong) adv_narrow_isr_callback;

	if (!(sc->overrun_buf = adv_alloc_overrunbuf(sc->sc_dev.dv_xname,
	    sc->sc_dmat))) {
		return (1);
	}

	return (0);
}


void
adv_attach(ASC_SOFTC *sc)
{
	struct scsibus_attach_args saa;
	int i, error;

	/*
	 * Initialize board RISC chip and enable interrupts.
	 */
	switch (AscInitDriver(sc)) {
	case 0:
		/* AllOK */
		break;

	case 1:
		panic("%s: bad signature", sc->sc_dev.dv_xname);
		break;

	case 2:
		panic("%s: unable to load MicroCode",
		    sc->sc_dev.dv_xname);
		break;

	case 3:
		panic("%s: unable to initialize MicroCode",
		    sc->sc_dev.dv_xname);
		break;

	default:
		panic("%s: unable to initialize board RISC chip",
		    sc->sc_dev.dv_xname);
	}

	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);

	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
	scsi_iopool_init(&sc->sc_iopool, sc, adv_ccb_alloc, adv_ccb_free);

	/*
	 * Allocate the Control Blocks.
	 */
	error = adv_alloc_ccbs(sc);
	if (error)
		return; /* (error) */

	/*
	 * Create and initialize the Control Blocks.
	 */
	i = adv_create_ccbs(sc, sc->sc_control->ccbs, ADV_MAX_CCB);
	if (i == 0) {
		printf("%s: unable to create control blocks\n",
		    sc->sc_dev.dv_xname);
		return; /* (ENOMEM) */
	} else if (i != ADV_MAX_CCB) {
		printf("%s: WARNING: only %d of %d control blocks created\n",
		    sc->sc_dev.dv_xname, i, ADV_MAX_CCB);
	}

	saa.saa_adapter_softc = sc;
	saa.saa_adapter_target = sc->chip_scsi_id;
	saa.saa_adapter = &adv_switch;
	saa.saa_adapter_buswidth = 7;
	saa.saa_luns = 8;
	saa.saa_openings = 4;
	saa.saa_pool = &sc->sc_iopool;
	saa.saa_wwpn = saa.saa_wwnn = 0;
	saa.saa_quirks = saa.saa_flags = 0;

	config_found(&sc->sc_dev, &saa, scsiprint);
}


/*
 * start a scsi operation given the command and the data address.  Also needs
 * the unit, target and lu.
 */
static void
adv_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *sc_link = xs->sc_link;
	ASC_SOFTC *sc = sc_link->bus->sb_adapter_softc;
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADV_CCB *ccb;
	int s, flags, error, nsegs;

	/*
	 * get a ccb to use. If the transfer
	 * is from a buf (possibly from interrupt time)
	 * then we can't allow it to sleep
	 */

	flags = xs->flags;
	ccb = xs->io;

	ccb->xs = xs;
	ccb->timeout = xs->timeout;

	/*
	 * Build up the request
	 */
	memset(&ccb->scsiq, 0, sizeof(ASC_SCSI_Q));

	ccb->scsiq.q2.ccb_ptr = (ulong) ccb;

	ccb->scsiq.cdbptr = (u_int8_t *)&xs->cmd;
	ccb->scsiq.q2.cdb_len = xs->cmdlen;
	ccb->scsiq.q1.target_id = ASC_TID_TO_TARGET_ID(sc_link->target);
	ccb->scsiq.q1.target_lun = sc_link->lun;
	ccb->scsiq.q2.target_ix = ASC_TIDLUN_TO_IX(sc_link->target,
	    sc_link->lun);
	ccb->scsiq.q1.sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADV_CCB_OFF(ccb) + offsetof(struct adv_ccb, scsi_sense);
	ccb->scsiq.q1.sense_len = sizeof(struct scsi_sense_data);

	/*
	 * If there are any outstanding requests for the current target,
	 * then every 255th request send an ORDERED request.  This heuristic
	 * tries to retain the benefit of request sorting while preventing
	 * request starvation.  255 is the max number of tags or pending
	 * commands a device may have outstanding.
	 */
	sc->reqcnt[sc_link->target]++;
	if ((sc->reqcnt[sc_link->target] > 0) &&
	    (sc->reqcnt[sc_link->target] % 255) == 0) {
		ccb->scsiq.q2.tag_code = M2_QTAG_MSG_ORDERED;
	} else {
		ccb->scsiq.q2.tag_code = M2_QTAG_MSG_SIMPLE;
	}


	if (xs->datalen) {
		/*
		 * Map the DMA transfer.
		 */
		error = bus_dmamap_load(dmat,
		    ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
		    (flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);

		if (error) {
			if (error == EFBIG) {
				printf("%s: adv_scsi_cmd, more than %d dma"
				    " segments\n",
				    sc->sc_dev.dv_xname, ASC_MAX_SG_LIST);
			} else {
				printf("%s: adv_scsi_cmd, error %d loading"
				    " dma map\n",
				    sc->sc_dev.dv_xname, error);
			}

			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
			return;
		}
		bus_dmamap_sync(dmat, ccb->dmamap_xfer,
		    0, ccb->dmamap_xfer->dm_mapsize,
		    ((flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE));


		memset(&ccb->sghead, 0, sizeof(ASC_SG_HEAD));

		for (nsegs = 0; nsegs < ccb->dmamap_xfer->dm_nsegs; nsegs++) {

			ccb->sghead.sg_list[nsegs].addr =
			    ccb->dmamap_xfer->dm_segs[nsegs].ds_addr;
			ccb->sghead.sg_list[nsegs].bytes =
			    ccb->dmamap_xfer->dm_segs[nsegs].ds_len;
		}

		ccb->sghead.entry_cnt = ccb->scsiq.q1.sg_queue_cnt =
		    ccb->dmamap_xfer->dm_nsegs;

		ccb->scsiq.q1.cntl |= ASC_QC_SG_HEAD;
		ccb->scsiq.sg_head = &ccb->sghead;
		ccb->scsiq.q1.data_addr = 0;
		ccb->scsiq.q1.data_cnt = 0;
	} else {
		/*
		 * No data xfer, use non S/G values.
		 */
		ccb->scsiq.q1.data_addr = 0;
		ccb->scsiq.q1.data_cnt = 0;
	}

#ifdef ASC_DEBUG
	printf("id = %d, lun = %d, cmd = %d, ccb = 0x%lX \n",
	    sc_link->target,
	    sc_link->lun, xs->cmd.opcode,
	    (unsigned long)ccb);
#endif
	/*
	 * Queue up the request; adv_start_ccbs() hands it to the chip,
	 * or defers it if the controller queue is currently full.
	 */
	s = splbio();
	adv_queue_ccb(sc, ccb);
	splx(s);

	/*
	 * Usually return SUCCESSFULLY QUEUED
	 */
	if ((flags & SCSI_POLL) == 0)
		return;

	/*
	 * If we can't use interrupts, poll on completion
	 */
	if (adv_poll(sc, xs, ccb->timeout)) {
		adv_timeout(ccb);
		if (adv_poll(sc, xs, ccb->timeout))
			adv_timeout(ccb);
	}
}


int
adv_intr(void *arg)
{
	ASC_SOFTC *sc = arg;

#ifdef ASC_DEBUG
	int int_pend = FALSE;

	if (ASC_IS_INT_PENDING(sc->sc_iot, sc->sc_ioh)) {
		int_pend = TRUE;
		printf("ISR - ");
	}
#endif
	AscISR(sc);
#ifdef ASC_DEBUG
	if (int_pend)
		printf("\n");
#endif

	return (1);
}


/*
 * Poll a particular unit, looking for a particular xs
 */
static int
adv_poll(ASC_SOFTC *sc, struct scsi_xfer *xs, int count)
{
	int s;

	/* timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		s = splbio();
		adv_intr(sc);
		splx(s);
		if (xs->flags & ITSDONE)
			return (0);
		delay(1000);	/* only happens in boot so ok */
		count--;
	}
	return (1);
}


static void
adv_timeout(void *arg)
{
	ADV_CCB *ccb = arg;
	struct scsi_xfer *xs = ccb->xs;
	struct scsi_link *sc_link = xs->sc_link;
	ASC_SOFTC *sc = sc_link->bus->sb_adapter_softc;
	int s;

	sc_print_addr(sc_link);
	printf("timed out");

	s = splbio();

	/*
	 * If it has been through before, then a previous abort has failed,
	 * don't try abort again, reset the bus instead.
	 */
	if (ccb->flags & CCB_ABORT) {
		/* abort timed out */
		printf(" AGAIN. Resetting Bus\n");
		/* Lets try resetting the bus! */
		if (AscResetBus(sc) == ASC_ERROR) {
			ccb->timeout = sc->scsi_reset_wait;
			adv_queue_ccb(sc, ccb);
		}
	} else {
		/* abort the operation that has timed out */
		printf("\n");
		AscAbortCCB(sc, (u_int32_t) ccb);
		ccb->xs->error = XS_TIMEOUT;
		ccb->timeout = ADV_ABORT_TIMEOUT;
		ccb->flags |= CCB_ABORT;
		adv_queue_ccb(sc, ccb);
	}

	splx(s);
}


/*
 * A CCB could not be handed to the chip earlier because its queue was
 * full; clear the watchdog flag and retry the waiting CCBs.
 */
static void
adv_watchdog(void *arg)
{
	ADV_CCB *ccb = arg;
	struct scsi_xfer *xs = ccb->xs;
	struct scsi_link *sc_link = xs->sc_link;
	ASC_SOFTC *sc = sc_link->bus->sb_adapter_softc;
	int s;

	s = splbio();

	ccb->flags &= ~CCB_WATCHDOG;
	adv_start_ccbs(sc);

	splx(s);
}


/******************************************************************************/
/*                 NARROW and WIDE boards Interrupt callbacks                 */
/******************************************************************************/


/*
 * adv_narrow_isr_callback() - Second Level Interrupt Handler called by AscISR()
 *
 * Interrupt callback function for the Narrow SCSI Asc Library.
 */
static void
adv_narrow_isr_callback(ASC_SOFTC *sc, ASC_QDONE_INFO *qdonep)
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADV_CCB *ccb = (ADV_CCB *) qdonep->d2.ccb_ptr;
	struct scsi_xfer *xs = ccb->xs;
	struct scsi_sense_data *s1, *s2;


#ifdef ASC_DEBUG
	printf(" - ccb=0x%lx, id=%d, lun=%d, cmd=%d, ",
	    (unsigned long)ccb,
	    xs->sc_link->target,
	    xs->sc_link->lun, xs->cmd.opcode);
#endif
	timeout_del(&xs->stimeout);

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer,
		    0, ccb->dmamap_xfer->dm_mapsize,
		    ((xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE));
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}
	if ((ccb->flags & CCB_ALLOC) == 0) {
		panic("%s: exiting ccb not allocated!", sc->sc_dev.dv_xname);
		return;
	}
	/*
	 * 'qdonep' contains the command's ending status.
	 */
#ifdef ASC_DEBUG
	printf("d_s=%d, h_s=%d", qdonep->d3.done_stat, qdonep->d3.host_stat);
#endif
	switch (qdonep->d3.done_stat) {
	case ASC_QD_NO_ERROR:
		switch (qdonep->d3.host_stat) {
		case ASC_QHSTA_NO_ERROR:
			xs->error = XS_NOERROR;
			xs->resid = 0;
			break;

		default:
			/* QHSTA error occurred */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}

		/*
		 * If an INQUIRY command completed successfully, then call
		 * the AscInquiryHandling() function to patch bugged boards.
		 */
		if ((xs->cmd.opcode == SCSICMD_Inquiry) &&
		    (xs->sc_link->lun == 0) &&
		    (xs->datalen - qdonep->remain_bytes) >= 8) {
			AscInquiryHandling(sc,
			    xs->sc_link->target & 0x7,
			    (ASC_SCSI_INQUIRY *) xs->data);
		}
		break;

	case ASC_QD_WITH_ERROR:
		switch (qdonep->d3.host_stat) {
		case ASC_QHSTA_NO_ERROR:
			if (qdonep->d3.scsi_stat == SS_CHK_CONDITION) {
				s1 = &ccb->scsi_sense;
				s2 = &xs->sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
			} else {
				xs->error = XS_DRIVER_STUFFUP;
			}
			break;

		default:
			/* QHSTA error occurred */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case ASC_QD_ABORTED_BY_HOST:
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	scsi_done(xs);
}