/*	$OpenBSD: adv.c,v 1.20 2008/09/12 11:14:04 miod Exp $	*/
/*	$NetBSD: adv.c,v 1.6 1998/10/28 20:39:45 dante Exp $	*/

/*
 * Generic driver for the Advanced Systems Inc. Narrow SCSI controllers
 *
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Author: Baldassare Dante Profeta <dante@mclink.it>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>

#include <dev/ic/adv.h>
#include <dev/ic/advlib.h>

#ifndef DDB
#define Debugger()	panic("should call debugger here (adv.c)")
#endif /* ! DDB */


/* #define ASC_DEBUG */

/******************************************************************************/


static void adv_enqueue(ASC_SOFTC *, struct scsi_xfer *, int);
static struct scsi_xfer *adv_dequeue(ASC_SOFTC *);

static int adv_alloc_ccbs(ASC_SOFTC *);
static int adv_create_ccbs(ASC_SOFTC *, ADV_CCB *, int);
static void adv_free_ccb(ASC_SOFTC *, ADV_CCB *);
static void adv_reset_ccb(ADV_CCB *);
static int adv_init_ccb(ASC_SOFTC *, ADV_CCB *);
static ADV_CCB *adv_get_ccb(ASC_SOFTC *, int);
static void adv_queue_ccb(ASC_SOFTC *, ADV_CCB *);
static void adv_start_ccbs(ASC_SOFTC *);

static u_int8_t *adv_alloc_overrunbuf(char *dvname, bus_dma_tag_t);

static int adv_scsi_cmd(struct scsi_xfer *);
static void advminphys(struct buf *);
static void adv_narrow_isr_callback(ASC_SOFTC *, ASC_QDONE_INFO *);

static int adv_poll(ASC_SOFTC *, struct scsi_xfer *, int);
static void adv_timeout(void *);
static void adv_watchdog(void *);


/******************************************************************************/


struct cfdriver adv_cd = {
        NULL, "adv", DV_DULL
};


struct scsi_adapter adv_switch =
{
        adv_scsi_cmd,           /* called to start/enqueue a SCSI command */
        advminphys,             /* to limit the transfer to max device can do */
        0,                      /* IT SEEMS IT IS NOT USED YET */
        0,                      /* as above... */
};


/* the below structure is so we have a default dev struct for our link struct */
struct scsi_device adv_dev =
{
        NULL,                   /* Use default error handler */
        NULL,                   /* have a queue, served by this */
        NULL,                   /* have no async handler */
        NULL,                   /* Use default 'done' routine */
};


#define ADV_ABORT_TIMEOUT       2000    /* time to wait for abort (mSec) */
#define ADV_WATCH_TIMEOUT       1000    /* time to wait for watchdog (mSec) */


/******************************************************************************/
/*                          scsi_xfer queue routines                         */
/******************************************************************************/


/*
 * Insert a scsi_xfer into the software queue.  We overload xs->free_list
 * to avoid having to allocate additional resources (since we're used
 * only during resource shortages anyhow).
 */
static void
adv_enqueue(sc, xs, infront)
        ASC_SOFTC *sc;
        struct scsi_xfer *xs;
        int infront;
{

        if (infront || LIST_EMPTY(&sc->sc_queue)) {
                if (LIST_EMPTY(&sc->sc_queue))
                        sc->sc_queuelast = xs;
                LIST_INSERT_HEAD(&sc->sc_queue, xs, free_list);
                return;
        }
        LIST_INSERT_AFTER(sc->sc_queuelast, xs, free_list);
        sc->sc_queuelast = xs;
}


/*
 * Pull a scsi_xfer off the front of the software queue.
 */
static struct scsi_xfer *
adv_dequeue(sc)
        ASC_SOFTC *sc;
{
        struct scsi_xfer *xs;

        xs = LIST_FIRST(&sc->sc_queue);
        LIST_REMOVE(xs, free_list);

        if (LIST_EMPTY(&sc->sc_queue))
                sc->sc_queuelast = NULL;

        return (xs);
}


/******************************************************************************/
/*                          Control Blocks routines                          */
/******************************************************************************/

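/*
 * Allocate DMA-safe memory for the adv_control structure holding the
 * control blocks, map it into kernel virtual address space, then create
 * and load the DMA map used to obtain its bus address.
 */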
static int
adv_alloc_ccbs(sc)
        ASC_SOFTC *sc;
{
        bus_dma_segment_t seg;
        int error, rseg;

        /*
         * Allocate the control blocks.
         */
        if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adv_control),
            NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
                printf("%s: unable to allocate control structures,"
                    " error = %d\n", sc->sc_dev.dv_xname, error);
                return (error);
        }
        if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
            sizeof(struct adv_control), (caddr_t *) & sc->sc_control,
            BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
                printf("%s: unable to map control structures, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                return (error);
        }
        /*
         * Create and load the DMA map used for the control blocks.
         */
        if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adv_control),
            1, sizeof(struct adv_control), 0, BUS_DMA_NOWAIT,
            &sc->sc_dmamap_control)) != 0) {
                printf("%s: unable to create control DMA map, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                return (error);
        }
        if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
            sc->sc_control, sizeof(struct adv_control), NULL,
            BUS_DMA_NOWAIT)) != 0) {
                printf("%s: unable to load control DMA map, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                return (error);
        }
        return (0);
}


/*
 * Create a set of CCBs and add them to the free list.  Called once
 * by adv_attach().  We return the number of CCBs successfully created.
 */
static int
adv_create_ccbs(sc, ccbstore, count)
        ASC_SOFTC *sc;
        ADV_CCB *ccbstore;
        int count;
{
        ADV_CCB *ccb;
        int i, error;

        bzero(ccbstore, sizeof(ADV_CCB) * count);
        for (i = 0; i < count; i++) {
                ccb = &ccbstore[i];
                if ((error = adv_init_ccb(sc, ccb)) != 0) {
                        printf("%s: unable to initialize ccb, error = %d\n",
                            sc->sc_dev.dv_xname, error);
                        return (i);
                }
                TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
        }

        return (i);
}


/*
 * A ccb is put onto the free list.
 */
static void
adv_free_ccb(sc, ccb)
        ASC_SOFTC *sc;
        ADV_CCB *ccb;
{
        int s;

        s = splbio();

        adv_reset_ccb(ccb);
        TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);

        /*
         * If there were none, wake anybody waiting for one to come free,
         * starting with queued entries.
         */
        if (TAILQ_NEXT(ccb, chain) == NULL)
                wakeup(&sc->sc_free_ccb);

        splx(s);
}


static void
adv_reset_ccb(ccb)
        ADV_CCB *ccb;
{

        ccb->flags = 0;
}


static int
adv_init_ccb(sc, ccb)
        ASC_SOFTC *sc;
        ADV_CCB *ccb;
{
        int error;

        /*
         * Create the DMA map for this CCB.
         */
        error = bus_dmamap_create(sc->sc_dmat,
            (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
            ASC_MAX_SG_LIST, (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
            0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
        if (error) {
                printf("%s: unable to create DMA map, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                return (error);
        }
        adv_reset_ccb(ccb);
        return (0);
}


/*
 * Get a free ccb
 *
 * If there are none, either sleep waiting for one to come free or,
 * if SCSI_NOSLEEP is set, fail immediately.
 */
static ADV_CCB *
adv_get_ccb(sc, flags)
        ASC_SOFTC *sc;
        int flags;
{
        ADV_CCB *ccb = 0;
        int s;

        s = splbio();

        /*
         * If we have to, sleep waiting for one to come free,
         * but only if the caller allows it.
         */
        for (;;) {
                ccb = TAILQ_FIRST(&sc->sc_free_ccb);
                if (ccb) {
                        TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
                        break;
                }
                if ((flags & SCSI_NOSLEEP) != 0)
                        goto out;

                tsleep(&sc->sc_free_ccb, PRIBIO, "advccb", 0);
        }

        ccb->flags |= CCB_ALLOC;

out:
        splx(s);
        return (ccb);
}


/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 */
static void
adv_queue_ccb(sc, ccb)
        ASC_SOFTC *sc;
        ADV_CCB *ccb;
{

        timeout_set(&ccb->xs->stimeout, adv_timeout, ccb);
        TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

        adv_start_ccbs(sc);
}

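/*
 * Feed CCBs from the waiting queue to the controller.  If the firmware
 * reports ASC_BUSY, the CCB is left at the head of the queue and the
 * watchdog timeout is scheduled to retry it later.
 */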
static void
adv_start_ccbs(sc)
        ASC_SOFTC *sc;
{
        ADV_CCB *ccb;
        struct scsi_xfer *xs;

        while ((ccb = TAILQ_FIRST(&sc->sc_waiting_ccb)) != NULL) {

                xs = ccb->xs;
                if (ccb->flags & CCB_WATCHDOG)
                        timeout_del(&xs->stimeout);

                if (AscExeScsiQueue(sc, &ccb->scsiq) == ASC_BUSY) {
                        ccb->flags |= CCB_WATCHDOG;
                        timeout_set(&xs->stimeout, adv_watchdog, ccb);
                        timeout_add(&xs->stimeout,
                            (ADV_WATCH_TIMEOUT * hz) / 1000);
                        break;
                }
                TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);

                if ((ccb->xs->flags & SCSI_POLL) == 0) {
                        timeout_set(&xs->stimeout, adv_timeout, ccb);
                        timeout_add(&xs->stimeout, (ccb->timeout * hz) / 1000);
                }
        }
}


/******************************************************************************/
/*                     DMA able memory allocation routines                   */
/******************************************************************************/


/*
 * Allocate DMA-able memory for the overrun buffer.
 * This memory can be safely shared among all the AdvanSys boards.
 */
u_int8_t *
adv_alloc_overrunbuf(dvname, dmat)
        char *dvname;
        bus_dma_tag_t dmat;
{
        static u_int8_t *overrunbuf = NULL;

        bus_dmamap_t ovrbuf_dmamap;
        bus_dma_segment_t seg;
        int rseg, error;


        /*
         * If an overrun buffer has already been allocated, don't allocate
         * it again.  Instead return the address of the existing buffer.
         */
        if (overrunbuf)
                return (overrunbuf);


        if ((error = bus_dmamem_alloc(dmat, ASC_OVERRUN_BSIZE,
            NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
                printf("%s: unable to allocate overrun buffer, error = %d\n",
                    dvname, error);
                return (0);
        }
        if ((error = bus_dmamem_map(dmat, &seg, rseg, ASC_OVERRUN_BSIZE,
            (caddr_t *) & overrunbuf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
                printf("%s: unable to map overrun buffer, error = %d\n",
                    dvname, error);

                bus_dmamem_free(dmat, &seg, 1);
                return (0);
        }
        if ((error = bus_dmamap_create(dmat, ASC_OVERRUN_BSIZE, 1,
            ASC_OVERRUN_BSIZE, 0, BUS_DMA_NOWAIT, &ovrbuf_dmamap)) != 0) {
                printf("%s: unable to create overrun buffer DMA map,"
                    " error = %d\n", dvname, error);

                bus_dmamem_unmap(dmat, overrunbuf, ASC_OVERRUN_BSIZE);
                bus_dmamem_free(dmat, &seg, 1);
                return (0);
        }
        if ((error = bus_dmamap_load(dmat, ovrbuf_dmamap, overrunbuf,
            ASC_OVERRUN_BSIZE, NULL, BUS_DMA_NOWAIT)) != 0) {
                printf("%s: unable to load overrun buffer DMA map,"
                    " error = %d\n", dvname, error);

                bus_dmamap_destroy(dmat, ovrbuf_dmamap);
                bus_dmamem_unmap(dmat, overrunbuf, ASC_OVERRUN_BSIZE);
                bus_dmamem_free(dmat, &seg, 1);
                return (0);
        }
        return (overrunbuf);
}


/******************************************************************************/
/*                      SCSI layer interfacing routines                      */
/******************************************************************************/

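/*
 * adv_init() - initialize the softc from the board configuration.
 *
 * Reads the EEPROM settings, reports any configuration warnings, applies
 * the (possibly adjusted) configuration back to the board and allocates
 * the shared overrun buffer.  Returns non-zero on failure.
 */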
int
adv_init(sc)
        ASC_SOFTC *sc;
{
        int warn;

        if (!AscFindSignature(sc->sc_iot, sc->sc_ioh))
                panic("adv_init: AscFindSignature failed");

        /*
         * Read the board configuration
         */
        AscInitASC_SOFTC(sc);
        warn = AscInitFromEEP(sc);
        if (warn) {
                printf("%s -get: ", sc->sc_dev.dv_xname);
                switch (warn) {
                case -1:
                        printf("Chip is not halted\n");
                        break;

                case -2:
                        printf("Couldn't get MicroCode Start"
                            " address\n");
                        break;

                case ASC_WARN_IO_PORT_ROTATE:
                        printf("I/O port address modified\n");
                        break;

                case ASC_WARN_AUTO_CONFIG:
                        printf("I/O port increment switch enabled\n");
                        break;

                case ASC_WARN_EEPROM_CHKSUM:
                        printf("EEPROM checksum error\n");
                        break;

                case ASC_WARN_IRQ_MODIFIED:
                        printf("IRQ modified\n");
                        break;

                case ASC_WARN_CMD_QNG_CONFLICT:
                        printf("tag queuing enabled w/o disconnects\n");
                        break;

                default:
                        printf("unknown warning %d\n", warn);
                }
        }
        if (sc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT)
                sc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT;

        /*
         * Modify the board configuration
         */
        warn = AscInitFromASC_SOFTC(sc);
        if (warn) {
                printf("%s -set: ", sc->sc_dev.dv_xname);
                switch (warn) {
                case ASC_WARN_CMD_QNG_CONFLICT:
                        printf("tag queuing enabled w/o disconnects\n");
                        break;

                case ASC_WARN_AUTO_CONFIG:
                        printf("I/O port increment switch enabled\n");
                        break;

                default:
                        printf("unknown warning %d\n", warn);
                }
        }
        sc->isr_callback = (ulong) adv_narrow_isr_callback;

        if (!(sc->overrun_buf = adv_alloc_overrunbuf(sc->sc_dev.dv_xname,
            sc->sc_dmat))) {
                return (1);
        }

        return (0);
}


void
adv_attach(sc)
        ASC_SOFTC *sc;
{
        struct scsibus_attach_args saa;
        int i, error;

        /*
         * Initialize board RISC chip and enable interrupts.
         */
        switch (AscInitDriver(sc)) {
        case 0:
                /* AllOK */
                break;

        case 1:
                panic("%s: bad signature", sc->sc_dev.dv_xname);
                break;

        case 2:
                panic("%s: unable to load MicroCode",
                    sc->sc_dev.dv_xname);
                break;

        case 3:
                panic("%s: unable to initialize MicroCode",
                    sc->sc_dev.dv_xname);
                break;

        default:
                panic("%s: unable to initialize board RISC chip",
                    sc->sc_dev.dv_xname);
        }


        /*
         * fill in the prototype scsi_link.
         */
        sc->sc_link.adapter_softc = sc;
        sc->sc_link.adapter_target = sc->chip_scsi_id;
        sc->sc_link.adapter = &adv_switch;
        sc->sc_link.device = &adv_dev;
        sc->sc_link.openings = 4;
        sc->sc_link.adapter_buswidth = 7;


        TAILQ_INIT(&sc->sc_free_ccb);
        TAILQ_INIT(&sc->sc_waiting_ccb);
        LIST_INIT(&sc->sc_queue);


        /*
         * Allocate the Control Blocks.
         */
        error = adv_alloc_ccbs(sc);
        if (error)
                return; /* (error) */ ;

        /*
         * Create and initialize the Control Blocks.
         */
        i = adv_create_ccbs(sc, sc->sc_control->ccbs, ADV_MAX_CCB);
        if (i == 0) {
                printf("%s: unable to create control blocks\n",
                    sc->sc_dev.dv_xname);
                return; /* (ENOMEM) */ ;
        } else if (i != ADV_MAX_CCB) {
                printf("%s: WARNING: only %d of %d control blocks created\n",
                    sc->sc_dev.dv_xname, i, ADV_MAX_CCB);
        }

        bzero(&saa, sizeof(saa));
        saa.saa_sc_link = &sc->sc_link;
        config_found(&sc->sc_dev, &saa, scsiprint);
}


static void
advminphys(bp)
        struct buf *bp;
{

        if (bp->b_bcount > ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE))
                bp->b_bcount = ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE);
        minphys(bp);
}


/*
 * start a scsi operation given the command and the data address.  Also
 * needs the unit, target and lun.
 */
static int
adv_scsi_cmd(xs)
        struct scsi_xfer *xs;
{
        struct scsi_link *sc_link = xs->sc_link;
        ASC_SOFTC *sc = sc_link->adapter_softc;
        bus_dma_tag_t dmat = sc->sc_dmat;
        ADV_CCB *ccb;
        int s, flags, error, nsegs;
        int fromqueue = 1, dontqueue = 0;


        s = splbio();           /* protect the queue */

        /*
         * If we're running the queue from adv_intr(), we've been
         * called with the first queue entry as our argument.
         */
        if (xs == LIST_FIRST(&sc->sc_queue)) {
                xs = adv_dequeue(sc);
                fromqueue = 1;
        } else {

                /* Polled requests can't be queued for later. */
                dontqueue = xs->flags & SCSI_POLL;

                /*
                 * If there are jobs in the queue, run them first.
                 */
                if (!LIST_EMPTY(&sc->sc_queue)) {
                        /*
                         * If we can't queue, we have to abort, since
                         * we have to preserve order.
                         */
                        if (dontqueue) {
                                splx(s);
                                return (TRY_AGAIN_LATER);
                        }
                        /*
                         * Swap with the first queue entry.
                         */
                        adv_enqueue(sc, xs, 0);
                        xs = adv_dequeue(sc);
                        fromqueue = 1;
                }
        }


        /*
         * get a ccb to use.  If the transfer
         * is from a buf (possibly from interrupt time)
         * then we can't allow it to sleep
         */

        flags = xs->flags;
        if ((ccb = adv_get_ccb(sc, flags)) == NULL) {
                /*
                 * If we can't queue, we lose.
                 */
                if (dontqueue) {
                        splx(s);
                        return (TRY_AGAIN_LATER);
                }
                /*
                 * Stuff ourselves into the queue, in front
                 * if we came off it in the first place.
                 */
                adv_enqueue(sc, xs, fromqueue);
                splx(s);
                return (SUCCESSFULLY_QUEUED);
        }
        splx(s);                /* done playing with the queue */

        ccb->xs = xs;
        ccb->timeout = xs->timeout;

        /*
         * Build up the request
         */
        memset(&ccb->scsiq, 0, sizeof(ASC_SCSI_Q));

        ccb->scsiq.q2.ccb_ptr = (ulong) ccb;

        ccb->scsiq.cdbptr = &xs->cmd->opcode;
        ccb->scsiq.q2.cdb_len = xs->cmdlen;
        ccb->scsiq.q1.target_id = ASC_TID_TO_TARGET_ID(sc_link->target);
        ccb->scsiq.q1.target_lun = sc_link->lun;
        ccb->scsiq.q2.target_ix = ASC_TIDLUN_TO_IX(sc_link->target,
            sc_link->lun);
        ccb->scsiq.q1.sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
            ADV_CCB_OFF(ccb) + offsetof(struct adv_ccb, scsi_sense);
        ccb->scsiq.q1.sense_len = sizeof(struct scsi_sense_data);

        /*
         * If there are any outstanding requests for the current target,
         * then every 255th request send an ORDERED request.  This heuristic
         * tries to retain the benefit of request sorting while preventing
         * request starvation.  255 is the max number of tags or pending
         * commands a device may have outstanding.
         */
        sc->reqcnt[sc_link->target]++;
        if ((sc->reqcnt[sc_link->target] > 0) &&
            (sc->reqcnt[sc_link->target] % 255) == 0) {
                ccb->scsiq.q2.tag_code = M2_QTAG_MSG_ORDERED;
        } else {
                ccb->scsiq.q2.tag_code = M2_QTAG_MSG_SIMPLE;
        }


        if (xs->datalen) {
                /*
                 * Map the DMA transfer.
                 */
                error = bus_dmamap_load(dmat,
                    ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
                    (flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);

                if (error) {
                        if (error == EFBIG) {
                                printf("%s: adv_scsi_cmd, more than %d dma"
                                    " segments\n",
                                    sc->sc_dev.dv_xname, ASC_MAX_SG_LIST);
                        } else {
                                printf("%s: adv_scsi_cmd, error %d loading"
                                    " dma map\n",
                                    sc->sc_dev.dv_xname, error);
                        }

                        xs->error = XS_DRIVER_STUFFUP;
                        adv_free_ccb(sc, ccb);
                        return (COMPLETE);
                }
                bus_dmamap_sync(dmat, ccb->dmamap_xfer,
                    0, ccb->dmamap_xfer->dm_mapsize,
                    ((flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
                    BUS_DMASYNC_PREWRITE));


                memset(&ccb->sghead, 0, sizeof(ASC_SG_HEAD));

                for (nsegs = 0; nsegs < ccb->dmamap_xfer->dm_nsegs; nsegs++) {

                        ccb->sghead.sg_list[nsegs].addr =
                            ccb->dmamap_xfer->dm_segs[nsegs].ds_addr;
                        ccb->sghead.sg_list[nsegs].bytes =
                            ccb->dmamap_xfer->dm_segs[nsegs].ds_len;
                }

                ccb->sghead.entry_cnt = ccb->scsiq.q1.sg_queue_cnt =
                    ccb->dmamap_xfer->dm_nsegs;

                ccb->scsiq.q1.cntl |= ASC_QC_SG_HEAD;
                ccb->scsiq.sg_head = &ccb->sghead;
                ccb->scsiq.q1.data_addr = 0;
                ccb->scsiq.q1.data_cnt = 0;
        } else {
                /*
                 * No data xfer, use non S/G values.
                 */
                ccb->scsiq.q1.data_addr = 0;
                ccb->scsiq.q1.data_cnt = 0;
        }

#ifdef ASC_DEBUG
        printf("id = %d, lun = %d, cmd = %d, ccb = 0x%lX \n",
            sc_link->target, sc_link->lun, xs->cmd->opcode,
            (unsigned long)ccb);
#endif
        s = splbio();
        adv_queue_ccb(sc, ccb);
        splx(s);

        /*
         * Usually return SUCCESSFULLY_QUEUED
         */
        if ((flags & SCSI_POLL) == 0)
                return (SUCCESSFULLY_QUEUED);

        /*
         * If we can't use interrupts, poll on completion
         */
        if (adv_poll(sc, xs, ccb->timeout)) {
                adv_timeout(ccb);
                if (adv_poll(sc, xs, ccb->timeout))
                        adv_timeout(ccb);
        }
        return (COMPLETE);
}

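/*
 * adv_intr() - interrupt handler.
 *
 * Calls AscISR() to service the chip, then kicks off the first entry of
 * the software queue, if any, since completing a command should have
 * freed a CCB.
 */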
int
adv_intr(arg)
        void *arg;
{
        ASC_SOFTC *sc = arg;
        struct scsi_xfer *xs;

#ifdef ASC_DEBUG
        int int_pend = FALSE;

        if (ASC_IS_INT_PENDING(sc->sc_iot, sc->sc_ioh)) {
                int_pend = TRUE;
                printf("ISR - ");
        }
#endif
        AscISR(sc);
#ifdef ASC_DEBUG
        if (int_pend)
                printf("\n");
#endif

        /*
         * If there are queue entries in the software queue, try to
         * run the first one.  We should be more or less guaranteed
         * to succeed, since we just freed a CCB.
         *
         * NOTE: adv_scsi_cmd() relies on our calling it with
         * the first entry in the queue.
         */
        if ((xs = LIST_FIRST(&sc->sc_queue)) != NULL)
                (void) adv_scsi_cmd(xs);

        return (1);
}


/*
 * Poll a particular unit, looking for a particular xs
 */
static int
adv_poll(sc, xs, count)
        ASC_SOFTC *sc;
        struct scsi_xfer *xs;
        int count;
{
        int s;

        /* timeouts are in msec, so we loop in 1000 usec cycles */
        while (count) {
                s = splbio();
                adv_intr(sc);
                splx(s);
                if (xs->flags & ITSDONE)
                        return (0);
                delay(1000);    /* only happens in boot so ok */
                count--;
        }
        return (1);
}


static void
adv_timeout(arg)
        void *arg;
{
        ADV_CCB *ccb = arg;
        struct scsi_xfer *xs = ccb->xs;
        struct scsi_link *sc_link = xs->sc_link;
        ASC_SOFTC *sc = sc_link->adapter_softc;
        int s;

        sc_print_addr(sc_link);
        printf("timed out");

        s = splbio();

        /*
         * If it has been through here before, then a previous abort has
         * failed; don't try to abort again, reset the bus instead.
         */
        if (ccb->flags & CCB_ABORT) {
                /* abort timed out */
                printf(" AGAIN. Resetting Bus\n");
                /* Let's try resetting the bus! */
                if (AscResetBus(sc) == ASC_ERROR) {
                        ccb->timeout = sc->scsi_reset_wait;
                        adv_queue_ccb(sc, ccb);
                }
        } else {
                /* abort the operation that has timed out */
                printf("\n");
                AscAbortCCB(sc, (u_int32_t) ccb);
                ccb->xs->error = XS_TIMEOUT;
                ccb->timeout = ADV_ABORT_TIMEOUT;
                ccb->flags |= CCB_ABORT;
                adv_queue_ccb(sc, ccb);
        }

        splx(s);
}

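/*
 * adv_watchdog() - retry handler set up by adv_start_ccbs() when
 * AscExeScsiQueue() reported ASC_BUSY.  Clears the watchdog flag on the
 * CCB and tries to start the waiting CCBs again.
 */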
static void
adv_watchdog(arg)
        void *arg;
{
        ADV_CCB *ccb = arg;
        struct scsi_xfer *xs = ccb->xs;
        struct scsi_link *sc_link = xs->sc_link;
        ASC_SOFTC *sc = sc_link->adapter_softc;
        int s;

        s = splbio();

        ccb->flags &= ~CCB_WATCHDOG;
        adv_start_ccbs(sc);

        splx(s);
}


/******************************************************************************/
/*                 NARROW and WIDE boards Interrupt callbacks                 */
/******************************************************************************/


/*
 * adv_narrow_isr_callback() - Second Level Interrupt Handler called by AscISR()
 *
 * Interrupt callback function for the Narrow SCSI Asc Library.
 */
static void
adv_narrow_isr_callback(sc, qdonep)
        ASC_SOFTC *sc;
        ASC_QDONE_INFO *qdonep;
{
        bus_dma_tag_t dmat = sc->sc_dmat;
        ADV_CCB *ccb = (ADV_CCB *) qdonep->d2.ccb_ptr;
        struct scsi_xfer *xs = ccb->xs;
        struct scsi_sense_data *s1, *s2;


#ifdef ASC_DEBUG
        printf(" - ccb=0x%lx, id=%d, lun=%d, cmd=%d, ",
            (unsigned long)ccb,
            xs->sc_link->target, xs->sc_link->lun, xs->cmd->opcode);
#endif
        timeout_del(&xs->stimeout);

        /*
         * If this was a data transfer, unload the map that described
         * the data buffer.
         */
        if (xs->datalen) {
                bus_dmamap_sync(dmat, ccb->dmamap_xfer,
                    0, ccb->dmamap_xfer->dm_mapsize,
                    ((xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
                    BUS_DMASYNC_POSTWRITE));
                bus_dmamap_unload(dmat, ccb->dmamap_xfer);
        }
        if ((ccb->flags & CCB_ALLOC) == 0) {
                printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
                Debugger();
                return;
        }
        /*
         * 'qdonep' contains the command's ending status.
         */
#ifdef ASC_DEBUG
        printf("d_s=%d, h_s=%d", qdonep->d3.done_stat, qdonep->d3.host_stat);
#endif
        switch (qdonep->d3.done_stat) {
        case ASC_QD_NO_ERROR:
                switch (qdonep->d3.host_stat) {
                case ASC_QHSTA_NO_ERROR:
                        xs->error = XS_NOERROR;
                        xs->resid = 0;
                        break;

                default:
                        /* QHSTA error occurred */
                        xs->error = XS_DRIVER_STUFFUP;
                        break;
                }

                /*
                 * If an INQUIRY command completed successfully, then call
                 * the AscInquiryHandling() function to work around broken
                 * devices.
                 */
                if ((xs->cmd->opcode == SCSICMD_Inquiry) &&
                    (xs->sc_link->lun == 0) &&
                    (xs->datalen - qdonep->remain_bytes) >= 8) {
                        AscInquiryHandling(sc,
                            xs->sc_link->target & 0x7,
                            (ASC_SCSI_INQUIRY *) xs->data);
                }
                break;

        case ASC_QD_WITH_ERROR:
                switch (qdonep->d3.host_stat) {
                case ASC_QHSTA_NO_ERROR:
                        if (qdonep->d3.scsi_stat == SS_CHK_CONDITION) {
                                s1 = &ccb->scsi_sense;
                                s2 = &xs->sense;
                                *s2 = *s1;
                                xs->error = XS_SENSE;
                        } else {
                                xs->error = XS_DRIVER_STUFFUP;
                        }
                        break;

                default:
                        /* QHSTA error occurred */
                        xs->error = XS_DRIVER_STUFFUP;
                        break;
                }
                break;

        case ASC_QD_ABORTED_BY_HOST:
        default:
                xs->error = XS_DRIVER_STUFFUP;
                break;
        }


        adv_free_ccb(sc, ccb);
        xs->flags |= ITSDONE;
        scsi_done(xs);
}