/*	$NetBSD: adv.c,v 1.43 2009/03/14 15:36:17 dsl Exp $	*/

/*
 * Generic driver for the Advanced Systems Inc. Narrow SCSI controllers
 *
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Author: Baldassare Dante Profeta <dante@mclink.it>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: adv.c,v 1.43 2009/03/14 15:36:17 dsl Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <sys/bus.h>
#include <sys/intr.h>

#include <uvm/uvm_extern.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/advlib.h>
#include <dev/ic/adv.h>

#ifndef DDB
#define	Debugger()	panic("should call debugger here (adv.c)")
#endif /* ! DDB */
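
/*
 * The driver keeps its CCBs inside a single DMA-mapped control structure.
 * Requests from the scsipi layer are wrapped in a CCB, queued on
 * sc_waiting_ccb, handed to the board through AscExeScsiQueue() and
 * completed from adv_narrow_isr_callback().
 */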


/* #define ASC_DEBUG */

/******************************************************************************/


static int adv_alloc_control_data(ASC_SOFTC *);
static void adv_free_control_data(ASC_SOFTC *);
static int adv_create_ccbs(ASC_SOFTC *, ADV_CCB *, int);
static void adv_free_ccb(ASC_SOFTC *, ADV_CCB *);
static void adv_reset_ccb(ADV_CCB *);
static int adv_init_ccb(ASC_SOFTC *, ADV_CCB *);
static ADV_CCB *adv_get_ccb(ASC_SOFTC *);
static void adv_queue_ccb(ASC_SOFTC *, ADV_CCB *);
static void adv_start_ccbs(ASC_SOFTC *);


static void adv_scsipi_request(struct scsipi_channel *,
	scsipi_adapter_req_t, void *);
static void advminphys(struct buf *);
static void adv_narrow_isr_callback(ASC_SOFTC *, ASC_QDONE_INFO *);

static int adv_poll(ASC_SOFTC *, struct scsipi_xfer *, int);
static void adv_timeout(void *);
static void adv_watchdog(void *);


/******************************************************************************/

#define	ADV_ABORT_TIMEOUT	2000	/* time to wait for abort (mSec) */
#define	ADV_WATCH_TIMEOUT	1000	/* time to wait for watchdog (mSec) */

/******************************************************************************/
/*                          Control Blocks routines                           */
/******************************************************************************/


static int
adv_alloc_control_data(ASC_SOFTC *sc)
{
	int error;

	/*
	 * Allocate the control blocks.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adv_control),
	    PAGE_SIZE, 0, &sc->sc_control_seg, 1,
	    &sc->sc_control_nsegs, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to allocate control structures,"
		    " error = %d\n", error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_control_seg,
	    sc->sc_control_nsegs, sizeof(struct adv_control),
	    (void **) & sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to map control structures, error = %d\n",
		    error);
		return (error);
	}
	/*
	 * Create and load the DMA map used for the control blocks.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adv_control),
	    1, sizeof(struct adv_control), 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_control)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to create control DMA map, error = %d\n",
		    error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
	    sc->sc_control, sizeof(struct adv_control), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to load control DMA map, error = %d\n",
		    error);
		return (error);
	}

	/*
	 * Initialize the overrun_buf address.
	 */
	sc->overrun_buf = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    offsetof(struct adv_control, overrun_buf);

	return (0);
}

static void
adv_free_control_data(ASC_SOFTC *sc)
{

	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap_control);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap_control);
	sc->sc_dmamap_control = NULL;

	bus_dmamem_unmap(sc->sc_dmat, (void *) sc->sc_control,
	    sizeof(struct adv_control));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_control_seg,
	    sc->sc_control_nsegs);
}

/*
 * Create a set of ccbs and add them to the free list.  Called once
 * by adv_attach().  We return the number of CCBs successfully created.
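 *
 * The CCBs live inside the DMA-mapped control structure, so each one has a
 * bus address; adv_init_ccb() records that address in a small hash table so
 * the interrupt path can map a completed request back to its CCB.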
 */
static int
adv_create_ccbs(ASC_SOFTC *sc, ADV_CCB *ccbstore, int count)
{
	ADV_CCB *ccb;
	int i, error;

	memset(ccbstore, 0, sizeof(ADV_CCB) * count);
	for (i = 0; i < count; i++) {
		ccb = &ccbstore[i];
		if ((error = adv_init_ccb(sc, ccb)) != 0) {
			aprint_error_dev(&sc->sc_dev, "unable to initialize ccb, error = %d\n",
			    error);
			return (i);
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
	}

	return (i);
}


/*
 * A ccb is put onto the free list.
 */
static void
adv_free_ccb(ASC_SOFTC *sc, ADV_CCB *ccb)
{
	int s;

	s = splbio();
	adv_reset_ccb(ccb);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
	splx(s);
}


static void
adv_reset_ccb(ADV_CCB *ccb)
{

	ccb->flags = 0;
}


static int
adv_init_ccb(ASC_SOFTC *sc, ADV_CCB *ccb)
{
	int hashnum, error;

	callout_init(&ccb->ccb_watchdog, 0);

	/*
	 * Create the DMA map for this CCB.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
	    (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
	    ASC_MAX_SG_LIST, (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
	if (error) {
		aprint_error_dev(&sc->sc_dev, "unable to create DMA map, error = %d\n",
		    error);
		return (error);
	}

	/*
	 * put in the phystokv hash table
	 * Never gets taken out.
	 */
	ccb->hashkey = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADV_CCB_OFF(ccb);
	hashnum = CCB_HASH(ccb->hashkey);
	ccb->nexthash = sc->sc_ccbhash[hashnum];
	sc->sc_ccbhash[hashnum] = ccb;

	adv_reset_ccb(ccb);
	return (0);
}


/*
 * Get a free ccb
 *
 * If there are none, see if we can allocate a new one
 */
static ADV_CCB *
adv_get_ccb(ASC_SOFTC *sc)
{
	ADV_CCB *ccb = 0;
	int s;

	s = splbio();
	ccb = TAILQ_FIRST(&sc->sc_free_ccb);
	if (ccb != NULL) {
		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
		ccb->flags |= CCB_ALLOC;
	}
	splx(s);
	return (ccb);
}


/*
 * Given a physical address, find the ccb that it corresponds to.
 */
ADV_CCB *
adv_ccb_phys_kv(ASC_SOFTC *sc, u_long ccb_phys)
{
	int hashnum = CCB_HASH(ccb_phys);
	ADV_CCB *ccb = sc->sc_ccbhash[hashnum];

	while (ccb) {
		if (ccb->hashkey == ccb_phys)
			break;
		ccb = ccb->nexthash;
	}
	return (ccb);
}


/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 */
static void
adv_queue_ccb(ASC_SOFTC *sc, ADV_CCB *ccb)
{

	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

	adv_start_ccbs(sc);
}


static void
adv_start_ccbs(ASC_SOFTC *sc)
{
	ADV_CCB *ccb;

	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {
		if (ccb->flags & CCB_WATCHDOG)
			callout_stop(&ccb->ccb_watchdog);

		if (AscExeScsiQueue(sc, &ccb->scsiq) == ASC_BUSY) {
			ccb->flags |= CCB_WATCHDOG;
			callout_reset(&ccb->ccb_watchdog,
			    (ADV_WATCH_TIMEOUT * hz) / 1000,
			    adv_watchdog, ccb);
			break;
		}
		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);

		if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
			callout_reset(&ccb->xs->xs_callout,
			    mstohz(ccb->timeout), adv_timeout, ccb);
	}
}


/******************************************************************************/
/*                       SCSI layer interfacing routines                      */
/******************************************************************************/


int
adv_init(ASC_SOFTC *sc)
{
	int warn;

	if (!AscFindSignature(sc->sc_iot, sc->sc_ioh)) {
		aprint_error("adv_init: failed to find signature\n");
		return (1);
	}

	/*
	 * Read the board configuration
	 */
	AscInitASC_SOFTC(sc);
	warn = AscInitFromEEP(sc);
	if (warn) {
		aprint_error_dev(&sc->sc_dev, "-get: ");
		switch (warn) {
		case -1:
			aprint_normal("Chip is not halted\n");
			break;

		case -2:
			aprint_normal("Couldn't get MicroCode Start"
			    " address\n");
			break;

		case ASC_WARN_IO_PORT_ROTATE:
			aprint_normal("I/O port address modified\n");
			break;

		case ASC_WARN_AUTO_CONFIG:
			aprint_normal("I/O port increment switch enabled\n");
			break;

		case ASC_WARN_EEPROM_CHKSUM:
			aprint_normal("EEPROM checksum error\n");
			break;

		case ASC_WARN_IRQ_MODIFIED:
			aprint_normal("IRQ modified\n");
			break;

		case ASC_WARN_CMD_QNG_CONFLICT:
			aprint_normal("tag queuing enabled w/o disconnects\n");
			break;

		default:
			aprint_normal("unknown warning %d\n", warn);
		}
	}
	if (sc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT)
		sc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT;

	/*
	 * Modify the board configuration
	 */
	warn = AscInitFromASC_SOFTC(sc);
	if (warn) {
		aprint_error_dev(&sc->sc_dev, "-set: ");
		switch (warn) {
		case ASC_WARN_CMD_QNG_CONFLICT:
			aprint_normal("tag queuing enabled w/o disconnects\n");
			break;

		case ASC_WARN_AUTO_CONFIG:
			aprint_normal("I/O port increment switch enabled\n");
			break;

		default:
			aprint_normal("unknown warning %d\n", warn);
		}
	}
	sc->isr_callback = (ASC_CALLBACK) adv_narrow_isr_callback;

	return (0);
}


void
adv_attach(ASC_SOFTC *sc)
{
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	int i, error;

	/*
	 * Initialize board RISC chip and enable interrupts.
	 */
	switch (AscInitDriver(sc)) {
	case 0:
		/* AllOK */
		break;

	case 1:
		panic("%s: bad signature", device_xname(&sc->sc_dev));
		break;

	case 2:
		panic("%s: unable to load MicroCode",
		    device_xname(&sc->sc_dev));
		break;

	case 3:
		panic("%s: unable to initialize MicroCode",
		    device_xname(&sc->sc_dev));
		break;

	default:
		panic("%s: unable to initialize board RISC chip",
		    device_xname(&sc->sc_dev));
	}

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &sc->sc_dev;
	adapt->adapt_nchannels = 1;
	/* adapt_openings initialized below */
	/* adapt_max_periph initialized below */
	adapt->adapt_request = adv_scsipi_request;
	adapt->adapt_minphys = advminphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = 8;
	chan->chan_nluns = 8;
	chan->chan_id = sc->chip_scsi_id;

	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);

	/*
	 * Allocate the Control Blocks and the overrun buffer.
	 */
	error = adv_alloc_control_data(sc);
	if (error)
		return; /* (error) */

	/*
	 * Create and initialize the Control Blocks.
	 */
	i = adv_create_ccbs(sc, sc->sc_control->ccbs, ADV_MAX_CCB);
	if (i == 0) {
		aprint_error_dev(&sc->sc_dev, "unable to create control blocks\n");
		return; /* (ENOMEM) */
	} else if (i != ADV_MAX_CCB) {
		aprint_error_dev(&sc->sc_dev,
		    "WARNING: only %d of %d control blocks created\n",
		    i, ADV_MAX_CCB);
	}

	adapt->adapt_openings = i;
	adapt->adapt_max_periph = adapt->adapt_openings;

	sc->sc_child = config_found(&sc->sc_dev, chan, scsiprint);
}

int
adv_detach(ASC_SOFTC *sc, int flags)
{
	int rv = 0;

	if (sc->sc_child != NULL)
		rv = config_detach(sc->sc_child, flags);

	adv_free_control_data(sc);

	return (rv);
}

static void
advminphys(struct buf *bp)
{

	if (bp->b_bcount > ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE))
		bp->b_bcount = ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE);
	minphys(bp);
}


/*
 * Start a scsi operation given the command and the data address.  Also needs
 * the unit, target and lu.
 */

static void
adv_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	ASC_SOFTC *sc = (void *)chan->chan_adapter->adapt_dev;
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADV_CCB *ccb;
	int s, flags, error, nsegs;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		flags = xs->xs_control;

		/*
		 * Get a CCB to use.
		 */
		ccb = adv_get_ccb(sc);
#ifdef DIAGNOSTIC
		/*
		 * This should never happen as we track the resources
		 * in the mid-layer.
		 */
		if (ccb == NULL) {
			scsipi_printaddr(periph);
			printf("unable to allocate ccb\n");
			panic("adv_scsipi_request");
		}
#endif

		ccb->xs = xs;
		ccb->timeout = xs->timeout;

		/*
		 * Build up the request
		 */
		memset(&ccb->scsiq, 0, sizeof(ASC_SCSI_Q));

		ccb->scsiq.q2.ccb_ptr =
		    sc->sc_dmamap_control->dm_segs[0].ds_addr +
		    ADV_CCB_OFF(ccb);

		ccb->scsiq.cdbptr = &xs->cmd->opcode;
		ccb->scsiq.q2.cdb_len = xs->cmdlen;
		ccb->scsiq.q1.target_id =
		    ASC_TID_TO_TARGET_ID(periph->periph_target);
		ccb->scsiq.q1.target_lun = periph->periph_lun;
		ccb->scsiq.q2.target_ix =
		    ASC_TIDLUN_TO_IX(periph->periph_target,
		    periph->periph_lun);
		ccb->scsiq.q1.sense_addr =
		    sc->sc_dmamap_control->dm_segs[0].ds_addr +
		    ADV_CCB_OFF(ccb) + offsetof(struct adv_ccb, scsi_sense);
		ccb->scsiq.q1.sense_len = sizeof(struct scsi_sense_data);

		/*
		 * If there are any outstanding requests for the current
		 * target, then every 255th request send an ORDERED request.
		 * This heuristic tries to retain the benefit of request
		 * sorting while preventing request starvation. 255 is the
		 * max number of tags or pending commands a device may have
		 * outstanding.
		 */
		sc->reqcnt[periph->periph_target]++;
		if (((sc->reqcnt[periph->periph_target] > 0) &&
		    (sc->reqcnt[periph->periph_target] % 255) == 0) ||
		    xs->bp == NULL || (xs->bp->b_flags & B_ASYNC) == 0) {
			ccb->scsiq.q2.tag_code = M2_QTAG_MSG_ORDERED;
		} else {
			ccb->scsiq.q2.tag_code = M2_QTAG_MSG_SIMPLE;
		}

		if (xs->datalen) {
			/*
			 * Map the DMA transfer.
			 */
#ifdef TFS
			if (flags & SCSI_DATA_UIO) {
				error = bus_dmamap_load_uio(dmat,
				    ccb->dmamap_xfer, (struct uio *) xs->data,
				    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
				    BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
				    ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
				    BUS_DMA_WRITE));
			} else
#endif /* TFS */
			{
				error = bus_dmamap_load(dmat, ccb->dmamap_xfer,
				    xs->data, xs->datalen, NULL,
				    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
				    BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
				    ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
				    BUS_DMA_WRITE));
			}

			switch (error) {
			case 0:
				break;


			case ENOMEM:
			case EAGAIN:
				xs->error = XS_RESOURCE_SHORTAGE;
				goto out_bad;

			default:
				xs->error = XS_DRIVER_STUFFUP;
				if (error == EFBIG) {
					aprint_error_dev(&sc->sc_dev, "adv_scsi_cmd, more than %d"
					    " DMA segments\n",
					    ASC_MAX_SG_LIST);
				} else {
					aprint_error_dev(&sc->sc_dev, "adv_scsi_cmd, error %d"
					    " loading DMA map\n",
					    error);
				}

			out_bad:
				adv_free_ccb(sc, ccb);
				scsipi_done(xs);
				return;
			}
			bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
			    ccb->dmamap_xfer->dm_mapsize,
			    (flags & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

			memset(&ccb->sghead, 0, sizeof(ASC_SG_HEAD));

			for (nsegs = 0;
			    nsegs < ccb->dmamap_xfer->dm_nsegs; nsegs++) {
				ccb->sghead.sg_list[nsegs].addr =
				    ccb->dmamap_xfer->dm_segs[nsegs].ds_addr;
				ccb->sghead.sg_list[nsegs].bytes =
				    ccb->dmamap_xfer->dm_segs[nsegs].ds_len;
			}

			ccb->sghead.entry_cnt = ccb->scsiq.q1.sg_queue_cnt =
			    ccb->dmamap_xfer->dm_nsegs;

			ccb->scsiq.q1.cntl |= ASC_QC_SG_HEAD;
			ccb->scsiq.sg_head = &ccb->sghead;
			ccb->scsiq.q1.data_addr = 0;
			ccb->scsiq.q1.data_cnt = 0;
		} else {
			/*
			 * No data xfer, use non S/G values.
			 */
			ccb->scsiq.q1.data_addr = 0;
			ccb->scsiq.q1.data_cnt = 0;
		}

#ifdef ASC_DEBUG
		printf("id = %d, lun = %d, cmd = %d, ccb = 0x%lX\n",
		    periph->periph_target,
		    periph->periph_lun, xs->cmd->opcode,
		    (unsigned long)ccb);
#endif
		s = splbio();
		adv_queue_ccb(sc, ccb);
		splx(s);

		if ((flags & XS_CTL_POLL) == 0)
			return;

		/* Not allowed to use interrupts, poll for completion. */
		if (adv_poll(sc, xs, ccb->timeout)) {
			adv_timeout(ccb);
			if (adv_poll(sc, xs, ccb->timeout))
				adv_timeout(ccb);
		}
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* XXX Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
	    {
		/*
		 * We can't really set the mode, but we know how to
		 * query what the firmware negotiated.
		 */
		struct scsipi_xfer_mode *xm = arg;
		u_int8_t sdtr_data;
		ASC_SCSI_BIT_ID_TYPE tid_bit;

		tid_bit = ASC_TIX_TO_TARGET_ID(xm->xm_target);

		xm->xm_mode = 0;
		xm->xm_period = 0;
		xm->xm_offset = 0;

		if (sc->init_sdtr & tid_bit) {
			xm->xm_mode |= PERIPH_CAP_SYNC;
			sdtr_data = sc->sdtr_data[xm->xm_target];
			xm->xm_period =
			    sc->sdtr_period_tbl[(sdtr_data >> 4) &
			    (sc->max_sdtr_index - 1)];
			xm->xm_offset = sdtr_data & ASC_SYN_MAX_OFFSET;
		}

		if (sc->use_tagged_qng & tid_bit)
			xm->xm_mode |= PERIPH_CAP_TQING;

		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	    }
	}
}

int
adv_intr(void *arg)
{
	ASC_SOFTC *sc = arg;

#ifdef ASC_DEBUG
	int int_pend = FALSE;

	if (ASC_IS_INT_PENDING(sc->sc_iot, sc->sc_ioh))
	{
		int_pend = TRUE;
		printf("ISR - ");
	}
#endif
	AscISR(sc);
#ifdef ASC_DEBUG
	if (int_pend)
		printf("\n");
#endif

	return (1);
}


/*
 * Poll a particular unit, looking for a particular xs
 */
static int
adv_poll(ASC_SOFTC *sc, struct scsipi_xfer *xs, int count)
{

	/* timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		adv_intr(sc);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);	/* only happens in boot so ok */
		count--;
	}
	return (1);
}


static void
adv_timeout(void *arg)
{
	ADV_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	ASC_SOFTC *sc =
	    (void *)periph->periph_channel->chan_adapter->adapt_dev;
	int s;

	scsipi_printaddr(periph);
	printf("timed out");

	s = splbio();

	/*
	 * If it has been through before, then a previous abort has failed,
	 * don't try abort again, reset the bus instead.
	 */
	if (ccb->flags & CCB_ABORT) {
		/* abort timed out */
		printf(" AGAIN. Resetting Bus\n");
		/* Let's try resetting the bus! */
		if (AscResetBus(sc) == ASC_ERROR) {
			ccb->timeout = sc->scsi_reset_wait;
			adv_queue_ccb(sc, ccb);
		}
	} else {
		/* abort the operation that has timed out */
		printf("\n");
		AscAbortCCB(sc, ccb);
		ccb->xs->error = XS_TIMEOUT;
		ccb->timeout = ADV_ABORT_TIMEOUT;
		ccb->flags |= CCB_ABORT;
		adv_queue_ccb(sc, ccb);
	}

	splx(s);
}


static void
adv_watchdog(void *arg)
{
	ADV_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	ASC_SOFTC *sc =
	    (void *)periph->periph_channel->chan_adapter->adapt_dev;
	int s;

	s = splbio();

	ccb->flags &= ~CCB_WATCHDOG;
	adv_start_ccbs(sc);

	splx(s);
}


/******************************************************************************/
/*                      NARROW boards Interrupt callbacks                     */
/******************************************************************************/


/*
 * adv_narrow_isr_callback() - Second Level Interrupt Handler called by AscISR()
 *
 * Interrupt callback function for the Narrow SCSI Asc Library.
 */
static void
adv_narrow_isr_callback(ASC_SOFTC *sc, ASC_QDONE_INFO *qdonep)
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADV_CCB *ccb;
	struct scsipi_xfer *xs;
	struct scsi_sense_data *s1, *s2;


	ccb = adv_ccb_phys_kv(sc, qdonep->d2.ccb_ptr);
	xs = ccb->xs;

#ifdef ASC_DEBUG
	printf(" - ccb=0x%lx, id=%d, lun=%d, cmd=%d, ",
	    (unsigned long)ccb,
	    xs->xs_periph->periph_target,
	    xs->xs_periph->periph_lun, xs->cmd->opcode);
#endif
	callout_stop(&ccb->xs->xs_callout);

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}
	if ((ccb->flags & CCB_ALLOC) == 0) {
		aprint_error_dev(&sc->sc_dev, "exiting ccb not allocated!\n");
		Debugger();
		return;
	}
	/*
	 * 'qdonep' contains the command's ending status.
	 */
#ifdef ASC_DEBUG
	printf("d_s=%d, h_s=%d", qdonep->d3.done_stat, qdonep->d3.host_stat);
#endif
	switch (qdonep->d3.done_stat) {
	case ASC_QD_NO_ERROR:
		switch (qdonep->d3.host_stat) {
		case ASC_QHSTA_NO_ERROR:
			xs->error = XS_NOERROR;
			xs->resid = 0;
			break;

		default:
			/* QHSTA error occurred */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}

		/*
		 * If an INQUIRY command completed successfully, then call
		 * the AscInquiryHandling() function to patch bugged boards.
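		 * (Only done for LUN 0, and only if at least the first 8
		 * bytes of inquiry data actually arrived.)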
		 */
		if ((xs->cmd->opcode == SCSICMD_Inquiry) &&
		    (xs->xs_periph->periph_lun == 0) &&
		    (xs->datalen - qdonep->remain_bytes) >= 8) {
			AscInquiryHandling(sc,
			    xs->xs_periph->periph_target & 0x7,
			    (ASC_SCSI_INQUIRY *) xs->data);
		}
		break;

	case ASC_QD_WITH_ERROR:
		switch (qdonep->d3.host_stat) {
		case ASC_QHSTA_NO_ERROR:
			if (qdonep->d3.scsi_stat == SS_CHK_CONDITION) {
				s1 = &ccb->scsi_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
			} else {
				xs->error = XS_DRIVER_STUFFUP;
			}
			break;

		case ASC_QHSTA_M_SEL_TIMEOUT:
			xs->error = XS_SELTIMEOUT;
			break;

		default:
			/* QHSTA error occurred */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case ASC_QD_ABORTED_BY_HOST:
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}


	adv_free_ccb(sc, ccb);
	scsipi_done(xs);
}