/* $NetBSD: adw.c,v 1.43 2003/11/02 11:07:44 wiz Exp $ */

/*
 * Generic driver for the Advanced Systems Inc. SCSI controllers
 *
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Author: Baldassare Dante Profeta <dante@mclink.it>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: adw.c,v 1.43 2003/11/02 11:07:44 wiz Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <uvm/uvm_extern.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/adwlib.h>
#include <dev/ic/adwmcode.h>
#include <dev/ic/adw.h>

#ifndef DDB
#define Debugger()      panic("should call debugger here (adw.c)")
#endif /* ! DDB */

/******************************************************************************/


static int adw_alloc_controls(ADW_SOFTC *);
static int adw_alloc_carriers(ADW_SOFTC *);
static int adw_create_ccbs(ADW_SOFTC *, ADW_CCB *, int);
static void adw_free_ccb(ADW_SOFTC *, ADW_CCB *);
static void adw_reset_ccb(ADW_CCB *);
static int adw_init_ccb(ADW_SOFTC *, ADW_CCB *);
static ADW_CCB *adw_get_ccb(ADW_SOFTC *);
static int adw_queue_ccb(ADW_SOFTC *, ADW_CCB *);

static void adw_scsipi_request(struct scsipi_channel *,
        scsipi_adapter_req_t, void *);
static int adw_build_req(ADW_SOFTC *, ADW_CCB *);
static void adw_build_sglist(ADW_CCB *, ADW_SCSI_REQ_Q *, ADW_SG_BLOCK *);
static void adwminphys(struct buf *);
static void adw_isr_callback(ADW_SOFTC *, ADW_SCSI_REQ_Q *);
static void adw_async_callback(ADW_SOFTC *, u_int8_t);

static void adw_print_info(ADW_SOFTC *, int);

static int adw_poll(ADW_SOFTC *, struct scsipi_xfer *, int);
static void adw_timeout(void *);
static void adw_reset_bus(ADW_SOFTC *);


/******************************************************************************/
/*                      DMA Mapping for Control Blocks                       */
/******************************************************************************/


static int
adw_alloc_controls(ADW_SOFTC *sc)
{
        bus_dma_segment_t seg;
        int error, rseg;

        /*
         * Allocate the control structure.
         */
        if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adw_control),
            PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
                printf("%s: unable to allocate control structures,"
                    " error = %d\n", sc->sc_dev.dv_xname, error);
                return (error);
        }
        if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
            sizeof(struct adw_control), (caddr_t *) &sc->sc_control,
            BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
                printf("%s: unable to map control structures, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                return (error);
        }

        /*
         * Create and load the DMA map used for the control blocks.
         */
        if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adw_control),
            1, sizeof(struct adw_control), 0, BUS_DMA_NOWAIT,
            &sc->sc_dmamap_control)) != 0) {
                printf("%s: unable to create control DMA map, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                return (error);
        }
        if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
            sc->sc_control, sizeof(struct adw_control), NULL,
            BUS_DMA_NOWAIT)) != 0) {
                printf("%s: unable to load control DMA map, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                return (error);
        }

        return (0);
}


static int
adw_alloc_carriers(ADW_SOFTC *sc)
{
        bus_dma_segment_t seg;
        int error, rseg;

        /*
         * Allocate the carrier structures.
         */
        sc->sc_control->carriers = malloc(sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
            M_DEVBUF, M_WAITOK);
        if (!sc->sc_control->carriers) {
                aprint_error(
                    "%s: malloc() failed in allocating carrier structures\n",
                    sc->sc_dev.dv_xname);
                return (ENOMEM);
        }

        if ((error = bus_dmamem_alloc(sc->sc_dmat,
            sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
            0x10, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
                aprint_error("%s: unable to allocate carrier structures,"
                    " error = %d\n", sc->sc_dev.dv_xname, error);
                return (error);
        }
        if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
            sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
            (caddr_t *) &sc->sc_control->carriers,
            BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
                aprint_error("%s: unable to map carrier structures,"
                    " error = %d\n", sc->sc_dev.dv_xname, error);
                return (error);
        }

        /*
         * Create and load the DMA map used for the carriers.
         */
        if ((error = bus_dmamap_create(sc->sc_dmat,
            sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, 1,
            sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, 0, BUS_DMA_NOWAIT,
            &sc->sc_dmamap_carrier)) != 0) {
                aprint_error("%s: unable to create carriers DMA map,"
                    " error = %d\n", sc->sc_dev.dv_xname, error);
                return (error);
        }
        if ((error = bus_dmamap_load(sc->sc_dmat,
            sc->sc_dmamap_carrier, sc->sc_control->carriers,
            sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, NULL,
            BUS_DMA_NOWAIT)) != 0) {
                aprint_error("%s: unable to load carriers DMA map,"
                    " error = %d\n", sc->sc_dev.dv_xname, error);
                return (error);
        }

        return (0);
}


/******************************************************************************/
/*                          Control Blocks routines                          */
/******************************************************************************/


/*
 * Create a set of CCBs and add them to the free list.  Called once
 * by adw_init().  We return the number of CCBs successfully created.
 */
static int
adw_create_ccbs(ADW_SOFTC *sc, ADW_CCB *ccbstore, int count)
{
        ADW_CCB *ccb;
        int i, error;

        for (i = 0; i < count; i++) {
                ccb = &ccbstore[i];
                if ((error = adw_init_ccb(sc, ccb)) != 0) {
                        printf("%s: unable to initialize ccb, error = %d\n",
                            sc->sc_dev.dv_xname, error);
                        return (i);
                }
                TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
        }

        return (i);
}


/*
 * Return a CCB to the free list.
 */
static void
adw_free_ccb(ADW_SOFTC *sc, ADW_CCB *ccb)
{
        int s;

        s = splbio();

        adw_reset_ccb(ccb);
        TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);

        splx(s);
}


static void
adw_reset_ccb(ADW_CCB *ccb)
{

        ccb->flags = 0;
}


static int
adw_init_ccb(ADW_SOFTC *sc, ADW_CCB *ccb)
{
        int hashnum, error;

        /*
         * Create the DMA map for this CCB.
         */
        error = bus_dmamap_create(sc->sc_dmat,
            (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
            ADW_MAX_SG_LIST, (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
            0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
        if (error) {
                printf("%s: unable to create CCB DMA map, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                return (error);
        }

        /*
         * Insert the CCB into the phystokv hash table.
         * It is never taken out again.
         */
        ccb->hashkey = htole32(sc->sc_dmamap_control->dm_segs[0].ds_addr +
            ADW_CCB_OFF(ccb));
        hashnum = CCB_HASH(ccb->hashkey);
        ccb->nexthash = sc->sc_ccbhash[hashnum];
        sc->sc_ccbhash[hashnum] = ccb;
        adw_reset_ccb(ccb);
        return (0);
}


/*
 * Get a free CCB.
 *
 * If there are none, see if we can allocate a new one.
 */
static ADW_CCB *
adw_get_ccb(ADW_SOFTC *sc)
{
        ADW_CCB *ccb = 0;
        int s;

        s = splbio();

        ccb = sc->sc_free_ccb.tqh_first;
        if (ccb != NULL) {
                TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
                ccb->flags |= CCB_ALLOC;
        }
        splx(s);
        return (ccb);
}


/*
 * Given a physical address, find the CCB that it corresponds to.
 */
ADW_CCB *
adw_ccb_phys_kv(ADW_SOFTC *sc, u_int32_t ccb_phys)
{
        int hashnum = CCB_HASH(ccb_phys);
        ADW_CCB *ccb = sc->sc_ccbhash[hashnum];

        while (ccb) {
                if (ccb->hashkey == ccb_phys)
                        break;
                ccb = ccb->nexthash;
        }
        return (ccb);
}


/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 */
static int
adw_queue_ccb(ADW_SOFTC *sc, ADW_CCB *ccb)
{
        int errcode = ADW_SUCCESS;

        TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

        while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {

                TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
                errcode = AdwExeScsiQueue(sc, &ccb->scsiq);
                switch (errcode) {
                case ADW_SUCCESS:
                        break;

                case ADW_BUSY:
                        printf("ADW_BUSY\n");
                        return (ADW_BUSY);

                case ADW_ERROR:
                        printf("ADW_ERROR\n");
                        return (ADW_ERROR);
                }

                TAILQ_INSERT_TAIL(&sc->sc_pending_ccb, ccb, chain);

                if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
                        callout_reset(&ccb->xs->xs_callout,
                            mstohz(ccb->timeout), adw_timeout, ccb);
        }

        return (errcode);
}


/******************************************************************************/
/*                       SCSI layer interfacing routines                     */
/******************************************************************************/


int
adw_init(ADW_SOFTC *sc)
{
        u_int16_t warn_code;


        sc->cfg.lib_version = (ADW_LIB_VERSION_MAJOR << 8) |
            ADW_LIB_VERSION_MINOR;
        sc->cfg.chip_version =
            ADW_GET_CHIP_VERSION(sc->sc_iot, sc->sc_ioh, sc->bus_type);

        /*
         * Reset the chip to start and allow register writes.
         */
        if (ADW_FIND_SIGNATURE(sc->sc_iot, sc->sc_ioh) == 0) {
                panic("adw_init: adw_find_signature failed");
        } else {
                AdwResetChip(sc->sc_iot, sc->sc_ioh);

                warn_code = AdwInitFromEEPROM(sc);

                if (warn_code & ADW_WARN_EEPROM_CHKSUM)
                        aprint_error("%s: Bad checksum found. "
                            "Setting default values\n",
                            sc->sc_dev.dv_xname);
                if (warn_code & ADW_WARN_EEPROM_TERMINATION)
                        aprint_error("%s: Bad bus termination setting. "
                            "Using automatic termination.\n",
                            sc->sc_dev.dv_xname);
        }

        sc->isr_callback = (ADW_CALLBACK) adw_isr_callback;
        sc->async_callback = (ADW_CALLBACK) adw_async_callback;

        return 0;
}


void
adw_attach(ADW_SOFTC *sc)
{
        struct scsipi_adapter *adapt = &sc->sc_adapter;
        struct scsipi_channel *chan = &sc->sc_channel;
        int ncontrols, error;

        TAILQ_INIT(&sc->sc_free_ccb);
        TAILQ_INIT(&sc->sc_waiting_ccb);
        TAILQ_INIT(&sc->sc_pending_ccb);

        /*
         * Allocate the Control Blocks.
         */
        error = adw_alloc_controls(sc);
        if (error)
                return; /* (error) */

        memset(sc->sc_control, 0, sizeof(struct adw_control));

        /*
         * Create and initialize the Control Blocks.
         */
        ncontrols = adw_create_ccbs(sc, sc->sc_control->ccbs, ADW_MAX_CCB);
        if (ncontrols == 0) {
                aprint_error("%s: unable to create Control Blocks\n",
                    sc->sc_dev.dv_xname);
                return; /* (ENOMEM) */
        } else if (ncontrols != ADW_MAX_CCB) {
                aprint_error("%s: WARNING: only %d of %d Control Blocks"
                    " created\n",
                    sc->sc_dev.dv_xname, ncontrols, ADW_MAX_CCB);
        }

        /*
         * Create and initialize the Carriers.
         */
        error = adw_alloc_carriers(sc);
        if (error)
                return; /* (error) */

        /*
         * Zero the freeze_device status.
         */
        memset(sc->sc_freeze_dev, 0, sizeof(sc->sc_freeze_dev));

        /*
         * Initialize the adapter.
         */
        switch (AdwInitDriver(sc)) {
        case ADW_IERR_BIST_PRE_TEST:
                panic("%s: BIST pre-test error",
                    sc->sc_dev.dv_xname);
                break;

        case ADW_IERR_BIST_RAM_TEST:
                panic("%s: BIST RAM test error",
                    sc->sc_dev.dv_xname);
                break;

        case ADW_IERR_MCODE_CHKSUM:
                panic("%s: Microcode checksum error",
                    sc->sc_dev.dv_xname);
                break;

        case ADW_IERR_ILLEGAL_CONNECTION:
                panic("%s: All three connectors are in use",
                    sc->sc_dev.dv_xname);
                break;

        case ADW_IERR_REVERSED_CABLE:
                panic("%s: Cable is reversed",
                    sc->sc_dev.dv_xname);
                break;

        case ADW_IERR_HVD_DEVICE:
                panic("%s: HVD attached to LVD connector",
                    sc->sc_dev.dv_xname);
                break;

        case ADW_IERR_SINGLE_END_DEVICE:
                panic("%s: single-ended device is attached to"
                    " one of the connectors",
                    sc->sc_dev.dv_xname);
                break;

        case ADW_IERR_NO_CARRIER:
                panic("%s: unable to create Carriers",
                    sc->sc_dev.dv_xname);
                break;

        case ADW_WARN_BUSRESET_ERROR:
                aprint_error("%s: WARNING: Bus Reset Error\n",
                    sc->sc_dev.dv_xname);
                break;
        }

        /*
         * Fill in the scsipi_adapter.
         */
        memset(adapt, 0, sizeof(*adapt));
        adapt->adapt_dev = &sc->sc_dev;
        adapt->adapt_nchannels = 1;
        adapt->adapt_openings = ncontrols;
        adapt->adapt_max_periph = adapt->adapt_openings;
        adapt->adapt_request = adw_scsipi_request;
        adapt->adapt_minphys = adwminphys;

        /*
         * Fill in the scsipi_channel.
         */
        memset(chan, 0, sizeof(*chan));
        chan->chan_adapter = adapt;
        chan->chan_bustype = &scsi_bustype;
        chan->chan_channel = 0;
        chan->chan_ntargets = ADW_MAX_TID + 1;
        chan->chan_nluns = 8;
        chan->chan_id = sc->chip_scsi_id;

        config_found(&sc->sc_dev, &sc->sc_channel, scsiprint);
}


static void
adwminphys(struct buf *bp)
{

        if (bp->b_bcount > ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE))
                bp->b_bcount = ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE);
        minphys(bp);
}


/*
 * Start a SCSI operation given the command and the data address.
 * Also needs the unit, target and LUN.
 */
static void
adw_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
        struct scsipi_xfer *xs;
        ADW_SOFTC *sc = (void *)chan->chan_adapter->adapt_dev;
        ADW_CCB *ccb;
        int s, retry;

        switch (req) {
        case ADAPTER_REQ_RUN_XFER:
                xs = arg;

                /*
                 * Get a CCB to use.  If the transfer
                 * is from a buf (possibly from interrupt time),
                 * then we can't allow it to sleep.
                 */

                ccb = adw_get_ccb(sc);
#ifdef DIAGNOSTIC
                /*
                 * This should never happen as we track the resources
                 * in the mid-layer.
                 */
                if (ccb == NULL) {
                        scsipi_printaddr(xs->xs_periph);
                        printf("unable to allocate ccb\n");
                        panic("adw_scsipi_request");
                }
#endif

                ccb->xs = xs;
                ccb->timeout = xs->timeout;

                if (adw_build_req(sc, ccb)) {
                        s = splbio();
                        retry = adw_queue_ccb(sc, ccb);
                        splx(s);

                        switch (retry) {
                        case ADW_BUSY:
                                xs->error = XS_RESOURCE_SHORTAGE;
                                adw_free_ccb(sc, ccb);
                                scsipi_done(xs);
                                return;

                        case ADW_ERROR:
                                xs->error = XS_DRIVER_STUFFUP;
                                adw_free_ccb(sc, ccb);
                                scsipi_done(xs);
                                return;
                        }
                        if ((xs->xs_control & XS_CTL_POLL) == 0)
                                return;
                        /*
                         * Not allowed to use interrupts, poll for completion.
                         */
                        if (adw_poll(sc, xs, ccb->timeout)) {
                                adw_timeout(ccb);
                                if (adw_poll(sc, xs, ccb->timeout))
                                        adw_timeout(ccb);
                        }
                }
                return;

        case ADAPTER_REQ_GROW_RESOURCES:
                /* XXX Not supported. */
                return;

        case ADAPTER_REQ_SET_XFER_MODE:
                /* XXX XXX XXX */
                return;
        }
}


/*
 * Build a request structure for the Wide Boards.
 */
static int
adw_build_req(ADW_SOFTC *sc, ADW_CCB *ccb)
{
        struct scsipi_xfer *xs = ccb->xs;
        struct scsipi_periph *periph = xs->xs_periph;
        bus_dma_tag_t dmat = sc->sc_dmat;
        ADW_SCSI_REQ_Q *scsiqp;
        int error;

        scsiqp = &ccb->scsiq;
        memset(scsiqp, 0, sizeof(ADW_SCSI_REQ_Q));

        /*
         * Set the ADW_SCSI_REQ_Q 'ccb_ptr' to point to the
         * physical CCB structure.
         */
        scsiqp->ccb_ptr = ccb->hashkey;

        /*
         * Build the ADW_SCSI_REQ_Q request.
         */

        /*
         * Set the CDB length and copy the CDB to the request structure.
         * The wide boards support a CDB length of at most 16 bytes;
         * the first 12 bytes go in 'cdb', the remainder in 'cdb16'.
         */
        memcpy(&scsiqp->cdb, xs->cmd, ((scsiqp->cdb_len = xs->cmdlen) <= 12) ?
            xs->cmdlen : 12);
        if (xs->cmdlen > 12)
                memcpy(&scsiqp->cdb16, &(xs->cmd[12]), xs->cmdlen - 12);

        scsiqp->target_id = periph->periph_target;
        scsiqp->target_lun = periph->periph_lun;

        scsiqp->vsense_addr = &ccb->scsi_sense;
        scsiqp->sense_addr = htole32(sc->sc_dmamap_control->dm_segs[0].ds_addr +
            ADW_CCB_OFF(ccb) + offsetof(struct adw_ccb, scsi_sense));
        scsiqp->sense_len = sizeof(struct scsipi_sense_data);

        /*
         * Build ADW_SCSI_REQ_Q for a scatter-gather buffer command.
         */
        if (xs->datalen) {
                /*
                 * Map the DMA transfer.
                 */
#ifdef TFS
                if (xs->xs_control & SCSI_DATA_UIO) {
                        error = bus_dmamap_load_uio(dmat,
                            ccb->dmamap_xfer, (struct uio *) xs->data,
                            ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
                            BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
                            ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
                            BUS_DMA_WRITE));
                } else
#endif /* TFS */
                {
                        error = bus_dmamap_load(dmat,
                            ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
                            ((xs->xs_control & XS_CTL_NOSLEEP) ?
                            BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
                            BUS_DMA_STREAMING |
                            ((xs->xs_control & XS_CTL_DATA_IN) ?
                            BUS_DMA_READ : BUS_DMA_WRITE));
                }

                switch (error) {
                case 0:
                        break;
                case ENOMEM:
                case EAGAIN:
                        xs->error = XS_RESOURCE_SHORTAGE;
                        goto out_bad;

                default:
                        xs->error = XS_DRIVER_STUFFUP;
                        printf("%s: error %d loading DMA map\n",
                            sc->sc_dev.dv_xname, error);
out_bad:
                        adw_free_ccb(sc, ccb);
                        scsipi_done(xs);
                        return (0);
                }

                bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
                    ccb->dmamap_xfer->dm_mapsize,
                    (xs->xs_control & XS_CTL_DATA_IN) ?
                    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

                /*
                 * Build scatter-gather list.
                 */
                scsiqp->data_cnt = htole32(xs->datalen);
                scsiqp->vdata_addr = xs->data;
                scsiqp->data_addr = htole32(ccb->dmamap_xfer->dm_segs[0].ds_addr);
                memset(ccb->sg_block, 0,
                    sizeof(ADW_SG_BLOCK) * ADW_NUM_SG_BLOCK);
                adw_build_sglist(ccb, scsiqp, ccb->sg_block);
        } else {
                /*
                 * No data xfer, use non S/G values.
                 */
                scsiqp->data_cnt = 0;
                scsiqp->vdata_addr = 0;
                scsiqp->data_addr = 0;
        }

        return (1);
}


/*
 * Build scatter-gather list for Wide Boards.
 */
static void
adw_build_sglist(ADW_CCB *ccb, ADW_SCSI_REQ_Q *scsiqp, ADW_SG_BLOCK *sg_block)
{
        u_long sg_block_next_addr;      /* block and its next */
        u_int32_t sg_block_physical_addr;
        int i;                          /* how many SG entries */
        bus_dma_segment_t *sg_list = &ccb->dmamap_xfer->dm_segs[0];
        int sg_elem_cnt = ccb->dmamap_xfer->dm_nsegs;


        sg_block_next_addr = (u_long) sg_block; /* allow math operation */
        sg_block_physical_addr = le32toh(ccb->hashkey) +
            offsetof(struct adw_ccb, sg_block[0]);
        scsiqp->sg_real_addr = htole32(sg_block_physical_addr);

        /*
         * If there are more than NO_OF_SG_PER_BLOCK DMA segments (hw sg-list)
         * then split the request into multiple sg-list blocks.
         */

        do {
                for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) {
                        sg_block->sg_list[i].sg_addr = htole32(sg_list->ds_addr);
                        sg_block->sg_list[i].sg_count = htole32(sg_list->ds_len);

                        if (--sg_elem_cnt == 0) {
                                /* last entry, get out */
                                sg_block->sg_cnt = i + 1;
                                sg_block->sg_ptr = 0; /* next link = NULL */
                                return;
                        }
                        sg_list++;
                }
                sg_block_next_addr += sizeof(ADW_SG_BLOCK);
                sg_block_physical_addr += sizeof(ADW_SG_BLOCK);

                sg_block->sg_cnt = NO_OF_SG_PER_BLOCK;
                sg_block->sg_ptr = htole32(sg_block_physical_addr);
                sg_block = (ADW_SG_BLOCK *) sg_block_next_addr; /* virt. addr */
        } while (1);
}


/******************************************************************************/
/*                       Interrupts and TimeOut routines                     */
/******************************************************************************/


int
adw_intr(void *arg)
{
        ADW_SOFTC *sc = arg;


        if (AdwISR(sc) != ADW_FALSE) {
                return (1);
        }

        return (0);
}


/*
 * Poll a particular unit, looking for a particular xs.
 */
static int
adw_poll(ADW_SOFTC *sc, struct scsipi_xfer *xs, int count)
{

        /* timeouts are in msec, so we loop in 1000 usec cycles */
        while (count) {
                adw_intr(sc);
                if (xs->xs_status & XS_STS_DONE)
                        return (0);
                delay(1000);    /* only happens in boot, so ok */
                count--;
        }
        return (1);
}


static void
adw_timeout(void *arg)
{
        ADW_CCB *ccb = arg;
        struct scsipi_xfer *xs = ccb->xs;
        struct scsipi_periph *periph = xs->xs_periph;
        ADW_SOFTC *sc =
            (void *)periph->periph_channel->chan_adapter->adapt_dev;
        int s;

        scsipi_printaddr(periph);
        printf("timed out");

        s = splbio();

        if (ccb->flags & CCB_ABORTED) {
                /*
                 * Abort timed out.
                 *
                 * No more opportunities.  Let's try resetting the bus and
                 * reinitializing the host adapter.
                 */
                callout_stop(&xs->xs_callout);
                printf(" AGAIN. Resetting SCSI Bus\n");
                adw_reset_bus(sc);
                splx(s);
                return;
        } else if (ccb->flags & CCB_ABORTING) {
                /*
                 * Abort the operation that has timed out.
                 *
                 * Second opportunity.
                 */
                printf("\n");
                xs->error = XS_TIMEOUT;
                ccb->flags |= CCB_ABORTED;
#if 0
                /*
                 * - XXX - the 3.3a microcode is BROKEN!!!
                 *
                 * We cannot abort a CCB, so we can only hope the command
                 * gets completed before the next timeout; otherwise a
                 * Bus Reset will arrive inexorably.
                 */
                /*
                 * ADW_ABORT_CCB() makes the board generate an interrupt.
                 *
                 * - XXX - The above assertion MUST be verified (and this
                 *         code changed as well [callout_*()]) when
                 *         ADW_ABORT_CCB() works again.
                 */
                ADW_ABORT_CCB(sc, ccb);
#endif
                /*
                 * We cannot rely on a multishot callout_reset(), so restart
                 * it by hand; the next time a timeout event occurs we will
                 * reset the bus.
                 */
                callout_reset(&xs->xs_callout,
                    mstohz(ccb->timeout), adw_timeout, ccb);
        } else {
                /*
                 * Abort the operation that has timed out.
                 *
                 * First opportunity.
                 */
                printf("\n");
                xs->error = XS_TIMEOUT;
                ccb->flags |= CCB_ABORTING;
#if 0
                /*
                 * - XXX - the 3.3a microcode is BROKEN!!!
                 *
                 * We cannot abort a CCB, so we can only hope the command
                 * gets completed before the next two timeouts; otherwise a
                 * Bus Reset will arrive inexorably.
                 */
                /*
                 * ADW_ABORT_CCB() makes the board generate an interrupt.
                 *
                 * - XXX - The above assertion MUST be verified (and this
                 *         code changed as well [callout_*()]) when
                 *         ADW_ABORT_CCB() works again.
                 */
                ADW_ABORT_CCB(sc, ccb);
#endif
                /*
                 * We cannot rely on a multishot callout_reset(), so restart
                 * it by hand to give the command that timed out a second
                 * opportunity.
                 */
                callout_reset(&xs->xs_callout,
                    mstohz(ccb->timeout), adw_timeout, ccb);
        }

        splx(s);
}


static void
adw_reset_bus(ADW_SOFTC *sc)
{
        ADW_CCB *ccb;
        int s;
        struct scsipi_xfer *xs;

        s = splbio();
        AdwResetSCSIBus(sc);
        while ((ccb = TAILQ_LAST(&sc->sc_pending_ccb,
            adw_pending_ccb)) != NULL) {
                callout_stop(&ccb->xs->xs_callout);
                TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
                xs = ccb->xs;
                adw_free_ccb(sc, ccb);
                xs->error = XS_RESOURCE_SHORTAGE;
                scsipi_done(xs);
        }
        splx(s);
}


/******************************************************************************/
/*              Host Adapter and Peripherals Information Routines            */
/******************************************************************************/


static void
adw_print_info(ADW_SOFTC *sc, int tid)
{
        bus_space_tag_t iot = sc->sc_iot;
        bus_space_handle_t ioh = sc->sc_ioh;
        u_int16_t wdtr_able, wdtr_done, wdtr;
        u_int16_t sdtr_able, sdtr_done, sdtr, period;
        static int wdtr_reneg = 0, sdtr_reneg = 0;

        if (tid == 0) {
                wdtr_reneg = sdtr_reneg = 0;
        }

        printf("%s: target %d ", sc->sc_dev.dv_xname, tid);

        ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_WDTR_ABLE, wdtr_able);
        if (wdtr_able & ADW_TID_TO_TIDMASK(tid)) {
                ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_WDTR_DONE, wdtr_done);
                ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_DEVICE_HSHK_CFG_TABLE +
                    (2 * tid), wdtr);
                printf("using %d-bits wide, ", (wdtr & 0x8000) ? 16 : 8);
                if ((wdtr_done & ADW_TID_TO_TIDMASK(tid)) == 0)
                        wdtr_reneg = 1;
        } else {
                printf("wide transfers disabled, ");
        }

        ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_SDTR_ABLE, sdtr_able);
        if (sdtr_able & ADW_TID_TO_TIDMASK(tid)) {
                ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_SDTR_DONE, sdtr_done);
                ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_DEVICE_HSHK_CFG_TABLE +
                    (2 * tid), sdtr);
                sdtr &= ~0x8000;
                if ((sdtr & 0x1F) != 0) {
                        if ((sdtr & 0x1F00) == 0x1100) {
                                printf("80.0 MHz");
                        } else if ((sdtr & 0x1F00) == 0x1000) {
                                printf("40.0 MHz");
                        } else {
                                /* <= 20.0 MHz */
                                period = (((sdtr >> 8) * 25) + 50) / 4;
                                if (period == 0) {
                                        /* Should never happen. */
                                        printf("? MHz");
                                } else {
                                        printf("%d.%d MHz", 250 / period,
                                            ADW_TENTHS(250, period));
                                }
                        }
                        printf(" synchronous transfers\n");
                } else {
                        printf("asynchronous transfers\n");
                }
                if ((sdtr_done & ADW_TID_TO_TIDMASK(tid)) == 0)
                        sdtr_reneg = 1;
        } else {
                printf("synchronous transfers disabled\n");
        }

        if (wdtr_reneg || sdtr_reneg) {
                printf("%s: target %d %s", sc->sc_dev.dv_xname, tid,
                    (wdtr_reneg) ? ((sdtr_reneg) ? "wide/sync" : "wide") :
                    ((sdtr_reneg) ? "sync" : ""));
                printf(" renegotiation pending before next command.\n");
        }
}


/******************************************************************************/
/*                        WIDE boards Interrupt callbacks                    */
/******************************************************************************/


/*
 * adw_isr_callback() - Second Level Interrupt Handler called by AdwISR()
 *
 * Interrupt callback function for the Wide SCSI Adv Library.
 *
 * Notice:
 * Interrupts are disabled by the caller (the AdwISR() function) and are
 * re-enabled again by the caller.
 */
static void
adw_isr_callback(ADW_SOFTC *sc, ADW_SCSI_REQ_Q *scsiq)
{
        bus_dma_tag_t dmat = sc->sc_dmat;
        ADW_CCB *ccb;
        struct scsipi_xfer *xs;
        struct scsipi_sense_data *s1, *s2;


        ccb = adw_ccb_phys_kv(sc, scsiq->ccb_ptr);

        callout_stop(&ccb->xs->xs_callout);

        xs = ccb->xs;

        /*
         * If we were a data transfer, unload the map that described
         * the data buffer.
         */
        if (xs->datalen) {
                bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
                    ccb->dmamap_xfer->dm_mapsize,
                    (xs->xs_control & XS_CTL_DATA_IN) ?
                    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(dmat, ccb->dmamap_xfer);
        }

        if ((ccb->flags & CCB_ALLOC) == 0) {
                printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
                Debugger();
                return;
        }

        /*
         * 'done_status' contains the command's ending status.
         * 'host_status' contains the host adapter status.
         * 'scsi_status' contains the scsi peripheral status.
         */
        if ((scsiq->host_status == QHSTA_NO_ERROR) &&
            ((scsiq->done_status == QD_NO_ERROR) ||
            (scsiq->done_status == QD_WITH_ERROR))) {
                switch (scsiq->scsi_status) {
                case SCSI_STATUS_GOOD:
                        if ((scsiq->cdb[0] == INQUIRY) &&
                            (scsiq->target_lun == 0)) {
                                adw_print_info(sc, scsiq->target_id);
                        }
                        xs->error = XS_NOERROR;
                        xs->resid = le32toh(scsiq->data_cnt);
                        sc->sc_freeze_dev[scsiq->target_id] = 0;
                        break;

                case SCSI_STATUS_CHECK_CONDITION:
                case SCSI_STATUS_CMD_TERMINATED:
                        s1 = &ccb->scsi_sense;
                        s2 = &xs->sense.scsi_sense;
                        *s2 = *s1;
                        xs->error = XS_SENSE;
                        sc->sc_freeze_dev[scsiq->target_id] = 1;
                        break;

                default:
                        xs->error = XS_BUSY;
                        sc->sc_freeze_dev[scsiq->target_id] = 1;
                        break;
                }
        } else if (scsiq->done_status == QD_ABORTED_BY_HOST) {
                xs->error = XS_DRIVER_STUFFUP;
        } else {
                switch (scsiq->host_status) {
                case QHSTA_M_SEL_TIMEOUT:
                        xs->error = XS_SELTIMEOUT;
                        break;

                case QHSTA_M_SXFR_OFF_UFLW:
                case QHSTA_M_SXFR_OFF_OFLW:
                case QHSTA_M_DATA_OVER_RUN:
                        printf("%s: Overrun/Overflow/Underflow condition\n",
                            sc->sc_dev.dv_xname);
                        xs->error = XS_DRIVER_STUFFUP;
                        break;

                case QHSTA_M_SXFR_DESELECTED:
                case QHSTA_M_UNEXPECTED_BUS_FREE:
                        printf("%s: Unexpected BUS free\n", sc->sc_dev.dv_xname);
                        xs->error = XS_DRIVER_STUFFUP;
                        break;

                case QHSTA_M_SCSI_BUS_RESET:
                case QHSTA_M_SCSI_BUS_RESET_UNSOL:
                        printf("%s: BUS Reset\n", sc->sc_dev.dv_xname);
                        xs->error = XS_DRIVER_STUFFUP;
                        break;

                case QHSTA_M_BUS_DEVICE_RESET:
                        printf("%s: Device Reset\n", sc->sc_dev.dv_xname);
                        xs->error = XS_DRIVER_STUFFUP;
                        break;

                case QHSTA_M_QUEUE_ABORTED:
                        printf("%s: Queue Aborted\n", sc->sc_dev.dv_xname);
                        xs->error = XS_DRIVER_STUFFUP;
                        break;

                case QHSTA_M_SXFR_SDMA_ERR:
                case QHSTA_M_SXFR_SXFR_PERR:
                case QHSTA_M_RDMA_PERR:
                        /*
                         * DMA Error.  This should *NEVER* happen!
                         *
                         * Let's try resetting the bus and reinitializing
                         * the host adapter.
                         */
                        printf("%s: DMA Error. Resetting bus\n",
                            sc->sc_dev.dv_xname);
                        TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
                        adw_reset_bus(sc);
                        xs->error = XS_BUSY;
                        goto done;

                case QHSTA_M_WTM_TIMEOUT:
                case QHSTA_M_SXFR_WD_TMO:
                        /* The SCSI bus hung in a phase. */
                        printf("%s: Watchdog timer expired. Resetting bus\n",
                            sc->sc_dev.dv_xname);
                        TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
                        adw_reset_bus(sc);
                        xs->error = XS_BUSY;
                        goto done;

                case QHSTA_M_SXFR_XFR_PH_ERR:
                        printf("%s: Transfer Error\n", sc->sc_dev.dv_xname);
                        xs->error = XS_DRIVER_STUFFUP;
                        break;

                case QHSTA_M_BAD_CMPL_STATUS_IN:
                        /* No command complete after a status message. */
                        printf("%s: Bad Completion Status\n",
                            sc->sc_dev.dv_xname);
                        xs->error = XS_DRIVER_STUFFUP;
                        break;

                case QHSTA_M_AUTO_REQ_SENSE_FAIL:
                        printf("%s: Auto Sense Failed\n", sc->sc_dev.dv_xname);
                        xs->error = XS_DRIVER_STUFFUP;
                        break;

                case QHSTA_M_INVALID_DEVICE:
                        printf("%s: Invalid Device\n", sc->sc_dev.dv_xname);
                        xs->error = XS_DRIVER_STUFFUP;
                        break;

                case QHSTA_M_NO_AUTO_REQ_SENSE:
                        /*
                         * User didn't request sense, but we got a
                         * check condition.
                         */
                        printf("%s: Unexpected Check Condition\n",
                            sc->sc_dev.dv_xname);
                        xs->error = XS_DRIVER_STUFFUP;
                        break;

                case QHSTA_M_SXFR_UNKNOWN_ERROR:
                        printf("%s: Unknown Error\n", sc->sc_dev.dv_xname);
                        xs->error = XS_DRIVER_STUFFUP;
                        break;

                default:
                        panic("%s: Unhandled Host Status Error %x",
                            sc->sc_dev.dv_xname, scsiq->host_status);
                }
        }

        TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
done:   adw_free_ccb(sc, ccb);
        scsipi_done(xs);
}


/*
 * adw_async_callback() - Adv Library asynchronous event callback function.
 */
static void
adw_async_callback(ADW_SOFTC *sc, u_int8_t code)
{
        switch (code) {
        case ADV_ASYNC_SCSI_BUS_RESET_DET:
                /* The firmware detected a SCSI Bus reset. */
                printf("%s: SCSI Bus reset detected\n", sc->sc_dev.dv_xname);
                break;

        case ADV_ASYNC_RDMA_FAILURE:
                /*
                 * Handle RDMA failure by resetting the SCSI Bus and
                 * possibly the chip if it is unresponsive.
                 */
                printf("%s: RDMA failure. Resetting the SCSI Bus and"
                    " the adapter\n", sc->sc_dev.dv_xname);
                AdwResetSCSIBus(sc);
                break;

        case ADV_HOST_SCSI_BUS_RESET:
                /* Host generated SCSI bus reset occurred. */
                printf("%s: Host generated SCSI bus reset occurred\n",
                    sc->sc_dev.dv_xname);
                break;

        case ADV_ASYNC_CARRIER_READY_FAILURE:
                /* Carrier Ready failure. */
                printf("%s: Carrier Ready failure!\n", sc->sc_dev.dv_xname);
                break;

        default:
                break;
        }
}