/*	$NetBSD: adw.c,v 1.57 2021/04/24 23:36:55 thorpej Exp $	*/

/*
 * Generic driver for the Advanced Systems Inc. SCSI controllers
 *
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Author: Baldassare Dante Profeta <dante@mclink.it>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: adw.c,v 1.57 2021/04/24 23:36:55 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/proc.h>

#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/adwlib.h>
#include <dev/ic/adwmcode.h>
#include <dev/ic/adw.h>

#ifndef DDB
#define	Debugger()	panic("should call debugger here (adw.c)")
#endif	/* ! DDB */

/******************************************************************************/


static int adw_alloc_controls(ADW_SOFTC *);
static int adw_alloc_carriers(ADW_SOFTC *);
static int adw_create_ccbs(ADW_SOFTC *, ADW_CCB *, int);
static void adw_free_ccb(ADW_SOFTC *, ADW_CCB *);
static void adw_reset_ccb(ADW_CCB *);
static int adw_init_ccb(ADW_SOFTC *, ADW_CCB *);
static ADW_CCB *adw_get_ccb(ADW_SOFTC *);
static int adw_queue_ccb(ADW_SOFTC *, ADW_CCB *);

static void adw_scsipi_request(struct scsipi_channel *,
	scsipi_adapter_req_t, void *);
static int adw_build_req(ADW_SOFTC *, ADW_CCB *);
static void adw_build_sglist(ADW_CCB *, ADW_SCSI_REQ_Q *, ADW_SG_BLOCK *);
static void adwminphys(struct buf *);
static void adw_isr_callback(ADW_SOFTC *, ADW_SCSI_REQ_Q *);
static void adw_async_callback(ADW_SOFTC *, u_int8_t);

static void adw_print_info(ADW_SOFTC *, int);

static int adw_poll(ADW_SOFTC *, struct scsipi_xfer *, int);
static void adw_timeout(void *);
static void adw_reset_bus(ADW_SOFTC *);


/******************************************************************************/
/* DMA Mapping for Control Blocks */
/******************************************************************************/


static int
adw_alloc_controls(ADW_SOFTC *sc)
{
	bus_dma_segment_t seg;
	int error, rseg;

	/*
	 * Allocate the control structure.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adw_control),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to allocate control "
		    "structures, error = %d\n", error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct adw_control), (void **)&sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control structures, error = %d\n", error);
		return (error);
	}

	/*
	 * Create and load the DMA map used for the control blocks.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adw_control),
	    1, sizeof(struct adw_control), 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_control)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control DMA map, error = %d\n", error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
	    sc->sc_control, sizeof(struct adw_control), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control DMA map, error = %d\n", error);
		return (error);
	}

	return (0);
}

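/*
 * Allocate and map the DMA-able carrier area.  The carriers are the small,
 * 16-byte-aligned structures the Adv Library uses to queue requests to and
 * collect responses from the chip; they follow the same bus_dma pattern as
 * the control structure above: dmamem_alloc, dmamem_map, dmamap_create and
 * dmamap_load.
 */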
static int
adw_alloc_carriers(ADW_SOFTC *sc)
{
	bus_dma_segment_t seg;
	int error, rseg;

	/*
	 * Allocate the carrier structures.
	 */
	sc->sc_control->carriers = malloc(sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
	    M_DEVBUF, M_WAITOK);
	if (!sc->sc_control->carriers) {
		aprint_error_dev(sc->sc_dev,
		    "malloc() failed in allocating carrier structures\n");
		return (ENOMEM);
	}

	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
	    0x10, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to allocate carrier "
		    "structures, error = %d\n", error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
	    (void **)&sc->sc_control->carriers,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map carrier structures, error = %d\n", error);
		return (error);
	}

	/*
	 * Create and load the DMA map used for the carriers.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, 1,
	    sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_carrier)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create carriers DMA map, error = %d\n", error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_carrier,
	    sc->sc_control->carriers, sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
	    NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load carriers DMA map, error = %d\n", error);
		return (error);
	}

	return (0);
}


/******************************************************************************/
/* Control Blocks routines */
/******************************************************************************/


/*
 * Create a set of ccbs and add them to the free list.  Called once
 * by adw_init().  We return the number of CCBs successfully created.
 */
static int
adw_create_ccbs(ADW_SOFTC *sc, ADW_CCB *ccbstore, int count)
{
	ADW_CCB *ccb;
	int i, error;

	for (i = 0; i < count; i++) {
		ccb = &ccbstore[i];
		if ((error = adw_init_ccb(sc, ccb)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to initialize ccb, error = %d\n", error);
			return (i);
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
	}

	return (i);
}


/*
 * A ccb is put onto the free list.
 */
static void
adw_free_ccb(ADW_SOFTC *sc, ADW_CCB *ccb)
{
	int s;

	s = splbio();

	adw_reset_ccb(ccb);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);

	splx(s);
}


static void
adw_reset_ccb(ADW_CCB *ccb)
{

	ccb->flags = 0;
}

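/*
 * Initialize a single CCB: create the DMA map used for its data transfers
 * and enter the CCB in the physical-to-kernel-virtual hash table, so that
 * adw_ccb_phys_kv() can find it again from the 32-bit bus address the chip
 * hands back at completion time.
 */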
static int
adw_init_ccb(ADW_SOFTC *sc, ADW_CCB *ccb)
{
	int hashnum, error;

	/*
	 * Create the DMA map for this CCB.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
	    (ADW_MAX_SG_LIST - 1) * PAGE_SIZE, ADW_MAX_SG_LIST,
	    (ADW_MAX_SG_LIST - 1) * PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create CCB DMA map, error = %d\n", error);
		return (error);
	}

	/*
	 * put in the phystokv hash table
	 * Never gets taken out.
	 */
	ccb->hashkey = htole32(sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb));
	hashnum = CCB_HASH(ccb->hashkey);
	ccb->nexthash = sc->sc_ccbhash[hashnum];
	sc->sc_ccbhash[hashnum] = ccb;
	adw_reset_ccb(ccb);
	return (0);
}


/*
 * Get a free ccb
 *
 * If there are none, see if we can allocate a new one
 */
static ADW_CCB *
adw_get_ccb(ADW_SOFTC *sc)
{
	ADW_CCB *ccb = 0;
	int s;

	s = splbio();

	ccb = sc->sc_free_ccb.tqh_first;
	if (ccb != NULL) {
		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
		ccb->flags |= CCB_ALLOC;
	}
	splx(s);
	return (ccb);
}


/*
 * Given a physical address, find the ccb that it corresponds to.
 */
ADW_CCB *
adw_ccb_phys_kv(ADW_SOFTC *sc, u_int32_t ccb_phys)
{
	int hashnum = CCB_HASH(ccb_phys);
	ADW_CCB *ccb = sc->sc_ccbhash[hashnum];

	while (ccb) {
		if (ccb->hashkey == ccb_phys)
			break;
		ccb = ccb->nexthash;
	}
	return (ccb);
}


/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 */
static int
adw_queue_ccb(ADW_SOFTC *sc, ADW_CCB *ccb)
{
	int errcode = ADW_SUCCESS;

	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {

		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
		errcode = AdwExeScsiQueue(sc, &ccb->scsiq);
		switch (errcode) {
		case ADW_SUCCESS:
			break;

		case ADW_BUSY:
			printf("ADW_BUSY\n");
			return (ADW_BUSY);

		case ADW_ERROR:
			printf("ADW_ERROR\n");
			return (ADW_ERROR);
		}

		TAILQ_INSERT_TAIL(&sc->sc_pending_ccb, ccb, chain);

		if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
			callout_reset(&ccb->xs->xs_callout,
			    mstohz(ccb->timeout), adw_timeout, ccb);
	}

	return (errcode);
}


/******************************************************************************/
/* SCSI layer interfacing routines */
/******************************************************************************/


int
adw_init(ADW_SOFTC *sc)
{
	u_int16_t warn_code;


	sc->cfg.lib_version = (ADW_LIB_VERSION_MAJOR << 8) |
	    ADW_LIB_VERSION_MINOR;
	sc->cfg.chip_version =
	    ADW_GET_CHIP_VERSION(sc->sc_iot, sc->sc_ioh, sc->bus_type);

	/*
	 * Reset the chip to start and allow register writes.
	 */
	if (ADW_FIND_SIGNATURE(sc->sc_iot, sc->sc_ioh) == 0) {
		panic("adw_init: adw_find_signature failed");
	} else {
		AdwResetChip(sc->sc_iot, sc->sc_ioh);

		warn_code = AdwInitFromEEPROM(sc);

		if (warn_code & ADW_WARN_EEPROM_CHKSUM)
			aprint_error_dev(sc->sc_dev, "Bad checksum found. "
			    "Setting default values\n");
		if (warn_code & ADW_WARN_EEPROM_TERMINATION)
			aprint_error_dev(sc->sc_dev, "Bad bus termination "
			    "setting. Using automatic termination.\n");
	}

	sc->isr_callback = adw_isr_callback;
	sc->async_callback = adw_async_callback;

	return 0;
}

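/*
 * Attach the adapter to the scsipi layer: allocate and zero the control
 * block area, create the CCBs and the carriers, run AdwInitDriver() to
 * bring the chip up, then register the single SCSI channel with
 * config_found().
 */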
void
adw_attach(ADW_SOFTC *sc)
{
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	int ncontrols, error;

	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	TAILQ_INIT(&sc->sc_pending_ccb);

	/*
	 * Allocate the Control Blocks.
	 */
	error = adw_alloc_controls(sc);
	if (error)
		return; /* (error) */

	memset(sc->sc_control, 0, sizeof(struct adw_control));

	/*
	 * Create and initialize the Control Blocks.
	 */
	ncontrols = adw_create_ccbs(sc, sc->sc_control->ccbs, ADW_MAX_CCB);
	if (ncontrols == 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create Control Blocks\n");
		return; /* (ENOMEM) */
	} else if (ncontrols != ADW_MAX_CCB) {
		aprint_error_dev(sc->sc_dev,
		    "WARNING: only %d of %d Control Blocks created\n",
		    ncontrols, ADW_MAX_CCB);
	}

	/*
	 * Create and initialize the Carriers.
	 */
	error = adw_alloc_carriers(sc);
	if (error)
		return; /* (error) */

	/*
	 * Zero the freeze_device status.
	 */
	memset(sc->sc_freeze_dev, 0, sizeof(sc->sc_freeze_dev));

	/*
	 * Initialize the adapter.
	 */
	switch (AdwInitDriver(sc)) {
	case ADW_IERR_BIST_PRE_TEST:
		panic("%s: BIST pre-test error",
		    device_xname(sc->sc_dev));
		break;

	case ADW_IERR_BIST_RAM_TEST:
		panic("%s: BIST RAM test error",
		    device_xname(sc->sc_dev));
		break;

	case ADW_IERR_MCODE_CHKSUM:
		panic("%s: Microcode checksum error",
		    device_xname(sc->sc_dev));
		break;

	case ADW_IERR_ILLEGAL_CONNECTION:
		panic("%s: All three connectors are in use",
		    device_xname(sc->sc_dev));
		break;

	case ADW_IERR_REVERSED_CABLE:
		panic("%s: Cable is reversed",
		    device_xname(sc->sc_dev));
		break;

	case ADW_IERR_HVD_DEVICE:
		panic("%s: HVD attached to LVD connector",
		    device_xname(sc->sc_dev));
		break;

	case ADW_IERR_SINGLE_END_DEVICE:
		panic("%s: single-ended device is attached to"
		    " one of the connectors",
		    device_xname(sc->sc_dev));
		break;

	case ADW_IERR_NO_CARRIER:
		panic("%s: unable to create Carriers",
		    device_xname(sc->sc_dev));
		break;

	case ADW_WARN_BUSRESET_ERROR:
		aprint_error_dev(sc->sc_dev, "WARNING: Bus Reset Error\n");
		break;
	}

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = ncontrols;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = adw_scsipi_request;
	adapt->adapt_minphys = adwminphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = ADW_MAX_TID + 1;
	chan->chan_nluns = 8;
	chan->chan_id = sc->chip_scsi_id;

	config_found(sc->sc_dev, &sc->sc_channel, scsiprint, CFARG_EOL);
}


static void
adwminphys(struct buf *bp)
{

	if (bp->b_bcount > ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE))
		bp->b_bcount = ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE);
	minphys(bp);
}


/*
 * start a scsi operation given the command and the data address.
 * Also needs the unit, target and lu.
 */
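/*
 * Only ADAPTER_REQ_RUN_XFER is implemented below: the transfer is packed
 * into a CCB by adw_build_req() and handed to the chip by adw_queue_ccb();
 * polled transfers then spin in adw_poll() until completion.
 * ADAPTER_REQ_GROW_RESOURCES and ADAPTER_REQ_SET_XFER_MODE are accepted
 * but not acted upon.
 */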
static void
adw_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_xfer *xs;
	ADW_SOFTC *sc = device_private(chan->chan_adapter->adapt_dev);
	ADW_CCB *ccb;
	int s, retry;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;

		/*
		 * get a ccb to use. If the transfer
		 * is from a buf (possibly from interrupt time)
		 * then we can't allow it to sleep
		 */

		ccb = adw_get_ccb(sc);
#ifdef DIAGNOSTIC
		/*
		 * This should never happen as we track the resources
		 * in the mid-layer.
		 */
		if (ccb == NULL) {
			scsipi_printaddr(xs->xs_periph);
			printf("unable to allocate ccb\n");
			panic("adw_scsipi_request");
		}
#endif

		ccb->xs = xs;
		ccb->timeout = xs->timeout;

		if (adw_build_req(sc, ccb)) {
			s = splbio();
			retry = adw_queue_ccb(sc, ccb);
			splx(s);

			switch (retry) {
			case ADW_BUSY:
				xs->error = XS_RESOURCE_SHORTAGE;
				adw_free_ccb(sc, ccb);
				scsipi_done(xs);
				return;

			case ADW_ERROR:
				xs->error = XS_DRIVER_STUFFUP;
				adw_free_ccb(sc, ccb);
				scsipi_done(xs);
				return;
			}
			if ((xs->xs_control & XS_CTL_POLL) == 0)
				return;
			/*
			 * Not allowed to use interrupts, poll for completion.
			 */
			if (adw_poll(sc, xs, ccb->timeout)) {
				adw_timeout(ccb);
				if (adw_poll(sc, xs, ccb->timeout))
					adw_timeout(ccb);
			}
		}
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* XXX Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		/* XXX XXX XXX */
		return;
	}
}


/*
 * Build a request structure for the Wide Boards.
 */
static int
adw_build_req(ADW_SOFTC *sc, ADW_CCB *ccb)
{
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_SCSI_REQ_Q *scsiqp;
	int error;

	scsiqp = &ccb->scsiq;
	memset(scsiqp, 0, sizeof(ADW_SCSI_REQ_Q));

	/*
	 * Set the ADW_SCSI_REQ_Q 'ccb_ptr' to point to the
	 * physical CCB structure.
	 */
	scsiqp->ccb_ptr = ccb->hashkey;

	/*
	 * Build the ADW_SCSI_REQ_Q request.
	 */

	/*
	 * Set CDB length and copy it to the request structure.
	 * For wide boards a CDB length maximum of 16 bytes
	 * is supported.
	 */
	memcpy(&scsiqp->cdb, xs->cmd, ((scsiqp->cdb_len = xs->cmdlen) <= 12) ?
	    xs->cmdlen : 12);
	if (xs->cmdlen > 12)
		memcpy(&scsiqp->cdb16, &(xs->cmd[12]), xs->cmdlen - 12);

	scsiqp->target_id = periph->periph_target;
	scsiqp->target_lun = periph->periph_lun;

	scsiqp->vsense_addr = &ccb->scsi_sense;
	scsiqp->sense_addr = htole32(sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb) + offsetof(struct adw_ccb, scsi_sense));
	scsiqp->sense_len = sizeof(struct scsi_sense_data);

	/*
	 * Build ADW_SCSI_REQ_Q for a scatter-gather buffer command.
	 */
	if (xs->datalen) {
		/*
		 * Map the DMA transfer.
		 */
#ifdef TFS
		if (xs->xs_control & SCSI_DATA_UIO) {
			error = bus_dmamap_load_uio(dmat,
			    ccb->dmamap_xfer, (struct uio *) xs->data,
			    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
			    BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
			    ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
			    BUS_DMA_WRITE));
		} else
#endif /* TFS */
		{
			error = bus_dmamap_load(dmat,
			    ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
			    ((xs->xs_control & XS_CTL_NOSLEEP) ?
			    BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
			    BUS_DMA_STREAMING |
			    ((xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMA_READ : BUS_DMA_WRITE));
		}

		switch (error) {
		case 0:
			break;
		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			aprint_error_dev(sc->sc_dev,
			    "error %d loading DMA map\n", error);
out_bad:
			adw_free_ccb(sc, ccb);
			scsipi_done(xs);
			return (0);
		}

		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

		/*
		 * Build scatter-gather list.
		 */
		scsiqp->data_cnt = htole32(xs->datalen);
		scsiqp->vdata_addr = xs->data;
		scsiqp->data_addr = htole32(ccb->dmamap_xfer->dm_segs[0].ds_addr);
		memset(ccb->sg_block, 0,
		    sizeof(ADW_SG_BLOCK) * ADW_NUM_SG_BLOCK);
		adw_build_sglist(ccb, scsiqp, ccb->sg_block);
	} else {
		/*
		 * No data xfer, use non S/G values.
		 */
		scsiqp->data_cnt = 0;
		scsiqp->vdata_addr = 0;
		scsiqp->data_addr = 0;
	}

	return (1);
}


/*
 * Build scatter-gather list for Wide Boards.
 */
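/*
 * Each ADW_SG_BLOCK holds up to NO_OF_SG_PER_BLOCK entries taken from the
 * bus_dma segment list.  When a transfer needs more segments than fit in a
 * single block, the blocks are chained through their 'sg_ptr' field, which
 * holds the bus address of the next block; the last block carries a NULL
 * sg_ptr.
 */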
static void
adw_build_sglist(ADW_CCB *ccb, ADW_SCSI_REQ_Q *scsiqp, ADW_SG_BLOCK *sg_block)
{
	u_long sg_block_next_addr;	/* block and its next */
	u_int32_t sg_block_physical_addr;
	int i;				/* how many SG entries */
	bus_dma_segment_t *sg_list = &ccb->dmamap_xfer->dm_segs[0];
	int sg_elem_cnt = ccb->dmamap_xfer->dm_nsegs;


	sg_block_next_addr = (u_long) sg_block;	/* allow math operation */
	sg_block_physical_addr = le32toh(ccb->hashkey) +
	    offsetof(struct adw_ccb, sg_block[0]);
	scsiqp->sg_real_addr = htole32(sg_block_physical_addr);

	/*
	 * If there are more than NO_OF_SG_PER_BLOCK DMA segments (hw sg-list)
	 * then split the request into multiple sg-list blocks.
	 */

	do {
		for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) {
			sg_block->sg_list[i].sg_addr = htole32(sg_list->ds_addr);
			sg_block->sg_list[i].sg_count = htole32(sg_list->ds_len);

			if (--sg_elem_cnt == 0) {
				/* last entry, get out */
				sg_block->sg_cnt = i + 1;
				sg_block->sg_ptr = 0;	/* next link = NULL */
				return;
			}
			sg_list++;
		}
		sg_block_next_addr += sizeof(ADW_SG_BLOCK);
		sg_block_physical_addr += sizeof(ADW_SG_BLOCK);

		sg_block->sg_cnt = NO_OF_SG_PER_BLOCK;
		sg_block->sg_ptr = htole32(sg_block_physical_addr);
		sg_block = (ADW_SG_BLOCK *) sg_block_next_addr;	/* virt. addr */
	} while (1);
}

/******************************************************************************/
/* Interrupts and TimeOut routines */
/******************************************************************************/


int
adw_intr(void *arg)
{
	ADW_SOFTC *sc = arg;


	if (AdwISR(sc) != ADW_FALSE) {
		return (1);
	}

	return (0);
}


/*
 * Poll a particular unit, looking for a particular xs
 */
static int
adw_poll(ADW_SOFTC *sc, struct scsipi_xfer *xs, int count)
{

	/* timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		adw_intr(sc);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);	/* only happens in boot so ok */
		count--;
	}
	return (1);
}


static void
adw_timeout(void *arg)
{
	ADW_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	ADW_SOFTC *sc =
	    device_private(periph->periph_channel->chan_adapter->adapt_dev);
	int s;

	scsipi_printaddr(periph);
	printf("timed out");

	s = splbio();

	if (ccb->flags & CCB_ABORTED) {
		/*
		 * Abort Timed Out
		 *
		 * No more opportunities. Let's try resetting the bus and
		 * reinitializing the host adapter.
		 */
		callout_stop(&xs->xs_callout);
		printf(" AGAIN. Resetting SCSI Bus\n");
		adw_reset_bus(sc);
		splx(s);
		return;
	} else if (ccb->flags & CCB_ABORTING) {
		/*
		 * Abort the operation that has timed out.
		 *
		 * Second opportunity.
		 */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->flags |= CCB_ABORTED;
#if 0
		/*
		 * - XXX - 3.3a microcode is BROKEN!!!
		 *
		 * We cannot abort a CCB, so we can only hope the command
		 * gets completed before the next timeout, otherwise a
		 * Bus Reset will arrive inexorably.
		 */
		/*
		 * ADW_ABORT_CCB() makes the board generate an interrupt
		 *
		 * - XXX - The above assertion MUST be verified (and this
		 *         code changed as well [callout_*()]), when the
		 *         ADW_ABORT_CCB will be working again
		 */
		ADW_ABORT_CCB(sc, ccb);
#endif
		/*
		 * We are waiting for a multishot callout_reset(); restart it
		 * by hand so that the next time a timeout event occurs we
		 * will reset the bus.
		 */
		callout_reset(&xs->xs_callout,
		    mstohz(ccb->timeout), adw_timeout, ccb);
	} else {
		/*
		 * Abort the operation that has timed out.
		 *
		 * First opportunity.
		 */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->flags |= CCB_ABORTING;
#if 0
		/*
		 * - XXX - 3.3a microcode is BROKEN!!!
		 *
		 * We cannot abort a CCB, so we can only hope the command
		 * gets completed before the next two timeouts, otherwise a
		 * Bus Reset will arrive inexorably.
		 */
		/*
		 * ADW_ABORT_CCB() makes the board generate an interrupt
		 *
		 * - XXX - The above assertion MUST be verified (and this
		 *         code changed as well [callout_*()]), when the
		 *         ADW_ABORT_CCB will be working again
		 */
		ADW_ABORT_CCB(sc, ccb);
#endif
		/*
		 * We are waiting for a multishot callout_reset(); restart it
		 * by hand to give a second opportunity to the command which
		 * timed out.
		 */
		callout_reset(&xs->xs_callout,
		    mstohz(ccb->timeout), adw_timeout, ccb);
	}

	splx(s);
}


static void
adw_reset_bus(ADW_SOFTC *sc)
{
	ADW_CCB *ccb;
	int s;
	struct scsipi_xfer *xs;

	s = splbio();
	AdwResetSCSIBus(sc);
	while ((ccb = TAILQ_LAST(&sc->sc_pending_ccb,
	    adw_pending_ccb)) != NULL) {
		callout_stop(&ccb->xs->xs_callout);
		TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
		xs = ccb->xs;
		adw_free_ccb(sc, ccb);
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
	}
	splx(s);
}


/******************************************************************************/
/* Host Adapter and Peripherals Information Routines */
/******************************************************************************/

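/*
 * Print the negotiated transfer parameters for a target (wide/narrow and
 * synchronous rate), reading them back from the microcode's LRAM tables.
 * Called from adw_isr_callback() after a successful INQUIRY to LUN 0.
 */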
static void
adw_print_info(ADW_SOFTC *sc, int tid)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int16_t wdtr_able, wdtr_done, wdtr;
	u_int16_t sdtr_able, sdtr_done, sdtr, period;
	static int wdtr_reneg = 0, sdtr_reneg = 0;

	if (tid == 0) {
		wdtr_reneg = sdtr_reneg = 0;
	}

	printf("%s: target %d ", device_xname(sc->sc_dev), tid);

	ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_WDTR_ABLE, wdtr_able);
	if (wdtr_able & ADW_TID_TO_TIDMASK(tid)) {
		ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_WDTR_DONE, wdtr_done);
		ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_DEVICE_HSHK_CFG_TABLE +
		    (2 * tid), wdtr);
		printf("using %d-bits wide, ", (wdtr & 0x8000) ? 16 : 8);
		if ((wdtr_done & ADW_TID_TO_TIDMASK(tid)) == 0)
			wdtr_reneg = 1;
	} else {
		printf("wide transfers disabled, ");
	}

	ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_SDTR_ABLE, sdtr_able);
	if (sdtr_able & ADW_TID_TO_TIDMASK(tid)) {
		ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_SDTR_DONE, sdtr_done);
		ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_DEVICE_HSHK_CFG_TABLE +
		    (2 * tid), sdtr);
		sdtr &= ~0x8000;
		if ((sdtr & 0x1F) != 0) {
			if ((sdtr & 0x1F00) == 0x1100) {
				printf("80.0 MHz");
			} else if ((sdtr & 0x1F00) == 0x1000) {
				printf("40.0 MHz");
			} else {
				/* <= 20.0 MHz */
				period = (((sdtr >> 8) * 25) + 50) / 4;
				if (period == 0) {
					/* Should never happen. */
					printf("? MHz");
				} else {
					printf("%d.%d MHz", 250 / period,
					    ADW_TENTHS(250, period));
				}
			}
			printf(" synchronous transfers\n");
		} else {
			printf("asynchronous transfers\n");
		}
		if ((sdtr_done & ADW_TID_TO_TIDMASK(tid)) == 0)
			sdtr_reneg = 1;
	} else {
		printf("synchronous transfers disabled\n");
	}

	if (wdtr_reneg || sdtr_reneg) {
		printf("%s: target %d %s", device_xname(sc->sc_dev), tid,
		    (wdtr_reneg) ? ((sdtr_reneg) ? "wide/sync" : "wide") :
		    ((sdtr_reneg) ? "sync" : ""));
		printf(" renegotiation pending before next command.\n");
	}
}


/******************************************************************************/
/* WIDE boards Interrupt callbacks */
/******************************************************************************/


/*
 * adw_isr_callback() - Second Level Interrupt Handler called by AdwISR()
 *
 * Interrupt callback function for the Wide SCSI Adv Library.
 *
 * Notice:
 *	Interrupts are disabled by the caller (AdwISR() function), and will be
 *	enabled at the end of the caller.
 */
static void
adw_isr_callback(ADW_SOFTC *sc, ADW_SCSI_REQ_Q *scsiq)
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_CCB *ccb;
	struct scsipi_xfer *xs;
	struct scsi_sense_data *s1, *s2;


	ccb = adw_ccb_phys_kv(sc, scsiq->ccb_ptr);

	callout_stop(&ccb->xs->xs_callout);

	xs = ccb->xs;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}

	if ((ccb->flags & CCB_ALLOC) == 0) {
		aprint_error_dev(sc->sc_dev, "exiting ccb not allocated!\n");
		Debugger();
		return;
	}

	/*
	 * 'done_status' contains the command's ending status.
	 * 'host_status' contains the host adapter status.
	 * 'scsi_status' contains the scsi peripheral status.
	 */
	if ((scsiq->host_status == QHSTA_NO_ERROR) &&
	    ((scsiq->done_status == QD_NO_ERROR) ||
	    (scsiq->done_status == QD_WITH_ERROR))) {
		switch (scsiq->scsi_status) {
		case SCSI_STATUS_GOOD:
			if ((scsiq->cdb[0] == INQUIRY) &&
			    (scsiq->target_lun == 0)) {
				adw_print_info(sc, scsiq->target_id);
			}
			xs->error = XS_NOERROR;
			xs->resid = le32toh(scsiq->data_cnt);
			sc->sc_freeze_dev[scsiq->target_id] = 0;
			break;

		case SCSI_STATUS_CHECK_CONDITION:
		case SCSI_STATUS_CMD_TERMINATED:
			s1 = &ccb->scsi_sense;
			s2 = &xs->sense.scsi_sense;
			*s2 = *s1;
			xs->error = XS_SENSE;
			sc->sc_freeze_dev[scsiq->target_id] = 1;
			break;

		default:
			xs->error = XS_BUSY;
			sc->sc_freeze_dev[scsiq->target_id] = 1;
			break;
		}
	} else if (scsiq->done_status == QD_ABORTED_BY_HOST) {
		xs->error = XS_DRIVER_STUFFUP;
	} else {
		switch (scsiq->host_status) {
		case QHSTA_M_SEL_TIMEOUT:
			xs->error = XS_SELTIMEOUT;
			break;

		case QHSTA_M_SXFR_OFF_UFLW:
		case QHSTA_M_SXFR_OFF_OFLW:
		case QHSTA_M_DATA_OVER_RUN:
			aprint_error_dev(sc->sc_dev,
			    "Overrun/Overflow/Underflow condition\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SXFR_DESELECTED:
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			aprint_error_dev(sc->sc_dev, "Unexpected BUS free\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SCSI_BUS_RESET:
		case QHSTA_M_SCSI_BUS_RESET_UNSOL:
			aprint_error_dev(sc->sc_dev, "BUS Reset\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_BUS_DEVICE_RESET:
			aprint_error_dev(sc->sc_dev, "Device Reset\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_QUEUE_ABORTED:
			aprint_error_dev(sc->sc_dev, "Queue Aborted\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SXFR_SDMA_ERR:
		case QHSTA_M_SXFR_SXFR_PERR:
		case QHSTA_M_RDMA_PERR:
			/*
			 * DMA Error. This should *NEVER* happen!
			 *
			 * Let's try resetting the bus and reinitializing
			 * the host adapter.
			 */
			aprint_error_dev(sc->sc_dev,
			    "DMA Error. Resetting bus\n");
			TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
			adw_reset_bus(sc);
			xs->error = XS_BUSY;
			goto done;
		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_SXFR_WD_TMO:
			/* The SCSI bus hung in a phase */
			printf("%s: Watch Dog timer expired. Resetting bus\n",
			    device_xname(sc->sc_dev));
			TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
			adw_reset_bus(sc);
			xs->error = XS_BUSY;
			goto done;

		case QHSTA_M_SXFR_XFR_PH_ERR:
			aprint_error_dev(sc->sc_dev, "Transfer Error\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			printf("%s: Bad Completion Status\n",
			    device_xname(sc->sc_dev));
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			aprint_error_dev(sc->sc_dev, "Auto Sense Failed\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_INVALID_DEVICE:
			aprint_error_dev(sc->sc_dev, "Invalid Device\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_NO_AUTO_REQ_SENSE:
			/*
			 * User didn't request sense, but we got a
			 * check condition.
			 */
			aprint_error_dev(sc->sc_dev,
			    "Unexpected Check Condition\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SXFR_UNKNOWN_ERROR:
			aprint_error_dev(sc->sc_dev, "Unknown Error\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		default:
			panic("%s: Unhandled Host Status Error %x",
			    device_xname(sc->sc_dev), scsiq->host_status);
		}
	}

	TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
done:	adw_free_ccb(sc, ccb);
	scsipi_done(xs);
}


/*
 * adw_async_callback() - Adv Library asynchronous event callback function.
 */
static void
adw_async_callback(ADW_SOFTC *sc, u_int8_t code)
{
	switch (code) {
	case ADV_ASYNC_SCSI_BUS_RESET_DET:
		/* The firmware detected a SCSI Bus reset. */
		printf("%s: SCSI Bus reset detected\n",
		    device_xname(sc->sc_dev));
		break;

	case ADV_ASYNC_RDMA_FAILURE:
		/*
		 * Handle RDMA failure by resetting the SCSI Bus and
		 * possibly the chip if it is unresponsive.
		 */
		printf("%s: RDMA failure. Resetting the SCSI Bus and"
		    " the adapter\n", device_xname(sc->sc_dev));
		AdwResetSCSIBus(sc);
		break;

	case ADV_HOST_SCSI_BUS_RESET:
		/* Host generated SCSI bus reset occurred. */
		printf("%s: Host generated SCSI bus reset occurred\n",
		    device_xname(sc->sc_dev));
		break;

	case ADV_ASYNC_CARRIER_READY_FAILURE:
		/* Carrier Ready failure. */
		printf("%s: Carrier Ready failure!\n",
		    device_xname(sc->sc_dev));
		break;

	default:
		break;
	}
}