1 /* $NetBSD: bha.c,v 1.73 2010/07/27 14:34:34 jakllsch Exp $ */ 2 3 /*- 4 * Copyright (c) 1997, 1998, 1999 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace 9 * Simulation Facility, NASA Ames Research Center. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 /* 34 * Originally written by Julian Elischer (julian@tfs.com) 35 * for TRW Financial Systems for use under the MACH(2.5) operating system. 
36 * 37 * TRW Financial Systems, in accordance with their agreement with Carnegie 38 * Mellon University, makes this software available to CMU to distribute 39 * or use in any manner that they see fit as long as this message is kept with 40 * the software. For this reason TFS also grants any other persons or 41 * organisations permission to use or modify this software. 42 * 43 * TFS supplies this software to be publicly redistributed 44 * on the understanding that TFS is not responsible for the correct 45 * functioning of this software in any circumstances. 46 */ 47 48 #include <sys/cdefs.h> 49 __KERNEL_RCSID(0, "$NetBSD: bha.c,v 1.73 2010/07/27 14:34:34 jakllsch Exp $"); 50 51 #include "opt_ddb.h" 52 53 #include <sys/param.h> 54 #include <sys/systm.h> 55 #include <sys/callout.h> 56 #include <sys/kernel.h> 57 #include <sys/errno.h> 58 #include <sys/ioctl.h> 59 #include <sys/device.h> 60 #include <sys/malloc.h> 61 #include <sys/buf.h> 62 #include <sys/proc.h> 63 64 #include <uvm/uvm_extern.h> 65 66 #include <sys/bus.h> 67 #include <sys/intr.h> 68 69 #include <dev/scsipi/scsi_all.h> 70 #include <dev/scsipi/scsipi_all.h> 71 #include <dev/scsipi/scsiconf.h> 72 73 #include <dev/ic/bhareg.h> 74 #include <dev/ic/bhavar.h> 75 76 #ifndef DDB 77 #define Debugger() panic("should call debugger here (bha.c)") 78 #endif /* ! 
DDB */ 79 80 #define BHA_MAXXFER ((BHA_NSEG - 1) << PGSHIFT) 81 82 #ifdef BHADEBUG 83 int bha_debug = 0; 84 #endif /* BHADEBUG */ 85 86 static int bha_cmd(bus_space_tag_t, bus_space_handle_t, const char *, int, 87 u_char *, int, u_char *); 88 89 static void bha_scsipi_request(struct scsipi_channel *, 90 scsipi_adapter_req_t, void *); 91 static void bha_minphys(struct buf *); 92 93 static void bha_get_xfer_mode(struct bha_softc *, 94 struct scsipi_xfer_mode *); 95 96 static void bha_done(struct bha_softc *, struct bha_ccb *); 97 static int bha_poll(struct bha_softc *, struct scsipi_xfer *, int); 98 static void bha_timeout(void *arg); 99 100 static int bha_init(struct bha_softc *); 101 102 static int bha_create_mailbox(struct bha_softc *); 103 static void bha_collect_mbo(struct bha_softc *); 104 105 static void bha_queue_ccb(struct bha_softc *, struct bha_ccb *); 106 static void bha_start_ccbs(struct bha_softc *); 107 static void bha_finish_ccbs(struct bha_softc *); 108 109 static struct bha_ccb *bha_ccb_phys_kv(struct bha_softc *, bus_addr_t); 110 static void bha_create_ccbs(struct bha_softc *, int); 111 static int bha_init_ccb(struct bha_softc *, struct bha_ccb *); 112 static struct bha_ccb *bha_get_ccb(struct bha_softc *); 113 static void bha_free_ccb(struct bha_softc *, struct bha_ccb *); 114 115 #define BHA_RESET_TIMEOUT 2000 /* time to wait for reset (mSec) */ 116 #define BHA_ABORT_TIMEOUT 2000 /* time to wait for abort (mSec) */ 117 118 /* 119 * Number of CCBs in an allocation group; must be computed at run-time. 
 */
static int bha_ccbs_per_group;

/*
 * Return the mailbox-out slot following `mbo', wrapping back to the
 * first slot of the ring after the last one (sc_mbox_count entries).
 */
static inline struct bha_mbx_out *
bha_nextmbo(struct bha_softc *sc, struct bha_mbx_out *mbo)
{

	if (mbo == &sc->sc_mbo[sc->sc_mbox_count - 1])
		return (&sc->sc_mbo[0]);
	return (mbo + 1);
}

/*
 * Return the mailbox-in slot following `mbi', wrapping back to the
 * first slot of the ring after the last one (sc_mbox_count entries).
 */
static inline struct bha_mbx_in *
bha_nextmbi(struct bha_softc *sc, struct bha_mbx_in *mbi)
{
	if (mbi == &sc->sc_mbi[sc->sc_mbox_count - 1])
		return (&sc->sc_mbi[0]);
	return (mbi + 1);
}

/*
 * bha_attach:
 *
 *	Finish attaching a Buslogic controller, and configure children.
 *
 *	Queries the board (bha_info), fills in the scsipi adapter and
 *	channel structures, creates the mailbox rings and initial CCBs,
 *	and finally attaches the SCSI bus.  Any failure simply aborts
 *	the attach; no children are configured in that case.
 */
void
bha_attach(struct bha_softc *sc)
{
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	int initial_ccbs;

	/*
	 * Initialize the number of CCBs per group.
	 */
	if (bha_ccbs_per_group == 0)
		bha_ccbs_per_group = BHA_CCBS_PER_GROUP;

	/* bha_info() returns the initial CCB count, or 0 on failure. */
	initial_ccbs = bha_info(sc);
	if (initial_ccbs == 0) {
		aprint_error_dev(&sc->sc_dev, "unable to get adapter info\n");
		return;
	}

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &sc->sc_dev;
	adapt->adapt_nchannels = 1;
	/* adapt_openings initialized below */
	adapt->adapt_max_periph = sc->sc_mbox_count;
	adapt->adapt_request = bha_scsipi_request;
	adapt->adapt_minphys = bha_minphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = SCSIPI_CHAN_CANGROW;
	/* Wide boards have 16 targets; wide-LUN boards support 32 LUNs. */
	chan->chan_ntargets = (sc->sc_flags & BHAF_WIDE) ? 16 : 8;
	chan->chan_nluns = (sc->sc_flags & BHAF_WIDE_LUN) ? 32 : 8;
	chan->chan_id = sc->sc_scsi_id;

	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	TAILQ_INIT(&sc->sc_allocating_ccbs);

	if (bha_create_mailbox(sc) != 0)
		return;

	/* Need at least two CCBs to operate; bail out otherwise. */
	bha_create_ccbs(sc, initial_ccbs);
	if (sc->sc_cur_ccbs < 2) {
		aprint_error_dev(&sc->sc_dev, "not enough CCBs to run\n");
		return;
	}

	adapt->adapt_openings = sc->sc_cur_ccbs;

	if (bha_init(sc) != 0)
		return;

	(void) config_found(&sc->sc_dev, &sc->sc_channel, scsiprint);
}

/*
 * bha_intr:
 *
 *	Interrupt service routine.
 *
 *	Returns 0 if the interrupt was not for us, 1 if it was handled.
 */
int
bha_intr(void *arg)
{
	struct bha_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_char sts;

#ifdef BHADEBUG
	printf("%s: bha_intr ", device_xname(&sc->sc_dev));
#endif /* BHADEBUG */

	/*
	 * First acknowledge the interrupt, Then if it's not telling about
	 * a completed operation just return.
	 */
	sts = bus_space_read_1(iot, ioh, BHA_INTR_PORT);
	if ((sts & BHA_INTR_ANYINTR) == 0)
		return (0);
	bus_space_write_1(iot, ioh, BHA_CTRL_PORT, BHA_CTRL_IRST);

#ifdef BHADIAG
	/* Make sure we clear CCB_SENDING before finishing a CCB. */
	bha_collect_mbo(sc);
#endif

	/* Mail box out empty? */
	if (sts & BHA_INTR_MBOA) {
		struct bha_toggle toggle;

		/*
		 * Disable the mailbox-out-available interrupt and push
		 * any CCBs that were waiting for a free mailbox.
		 */
		toggle.cmd.opcode = BHA_MBO_INTR_EN;
		toggle.cmd.enable = 0;
		bha_cmd(iot, ioh, device_xname(&sc->sc_dev),
		    sizeof(toggle.cmd), (u_char *)&toggle.cmd,
		    0, (u_char *)0);
		bha_start_ccbs(sc);
	}

	/* Mail box in full? */
	if (sts & BHA_INTR_MBIF)
		bha_finish_ccbs(sc);

	return (1);
}

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

/*
 * bha_scsipi_request:
 *
 *	Perform a request for the SCSIPI layer.
 *
 *	Handles RUN_XFER (build and queue a CCB for the command),
 *	GROW_RESOURCES (allocate another group of CCBs), and
 *	SET_XFER_MODE (query-only; see bha_get_xfer_mode).
 */
static void
bha_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct bha_softc *sc = (void *)adapt->adapt_dev;
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	bus_dma_tag_t dmat = sc->sc_dmat;
	struct bha_ccb *ccb;
	int error, seg, flags, s;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		flags = xs->xs_control;

		SC_DEBUG(periph, SCSIPI_DB2, ("bha_scsipi_request\n"));

		/* Get a CCB to use. */
		ccb = bha_get_ccb(sc);
#ifdef DIAGNOSTIC
		/*
		 * This should never happen as we track the resources
		 * in the mid-layer.
		 */
		if (ccb == NULL) {
			scsipi_printaddr(periph);
			printf("unable to allocate ccb\n");
			panic("bha_scsipi_request");
		}
#endif

		ccb->xs = xs;
		ccb->timeout = xs->timeout;

		/*
		 * Put all the arguments for the xfer in the ccb
		 */
		if (flags & XS_CTL_RESET) {
			ccb->opcode = BHA_RESET_CCB;
			ccb->scsi_cmd_length = 0;
		} else {
			/* can't use S/G if zero length */
			if (xs->cmdlen > sizeof(ccb->scsi_cmd)) {
				printf("%s: cmdlen %d too large for CCB\n",
				    device_xname(&sc->sc_dev), xs->cmdlen);
				xs->error = XS_DRIVER_STUFFUP;
				goto out_bad;
			}
			ccb->opcode = (xs->datalen ? BHA_INIT_SCAT_GATH_CCB
						   : BHA_INITIATOR_CCB);
			memcpy(&ccb->scsi_cmd, xs->cmd,
			    ccb->scsi_cmd_length = xs->cmdlen);
		}

		if (xs->datalen) {
			/*
			 * Map the DMA transfer.
			 */
#ifdef TFS
			if (flags & XS_CTL_DATA_UIO) {
				error = bus_dmamap_load_uio(dmat,
				    ccb->dmamap_xfer, (struct uio *)xs->data,
				    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
				      BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
				    ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
				      BUS_DMA_WRITE));
			} else
#endif /* TFS */
			{
				error = bus_dmamap_load(dmat,
				    ccb->dmamap_xfer, xs->data, xs->datalen,
				    NULL,
				    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
				      BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
				    ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
				      BUS_DMA_WRITE));
			}

			switch (error) {
			case 0:
				break;

			case ENOMEM:
			case EAGAIN:
				/* Mid-layer will retry when resources free up. */
				xs->error = XS_RESOURCE_SHORTAGE;
				goto out_bad;

			default:
				xs->error = XS_DRIVER_STUFFUP;
				aprint_error_dev(&sc->sc_dev, "error %d loading DMA map\n", error);
 out_bad:
				bha_free_ccb(sc, ccb);
				scsipi_done(xs);
				return;
			}

			bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
			    ccb->dmamap_xfer->dm_mapsize,
			    (flags & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
			    BUS_DMASYNC_PREWRITE);

			/*
			 * Load the hardware scatter/gather map with the
			 * contents of the DMA map.
			 */
			for (seg = 0; seg < ccb->dmamap_xfer->dm_nsegs; seg++) {
				ltophys(ccb->dmamap_xfer->dm_segs[seg].ds_addr,
				    ccb->scat_gath[seg].seg_addr);
				ltophys(ccb->dmamap_xfer->dm_segs[seg].ds_len,
				    ccb->scat_gath[seg].seg_len);
			}

			/*
			 * Point the CCB's data pointer at its own S/G list
			 * (hashkey is the CCB's bus address), and set the
			 * data length to the size of that list.
			 */
			ltophys(ccb->hashkey + offsetof(struct bha_ccb,
			    scat_gath), ccb->data_addr);
			ltophys(ccb->dmamap_xfer->dm_nsegs *
			    sizeof(struct bha_scat_gath), ccb->data_length);
		} else {
			/*
			 * No data xfer, use non S/G values.
			 */
			ltophys(0, ccb->data_addr);
			ltophys(0, ccb->data_length);
		}

		if (XS_CTL_TAGTYPE(xs) != 0) {
			ccb->tag_enable = 1;
			ccb->tag_type = xs->xs_tag_type & 0x03;
		} else {
			ccb->tag_enable = 0;
			ccb->tag_type = 0;
		}

		ccb->data_out = 0;
		ccb->data_in = 0;
		ccb->target = periph->periph_target;
		ccb->lun = periph->periph_lun;
		/* Sense data lands directly in the CCB; give its bus address. */
		ltophys(ccb->hashkey + offsetof(struct bha_ccb, scsi_sense),
		    ccb->sense_ptr);
		ccb->req_sense_length = sizeof(ccb->scsi_sense);
		ccb->host_stat = 0x00;
		ccb->target_stat = 0x00;
		ccb->link_id = 0;
		ltophys(0, ccb->link_addr);

		BHA_CCB_SYNC(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		s = splbio();
		bha_queue_ccb(sc, ccb);
		splx(s);

		SC_DEBUG(periph, SCSIPI_DB3, ("cmd_sent\n"));
		if ((flags & XS_CTL_POLL) == 0)
			return;

		/*
		 * If we can't use interrupts, poll on completion
		 */
		if (bha_poll(sc, xs, ccb->timeout)) {
			/* Timed out; abort, then give the abort a chance. */
			bha_timeout(ccb);
			if (bha_poll(sc, xs, ccb->timeout))
				bha_timeout(ccb);
		}
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		if (sc->sc_cur_ccbs == sc->sc_max_ccbs) {
			/* At the hardware limit; we can grow no further. */
			chan->chan_flags &= ~SCSIPI_CHAN_CANGROW;
			return;
		}
		/* `seg' reused here to remember the old CCB count. */
		seg = sc->sc_cur_ccbs;
		bha_create_ccbs(sc, bha_ccbs_per_group);
		adapt->adapt_openings += sc->sc_cur_ccbs - seg;
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * Can't really do this on the Buslogic.  It has its
		 * own setup info.  But we do know how to query what
		 * the settings are.
		 */
		bha_get_xfer_mode(sc, (struct scsipi_xfer_mode *)arg);
		return;
	}
}

/*
 * bha_minphys:
 *
 *	Limit a transfer to our maximum transfer size.
 */
void
bha_minphys(struct buf *bp)
{

	/* Clamp to BHA_MAXXFER, then apply the system-wide minphys. */
	if (bp->b_bcount > BHA_MAXXFER)
		bp->b_bcount = BHA_MAXXFER;
	minphys(bp);
}

/*****************************************************************************
 * SCSI job execution helper routines
 *****************************************************************************/

/*
 * bha_get_xfer_mode:
 *
 *	Negotiate the xfer mode for the specified periph, and report
 *	back the mode to the midlayer.
 *
 *	NOTE: we must be called at splbio().
 */
static void
bha_get_xfer_mode(struct bha_softc *sc, struct scsipi_xfer_mode *xm)
{
	struct bha_setup hwsetup;
	struct bha_period hwperiod;
	struct bha_sync *bs;
	/* toff: target index within its 8-target bank; tmask: its bit. */
	int toff = xm->xm_target & 7, tmask = (1 << toff);
	int wide, period, offset, rlen;

	/*
	 * Issue an Inquire Setup Information.  We can extract
	 * sync and wide information from here.
	 */
	rlen = sizeof(hwsetup.reply) +
	    ((sc->sc_flags & BHAF_WIDE) ? sizeof(hwsetup.reply_w) : 0);
	hwsetup.cmd.opcode = BHA_INQUIRE_SETUP;
	hwsetup.cmd.len = rlen;
	bha_cmd(sc->sc_iot, sc->sc_ioh, device_xname(&sc->sc_dev),
	    sizeof(hwsetup.cmd), (u_char *)&hwsetup.cmd,
	    rlen, (u_char *)&hwsetup.reply);

	xm->xm_mode = 0;
	xm->xm_period = 0;
	xm->xm_offset = 0;

	/*
	 * First check for wide.  On later boards (firmware >= 5.06L),
	 * we can check directly in the setup info if wide is currently
	 * active.
	 *
	 * On earlier boards, we have to make an educated guess.
	 */
	if (sc->sc_flags & BHAF_WIDE) {
		if (strcmp(sc->sc_firmware, "5.06L") >= 0) {
			if (xm->xm_target > 7) {
				wide =
				    hwsetup.reply_w.high_wide_active & tmask;
			} else {
				wide =
				    hwsetup.reply_w.low_wide_active & tmask;
			}
			if (wide)
				xm->xm_mode |= PERIPH_CAP_WIDE16;
		} else {
			/* XXX Check `wide permitted' in the config info. */
			xm->xm_mode |= PERIPH_CAP_WIDE16;
		}
	}

	/*
	 * Now get basic sync info.
	 */
	bs = (xm->xm_target > 7) ?
	     &hwsetup.reply_w.sync_high[toff] :
	     &hwsetup.reply.sync_low[toff];

	if (bs->valid) {
		xm->xm_mode |= PERIPH_CAP_SYNC;
		period = (bs->period * 50) + 20;
		offset = bs->offset;

		/*
		 * On boards that can do Fast and Ultra, use the Inquire Period
		 * command to get the period.
		 */
		if (sc->sc_firmware[0] >= '3') {
			rlen = sizeof(hwperiod.reply) +
			    ((sc->sc_flags & BHAF_WIDE) ?
			      sizeof(hwperiod.reply_w) : 0);
			hwperiod.cmd.opcode = BHA_INQUIRE_PERIOD;
			hwperiod.cmd.len = rlen;
			bha_cmd(sc->sc_iot, sc->sc_ioh, device_xname(&sc->sc_dev),
			    sizeof(hwperiod.cmd), (u_char *)&hwperiod.cmd,
			    rlen, (u_char *)&hwperiod.reply);

			if (xm->xm_target > 7)
				period = hwperiod.reply_w.period[toff];
			else
				period = hwperiod.reply.period[toff];

			period *= 10;
		}

		xm->xm_period =
		    scsipi_sync_period_to_factor(period * 100);
		xm->xm_offset = offset;
	}

	/*
	 * Now check for tagged queueing support.
	 *
	 * XXX Check `tags permitted' in the config info.
	 */
	if (sc->sc_flags & BHAF_TAGGED_QUEUEING)
		xm->xm_mode |= PERIPH_CAP_TQING;

	scsipi_async_event(&sc->sc_channel, ASYNC_EVENT_XFER_MODE, xm);
}

/*
 * bha_done:
 *
 *	A CCB has completed execution.  Pass the status back to the
 *	upper layer.
 */
static void
bha_done(struct bha_softc *sc, struct bha_ccb *ccb)
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	struct scsipi_xfer *xs = ccb->xs;

	SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("bha_done\n"));

#ifdef BHADIAG
	/* A CCB still marked in-transit here indicates driver confusion. */
	if (ccb->flags & CCB_SENDING) {
		printf("%s: exiting ccb still in transit!\n",
		    device_xname(&sc->sc_dev));
		Debugger();
		return;
	}
#endif
	if ((ccb->flags & CCB_ALLOC) == 0) {
		aprint_error_dev(&sc->sc_dev, "exiting ccb not allocated!\n");
		Debugger();
		return;
	}

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}

	/*
	 * Translate adapter (host_stat) and target (target_stat) status
	 * into a scsipi error code, unless an error was already noted.
	 */
	if (xs->error == XS_NOERROR) {
		if (ccb->host_stat != BHA_OK) {
			switch (ccb->host_stat) {
			case BHA_SEL_TIMEOUT:	/* No response */
				xs->error = XS_SELTIMEOUT;
				break;
			default:	/* Other scsi protocol messes */
				printf("%s: host_stat %x\n",
				    device_xname(&sc->sc_dev), ccb->host_stat);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else if (ccb->target_stat != SCSI_OK) {
			switch (ccb->target_stat) {
			case SCSI_CHECK:
				/* Copy the sense data saved in the CCB. */
				memcpy(&xs->sense.scsi_sense,
				    &ccb->scsi_sense,
				    sizeof(xs->sense.scsi_sense));
				xs->error = XS_SENSE;
				break;
			case SCSI_BUSY:
				xs->error = XS_BUSY;
				break;
			default:
				printf("%s: target_stat %x\n",
				    device_xname(&sc->sc_dev), ccb->target_stat);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else
			xs->resid = 0;
	}

	bha_free_ccb(sc, ccb);
	scsipi_done(xs);
}

/*
 * bha_poll:
 *
 *	Poll for completion of the specified job.
 *
 *	`count' is the timeout in milliseconds.  Returns 0 when the
 *	xfer completed within the timeout, 1 on timeout.
 */
static int
bha_poll(struct bha_softc *sc, struct scsipi_xfer *xs, int count)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;

	/* timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		/*
		 * If we had interrupts enabled, would we
		 * have got an interrupt?
		 */
		if (bus_space_read_1(iot, ioh, BHA_INTR_PORT) &
		    BHA_INTR_ANYINTR)
			bha_intr(sc);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);	/* only happens in boot so ok */
		count--;
	}
	return (1);
}

/*
 * bha_timeout:
 *
 *	CCB timeout handler.  On the first timeout, requeue the CCB as
 *	an abort; if the abort itself times out, only complain (a full
 *	bus reset would be needed).
 */
static void
bha_timeout(void *arg)
{
	struct bha_ccb *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	struct bha_softc *sc =
	    (void *)periph->periph_channel->chan_adapter->adapt_dev;
	int s;

	scsipi_printaddr(periph);
	printf("timed out");

	s = splbio();

#ifdef BHADIAG
	/*
	 * If the ccb's mbx is not free, then the board has gone Far East?
	 */
	bha_collect_mbo(sc);
	if (ccb->flags & CCB_SENDING) {
		aprint_error_dev(&sc->sc_dev, "not taking commands!\n");
		Debugger();
	}
#endif

	/*
	 * If it has been through before, then
	 * a previous abort has failed, don't
	 * try abort again
	 */
	if (ccb->flags & CCB_ABORT) {
		/* abort timed out */
		printf(" AGAIN\n");
		/* XXX Must reset! */
	} else {
		/* abort the operation that has timed out */
		printf("\n");
		ccb->xs->error = XS_TIMEOUT;
		ccb->timeout = BHA_ABORT_TIMEOUT;
		ccb->flags |= CCB_ABORT;
		bha_queue_ccb(sc, ccb);
	}

	splx(s);
}

/*****************************************************************************
 * Misc. subroutines.
 *****************************************************************************/

/*
 * bha_cmd:
 *
 *	Send a command to the Buslogic controller.
 *
 *	iot/ioh		bus space tag/handle for the board's ports
 *	name		device name used in diagnostic messages
 *	icnt/ibuf	command length and bytes (ibuf[0] is the opcode)
 *	ocnt/obuf	expected reply length and reply buffer
 *
 *	Returns 0 on success, 1 on failure (busy or timed-out board).
 */
static int
bha_cmd(bus_space_tag_t iot, bus_space_handle_t ioh, const char *name, int icnt,
    u_char *ibuf, int ocnt, u_char *obuf)
{
	int i;
	int wait;
	u_char sts;
	u_char opcode = ibuf[0];

	/*
	 * Calculate a reasonable timeout for the command.
	 * Device-inquiry commands may take much longer.
	 */
	switch (opcode) {
	case BHA_INQUIRE_DEVICES:
	case BHA_INQUIRE_DEVICES_2:
		wait = 90 * 20000;
		break;
	default:
		wait = 1 * 20000;
		break;
	}

	/*
	 * Wait for the adapter to go idle, unless it's one of
	 * the commands which don't need this
	 */
	if (opcode != BHA_MBO_INTR_EN) {
		for (i = 20000; i; i--) {	/* 1 sec? */
			sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
			if (sts & BHA_STAT_IDLE)
				break;
			delay(50);
		}
		if (!i) {
			printf("%s: bha_cmd, host not idle(0x%x)\n",
			    name, sts);
			return (1);
		}
	}

	/*
	 * Now that it is idle, if we expect output, preflush the
	 * queue feeding to us.
	 */
	if (ocnt) {
		while ((bus_space_read_1(iot, ioh, BHA_STAT_PORT)) &
		    BHA_STAT_DF)
			(void)bus_space_read_1(iot, ioh, BHA_DATA_PORT);
	}

	/*
	 * Output the command and the number of arguments given
	 * for each byte, first check the port is empty.
	 */
	while (icnt--) {
		/* Wait for the command/data-full bit to clear. */
		for (i = wait; i; i--) {
			sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
			if (!(sts & BHA_STAT_CDF))
				break;
			delay(50);
		}
		if (!i) {
			if (opcode != BHA_INQUIRE_REVISION)
				printf("%s: bha_cmd, cmd/data port full\n",
				    name);
			goto bad;
		}
		bus_space_write_1(iot, ioh, BHA_CMD_PORT, *ibuf++);
	}

	/*
	 * If we expect input, loop that many times, each time,
	 * looking for the data register to have valid data
	 */
	while (ocnt--) {
		for (i = wait; i; i--) {
			sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
			if (sts & BHA_STAT_DF)
				break;
			delay(50);
		}
		if (!i) {
#ifdef BHADEBUG
			if (opcode != BHA_INQUIRE_REVISION)
				printf("%s: bha_cmd, cmd/data port empty %d\n",
				    name, ocnt);
#endif /* BHADEBUG */
			goto bad;
		}
		*obuf++ = bus_space_read_1(iot, ioh, BHA_DATA_PORT);
	}

	/*
	 * Wait for the board to report a finished instruction.
	 * We may get an extra interrupt for the HACC signal, but this is
	 * unimportant.
	 */
	if (opcode != BHA_MBO_INTR_EN && opcode != BHA_MODIFY_IOPORT) {
		for (i = 20000; i; i--) {	/* 1 sec? */
			sts = bus_space_read_1(iot, ioh, BHA_INTR_PORT);
			/* XXX Need to save this in the interrupt handler? */
			if (sts & BHA_INTR_HACC)
				break;
			delay(50);
		}
		if (!i) {
			printf("%s: bha_cmd, host not finished(0x%x)\n",
			    name, sts);
			return (1);
		}
	}
	/* Acknowledge (reset) the interrupt the command raised. */
	bus_space_write_1(iot, ioh, BHA_CTRL_PORT, BHA_CTRL_IRST);
	return (0);

bad:
	/* Soft-reset the board to get it back to a known state. */
	bus_space_write_1(iot, ioh, BHA_CTRL_PORT, BHA_CTRL_SRST);
	return (1);
}

/*
 * bha_find:
 *
 *	Find the board.
 *
 *	Returns 1 if a usable BusLogic board answers at iot/ioh,
 *	0 otherwise.  Takes care never to match Adaptec 1542 clones.
 */
int
bha_find(bus_space_tag_t iot, bus_space_handle_t ioh)
{
	int i;
	u_char sts;
	struct bha_extended_inquire inquire;

	/* Check something is at the ports we need to access */
	sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
	if (sts == 0xFF)
		return (0);

	/*
	 * Reset board, If it doesn't respond, assume
	 * that it's not there.. good for the probe
	 */

	bus_space_write_1(iot, ioh, BHA_CTRL_PORT,
	    BHA_CTRL_HRST | BHA_CTRL_SRST);

	delay(100);
	/* Wait up to BHA_RESET_TIMEOUT ms for idle+init status. */
	for (i = BHA_RESET_TIMEOUT; i; i--) {
		sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
		if (sts == (BHA_STAT_IDLE | BHA_STAT_INIT))
			break;
		delay(1000);
	}
	if (!i) {
#ifdef BHADEBUG
		if (bha_debug)
			printf("bha_find: No answer from buslogic board\n");
#endif /* BHADEBUG */
		return (0);
	}

	/*
	 * The BusLogic cards implement an Adaptec 1542 (aha)-compatible
	 * interface. The native bha interface is not compatible with
	 * an aha 1542. We need to ensure that we never match an
	 * Adaptec 1542. We must also avoid sending Adaptec-compatible
	 * commands to a real bha, lest it go into 1542 emulation mode.
	 * (On an indirect bus like ISA, we should always probe for BusLogic
	 * interfaces before Adaptec interfaces).
	 */

	/*
	 * Make sure we don't match an AHA-1542A or AHA-1542B, by checking
	 * for an extended-geometry register.  The 1542[AB] don't have one.
	 */
	sts = bus_space_read_1(iot, ioh, BHA_EXTGEOM_PORT);
	if (sts == 0xFF)
		return (0);

	/*
	 * Check that we actually know how to use this board.
	 */
	delay(1000);
	inquire.cmd.opcode = BHA_INQUIRE_EXTENDED;
	inquire.cmd.len = sizeof(inquire.reply);
	i = bha_cmd(iot, ioh, "(bha_find)",
	    sizeof(inquire.cmd), (u_char *)&inquire.cmd,
	    sizeof(inquire.reply), (u_char *)&inquire.reply);

	/*
	 * Some 1542Cs (CP, perhaps not CF, may depend on firmware rev)
	 * have the extended-geometry register and also respond to
	 * BHA_INQUIRE_EXTENDED.  Make sure we never match such cards,
	 * by checking the size of the reply is what a BusLogic card returns.
	 */
	if (i) {
#ifdef BHADEBUG
		printf("bha_find: board returned %d instead of %zu to %s\n",
		    i, sizeof(inquire.reply), "INQUIRE_EXTENDED");
#endif
		return (0);
	}

	/* OK, we know we've found a buslogic adaptor. */

	switch (inquire.reply.bus_type) {
	case BHA_BUS_TYPE_24BIT:
	case BHA_BUS_TYPE_32BIT:
		break;
	case BHA_BUS_TYPE_MCA:
		/* We don't grok MicroChannel (yet). */
		return (0);
	default:
		printf("bha_find: illegal bus type %c\n",
		    inquire.reply.bus_type);
		return (0);
	}

	return (1);
}


/*
 * bha_inquire_config:
 *
 *	Determine irq/drq.
 *
 *	Issues Inquire Configuration and decodes the jumpered DMA
 *	channel and interrupt level.  Returns 1 on success (filling
 *	in `sc' if non-NULL), 0 on an unrecognized setting.
 */
int
bha_inquire_config(bus_space_tag_t iot, bus_space_handle_t ioh,
    struct bha_probe_data *sc)
{
	int irq, drq;
	struct bha_config config;

	/*
	 * Assume we have a board at this stage setup DMA channel from
	 * jumpers and save int level
	 */
	delay(1000);
	config.cmd.opcode = BHA_INQUIRE_CONFIG;
	bha_cmd(iot, ioh, "(bha_inquire_config)",
	    sizeof(config.cmd), (u_char *)&config.cmd,
	    sizeof(config.reply), (u_char *)&config.reply);
	/* Map the one-hot channel field to a DRQ number (-1 = EISA DMA). */
	switch (config.reply.chan) {
	case EISADMA:
		drq = -1;
		break;
	case CHAN0:
		drq = 0;
		break;
	case CHAN5:
		drq = 5;
		break;
	case CHAN6:
		drq = 6;
		break;
	case CHAN7:
		drq = 7;
		break;
	default:
		printf("bha: illegal drq setting %x\n",
		    config.reply.chan);
		return (0);
	}

	/* Map the one-hot interrupt field to an IRQ number. */
	switch (config.reply.intr) {
	case INT9:
		irq = 9;
		break;
	case INT10:
		irq = 10;
		break;
	case INT11:
		irq = 11;
		break;
	case INT12:
		irq = 12;
		break;
	case INT14:
		irq = 14;
		break;
	case INT15:
		irq = 15;
		break;
	default:
		printf("bha: illegal irq setting %x\n",
		    config.reply.intr);
		return (0);
	}

	/* if we want to fill in softc, do so now */
	if (sc != NULL) {
		sc->sc_irq = irq;
		sc->sc_drq = drq;
	}

	return (1);
}

/*
 * bha_probe_inquiry:
 *
 *	Convenience probe: find the board and fetch its irq/drq.
 */
int
bha_probe_inquiry(bus_space_tag_t iot, bus_space_handle_t ioh,
    struct bha_probe_data *bpd)
{
	return bha_find(iot, ioh) && bha_inquire_config(iot, ioh, bpd);
}

/*
 * bha_disable_isacompat:
 *
 *	Disable the ISA-compatibility ioports on PCI bha devices,
 *	to ensure they're not autoconfigured a second time as an ISA bha.
1054 */ 1055 int 1056 bha_disable_isacompat(struct bha_softc *sc) 1057 { 1058 struct bha_isadisable isa_disable; 1059 1060 isa_disable.cmd.opcode = BHA_MODIFY_IOPORT; 1061 isa_disable.cmd.modifier = BHA_IOMODIFY_DISABLE1; 1062 bha_cmd(sc->sc_iot, sc->sc_ioh, device_xname(&sc->sc_dev), 1063 sizeof(isa_disable.cmd), (u_char*)&isa_disable.cmd, 1064 0, (u_char *)0); 1065 return (0); 1066 } 1067 1068 /* 1069 * bha_info: 1070 * 1071 * Get information about the board, and report it. We 1072 * return the initial number of CCBs, 0 if we failed. 1073 */ 1074 int 1075 bha_info(struct bha_softc *sc) 1076 { 1077 bus_space_tag_t iot = sc->sc_iot; 1078 bus_space_handle_t ioh = sc->sc_ioh; 1079 struct bha_extended_inquire inquire; 1080 struct bha_config config; 1081 struct bha_devices devices; 1082 struct bha_setup setup; 1083 struct bha_model model; 1084 struct bha_revision revision; 1085 struct bha_digit digit; 1086 int i, j, initial_ccbs, rlen; 1087 const char *name = device_xname(&sc->sc_dev); 1088 char *p; 1089 1090 /* 1091 * Fetch the extended inquire information. 1092 */ 1093 inquire.cmd.opcode = BHA_INQUIRE_EXTENDED; 1094 inquire.cmd.len = sizeof(inquire.reply); 1095 bha_cmd(iot, ioh, name, 1096 sizeof(inquire.cmd), (u_char *)&inquire.cmd, 1097 sizeof(inquire.reply), (u_char *)&inquire.reply); 1098 1099 /* 1100 * Fetch the configuration information. 1101 */ 1102 config.cmd.opcode = BHA_INQUIRE_CONFIG; 1103 bha_cmd(iot, ioh, name, 1104 sizeof(config.cmd), (u_char *)&config.cmd, 1105 sizeof(config.reply), (u_char *)&config.reply); 1106 1107 sc->sc_scsi_id = config.reply.scsi_dev; 1108 1109 /* 1110 * Get the firmware revision. 
1111 */ 1112 p = sc->sc_firmware; 1113 revision.cmd.opcode = BHA_INQUIRE_REVISION; 1114 bha_cmd(iot, ioh, name, 1115 sizeof(revision.cmd), (u_char *)&revision.cmd, 1116 sizeof(revision.reply), (u_char *)&revision.reply); 1117 *p++ = revision.reply.firm_revision; 1118 *p++ = '.'; 1119 *p++ = revision.reply.firm_version; 1120 digit.cmd.opcode = BHA_INQUIRE_REVISION_3; 1121 bha_cmd(iot, ioh, name, 1122 sizeof(digit.cmd), (u_char *)&digit.cmd, 1123 sizeof(digit.reply), (u_char *)&digit.reply); 1124 *p++ = digit.reply.digit; 1125 if (revision.reply.firm_revision >= '3' || 1126 (revision.reply.firm_revision == '3' && 1127 revision.reply.firm_version >= '3')) { 1128 digit.cmd.opcode = BHA_INQUIRE_REVISION_4; 1129 bha_cmd(iot, ioh, name, 1130 sizeof(digit.cmd), (u_char *)&digit.cmd, 1131 sizeof(digit.reply), (u_char *)&digit.reply); 1132 *p++ = digit.reply.digit; 1133 } 1134 while (p > sc->sc_firmware && (p[-1] == ' ' || p[-1] == '\0')) 1135 p--; 1136 *p = '\0'; 1137 1138 /* 1139 * Get the model number. 1140 * 1141 * Some boards do not handle the Inquire Board Model Number 1142 * command correctly, or don't give correct information. 1143 * 1144 * So, we use the Firmware Revision and Extended Setup 1145 * information to fixup the model number in these cases. 
1146 * 1147 * The firmware version indicates: 1148 * 1149 * 5.xx BusLogic "W" Series Host Adapters 1150 * BT-948/958/958D 1151 * 1152 * 4.xx BusLogic "C" Series Host Adapters 1153 * BT-946C/956C/956CD/747C/757C/757CD/445C/545C/540CF 1154 * 1155 * 3.xx BusLogic "S" Series Host Adapters 1156 * BT-747S/747D/757S/757D/445S/545S/542D 1157 * BT-542B/742A (revision H) 1158 * 1159 * 2.xx BusLogic "A" Series Host Adapters 1160 * BT-542B/742A (revision G and below) 1161 * 1162 * 0.xx AMI FastDisk VLB/EISA BusLogic Clone Host Adapter 1163 */ 1164 if (inquire.reply.bus_type == BHA_BUS_TYPE_24BIT && 1165 sc->sc_firmware[0] < '3') 1166 snprintf(sc->sc_model, sizeof(sc->sc_model), "542B"); 1167 else if (inquire.reply.bus_type == BHA_BUS_TYPE_32BIT && 1168 sc->sc_firmware[0] == '2' && 1169 (sc->sc_firmware[2] == '1' || 1170 (sc->sc_firmware[2] == '2' && sc->sc_firmware[3] == '0'))) 1171 snprintf(sc->sc_model, sizeof(sc->sc_model), "742A"); 1172 else if (inquire.reply.bus_type == BHA_BUS_TYPE_32BIT && 1173 sc->sc_firmware[0] == '0') 1174 snprintf(sc->sc_model, sizeof(sc->sc_model), "747A"); 1175 else { 1176 p = sc->sc_model; 1177 model.cmd.opcode = BHA_INQUIRE_MODEL; 1178 model.cmd.len = sizeof(model.reply); 1179 bha_cmd(iot, ioh, name, 1180 sizeof(model.cmd), (u_char *)&model.cmd, 1181 sizeof(model.reply), (u_char *)&model.reply); 1182 *p++ = model.reply.id[0]; 1183 *p++ = model.reply.id[1]; 1184 *p++ = model.reply.id[2]; 1185 *p++ = model.reply.id[3]; 1186 while (p > sc->sc_model && (p[-1] == ' ' || p[-1] == '\0')) 1187 p--; 1188 *p++ = model.reply.version[0]; 1189 *p++ = model.reply.version[1]; 1190 while (p > sc->sc_model && (p[-1] == ' ' || p[-1] == '\0')) 1191 p--; 1192 *p = '\0'; 1193 } 1194 1195 /* Enable round-robin scheme - appeared at firmware rev. 3.31. */ 1196 if (strcmp(sc->sc_firmware, "3.31") >= 0) 1197 sc->sc_flags |= BHAF_STRICT_ROUND_ROBIN; 1198 1199 /* 1200 * Determine some characteristics about our bus. 
1201 */ 1202 if (inquire.reply.scsi_flags & BHA_SCSI_WIDE) 1203 sc->sc_flags |= BHAF_WIDE; 1204 if (inquire.reply.scsi_flags & BHA_SCSI_DIFFERENTIAL) 1205 sc->sc_flags |= BHAF_DIFFERENTIAL; 1206 if (inquire.reply.scsi_flags & BHA_SCSI_ULTRA) 1207 sc->sc_flags |= BHAF_ULTRA; 1208 1209 /* 1210 * Determine some characterists of the board. 1211 */ 1212 sc->sc_max_dmaseg = inquire.reply.sg_limit; 1213 1214 /* 1215 * Determine the maximum CCB count and whether or not 1216 * tagged queueing is available on this host adapter. 1217 * 1218 * Tagged queueing works on: 1219 * 1220 * "W" Series adapters 1221 * "C" Series adapters with firmware >= 4.22 1222 * "S" Series adapters with firmware >= 3.35 1223 * 1224 * The internal CCB counts are: 1225 * 1226 * 192 BT-948/958/958D 1227 * 100 BT-946C/956C/956CD/747C/757C/757CD/445C 1228 * 50 BT-545C/540CF 1229 * 30 BT-747S/747D/757S/757D/445S/545S/542D/542B/742A 1230 */ 1231 switch (sc->sc_firmware[0]) { 1232 case '5': 1233 sc->sc_max_ccbs = 192; 1234 sc->sc_flags |= BHAF_TAGGED_QUEUEING; 1235 break; 1236 1237 case '4': 1238 if (sc->sc_model[0] == '5') 1239 sc->sc_max_ccbs = 50; 1240 else 1241 sc->sc_max_ccbs = 100; 1242 if (strcmp(sc->sc_firmware, "4.22") >= 0) 1243 sc->sc_flags |= BHAF_TAGGED_QUEUEING; 1244 break; 1245 1246 case '3': 1247 if (strcmp(sc->sc_firmware, "3.35") >= 0) 1248 sc->sc_flags |= BHAF_TAGGED_QUEUEING; 1249 /* FALLTHROUGH */ 1250 1251 default: 1252 sc->sc_max_ccbs = 30; 1253 } 1254 1255 /* 1256 * Set the mailbox count to precisely the number of HW CCBs 1257 * available. A mailbox isn't required while a CCB is executing, 1258 * but this allows us to actually enqueue up to our resource 1259 * limit. 1260 * 1261 * This will keep the mailbox count small on boards which don't 1262 * have strict round-robin (they have to scan the entire set of 1263 * mailboxes each time they run a command). 1264 */ 1265 sc->sc_mbox_count = sc->sc_max_ccbs; 1266 1267 /* 1268 * Obtain setup information. 
1269 */ 1270 rlen = sizeof(setup.reply) + 1271 ((sc->sc_flags & BHAF_WIDE) ? sizeof(setup.reply_w) : 0); 1272 setup.cmd.opcode = BHA_INQUIRE_SETUP; 1273 setup.cmd.len = rlen; 1274 bha_cmd(iot, ioh, name, 1275 sizeof(setup.cmd), (u_char *)&setup.cmd, 1276 rlen, (u_char *)&setup.reply); 1277 1278 aprint_normal_dev(&sc->sc_dev, "model BT-%s, firmware %s\n", 1279 sc->sc_model, sc->sc_firmware); 1280 1281 aprint_normal_dev(&sc->sc_dev, "%d H/W CCBs", sc->sc_max_ccbs); 1282 if (setup.reply.sync_neg) 1283 aprint_normal(", sync"); 1284 if (setup.reply.parity) 1285 aprint_normal(", parity"); 1286 if (sc->sc_flags & BHAF_TAGGED_QUEUEING) 1287 aprint_normal(", tagged queueing"); 1288 if (sc->sc_flags & BHAF_WIDE_LUN) 1289 aprint_normal(", wide LUN support"); 1290 aprint_normal("\n"); 1291 1292 /* 1293 * Poll targets 0 - 7. 1294 */ 1295 devices.cmd.opcode = BHA_INQUIRE_DEVICES; 1296 bha_cmd(iot, ioh, name, 1297 sizeof(devices.cmd), (u_char *)&devices.cmd, 1298 sizeof(devices.reply), (u_char *)&devices.reply); 1299 1300 /* Count installed units. */ 1301 initial_ccbs = 0; 1302 for (i = 0; i < 8; i++) { 1303 for (j = 0; j < 8; j++) { 1304 if (((devices.reply.lun_map[i] >> j) & 1) == 1) 1305 initial_ccbs++; 1306 } 1307 } 1308 1309 /* 1310 * Poll targets 8 - 15 if we have a wide bus. 1311 */ 1312 if (sc->sc_flags & BHAF_WIDE) { 1313 devices.cmd.opcode = BHA_INQUIRE_DEVICES_2; 1314 bha_cmd(iot, ioh, name, 1315 sizeof(devices.cmd), (u_char *)&devices.cmd, 1316 sizeof(devices.reply), (u_char *)&devices.reply); 1317 1318 for (i = 0; i < 8; i++) { 1319 for (j = 0; j < 8; j++) { 1320 if (((devices.reply.lun_map[i] >> j) & 1) == 1) 1321 initial_ccbs++; 1322 } 1323 } 1324 } 1325 1326 /* 1327 * Double the initial CCB count, for good measure. 1328 */ 1329 initial_ccbs *= 2; 1330 1331 /* 1332 * Sanity check the initial CCB count; don't create more than 1333 * we can enqueue (sc_max_ccbs), and make sure there are some 1334 * at all. 
1335 */ 1336 if (initial_ccbs > sc->sc_max_ccbs) 1337 initial_ccbs = sc->sc_max_ccbs; 1338 if (initial_ccbs == 0) 1339 initial_ccbs = 2; 1340 1341 return (initial_ccbs); 1342 } 1343 1344 /* 1345 * bha_init: 1346 * 1347 * Initialize the board. 1348 */ 1349 static int 1350 bha_init(struct bha_softc *sc) 1351 { 1352 const char *name = device_xname(&sc->sc_dev); 1353 struct bha_toggle toggle; 1354 struct bha_mailbox mailbox; 1355 struct bha_mbx_out *mbo; 1356 struct bha_mbx_in *mbi; 1357 int i; 1358 1359 /* 1360 * Set up the mailbox. We always run the mailbox in round-robin. 1361 */ 1362 for (i = 0; i < sc->sc_mbox_count; i++) { 1363 mbo = &sc->sc_mbo[i]; 1364 mbi = &sc->sc_mbi[i]; 1365 1366 mbo->cmd = BHA_MBO_FREE; 1367 BHA_MBO_SYNC(sc, mbo, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1368 1369 mbi->comp_stat = BHA_MBI_FREE; 1370 BHA_MBI_SYNC(sc, mbi, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1371 } 1372 1373 sc->sc_cmbo = sc->sc_tmbo = &sc->sc_mbo[0]; 1374 sc->sc_tmbi = &sc->sc_mbi[0]; 1375 1376 sc->sc_mbofull = 0; 1377 1378 /* 1379 * If the board supports strict round-robin, enable that. 1380 */ 1381 if (sc->sc_flags & BHAF_STRICT_ROUND_ROBIN) { 1382 toggle.cmd.opcode = BHA_ROUND_ROBIN; 1383 toggle.cmd.enable = 1; 1384 bha_cmd(sc->sc_iot, sc->sc_ioh, name, 1385 sizeof(toggle.cmd), (u_char *)&toggle.cmd, 1386 0, NULL); 1387 } 1388 1389 /* 1390 * Give the mailbox to the board. 
1391 */ 1392 mailbox.cmd.opcode = BHA_MBX_INIT_EXTENDED; 1393 mailbox.cmd.nmbx = sc->sc_mbox_count; 1394 ltophys(sc->sc_dmamap_mbox->dm_segs[0].ds_addr, mailbox.cmd.addr); 1395 bha_cmd(sc->sc_iot, sc->sc_ioh, name, 1396 sizeof(mailbox.cmd), (u_char *)&mailbox.cmd, 1397 0, (u_char *)0); 1398 1399 return (0); 1400 } 1401 1402 /***************************************************************************** 1403 * CCB execution engine 1404 *****************************************************************************/ 1405 1406 /* 1407 * bha_queue_ccb: 1408 * 1409 * Queue a CCB to be sent to the controller, and send it if possible. 1410 */ 1411 static void 1412 bha_queue_ccb(struct bha_softc *sc, struct bha_ccb *ccb) 1413 { 1414 1415 TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain); 1416 bha_start_ccbs(sc); 1417 } 1418 1419 /* 1420 * bha_start_ccbs: 1421 * 1422 * Send as many CCBs as we have empty mailboxes for. 1423 */ 1424 static void 1425 bha_start_ccbs(struct bha_softc *sc) 1426 { 1427 bus_space_tag_t iot = sc->sc_iot; 1428 bus_space_handle_t ioh = sc->sc_ioh; 1429 struct bha_ccb_group *bcg; 1430 struct bha_mbx_out *mbo; 1431 struct bha_ccb *ccb; 1432 1433 mbo = sc->sc_tmbo; 1434 1435 while ((ccb = TAILQ_FIRST(&sc->sc_waiting_ccb)) != NULL) { 1436 if (sc->sc_mbofull >= sc->sc_mbox_count) { 1437 #ifdef DIAGNOSTIC 1438 if (sc->sc_mbofull > sc->sc_mbox_count) 1439 panic("bha_start_ccbs: mbofull > mbox_count"); 1440 #endif 1441 /* 1442 * No mailboxes available; attempt to collect ones 1443 * that have already been used. 1444 */ 1445 bha_collect_mbo(sc); 1446 if (sc->sc_mbofull == sc->sc_mbox_count) { 1447 /* 1448 * Still no more available; have the 1449 * controller interrupt us when it 1450 * frees one. 
1451 */ 1452 struct bha_toggle toggle; 1453 1454 toggle.cmd.opcode = BHA_MBO_INTR_EN; 1455 toggle.cmd.enable = 1; 1456 bha_cmd(iot, ioh, device_xname(&sc->sc_dev), 1457 sizeof(toggle.cmd), (u_char *)&toggle.cmd, 1458 0, (u_char *)0); 1459 break; 1460 } 1461 } 1462 1463 TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain); 1464 #ifdef BHADIAG 1465 ccb->flags |= CCB_SENDING; 1466 #endif 1467 1468 /* 1469 * Put the CCB in the mailbox. 1470 */ 1471 bcg = BHA_CCB_GROUP(ccb); 1472 ltophys(bcg->bcg_dmamap->dm_segs[0].ds_addr + 1473 BHA_CCB_OFFSET(ccb), mbo->ccb_addr); 1474 if (ccb->flags & CCB_ABORT) 1475 mbo->cmd = BHA_MBO_ABORT; 1476 else 1477 mbo->cmd = BHA_MBO_START; 1478 1479 BHA_MBO_SYNC(sc, mbo, 1480 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1481 1482 /* Tell the card to poll immediately. */ 1483 bus_space_write_1(iot, ioh, BHA_CMD_PORT, BHA_START_SCSI); 1484 1485 if ((ccb->xs->xs_control & XS_CTL_POLL) == 0) 1486 callout_reset(&ccb->xs->xs_callout, 1487 mstohz(ccb->timeout), bha_timeout, ccb); 1488 1489 ++sc->sc_mbofull; 1490 mbo = bha_nextmbo(sc, mbo); 1491 } 1492 1493 sc->sc_tmbo = mbo; 1494 } 1495 1496 /* 1497 * bha_finish_ccbs: 1498 * 1499 * Finalize the execution of CCBs in our incoming mailbox. 1500 */ 1501 static void 1502 bha_finish_ccbs(struct bha_softc *sc) 1503 { 1504 struct bha_mbx_in *mbi; 1505 struct bha_ccb *ccb; 1506 int i; 1507 1508 mbi = sc->sc_tmbi; 1509 1510 BHA_MBI_SYNC(sc, mbi, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1511 1512 if (mbi->comp_stat == BHA_MBI_FREE) { 1513 for (i = 0; i < sc->sc_mbox_count; i++) { 1514 if (mbi->comp_stat != BHA_MBI_FREE) { 1515 #ifdef BHADIAG 1516 /* 1517 * This can happen in normal operation if 1518 * we use all mailbox slots. 
1519 */ 1520 printf("%s: mbi not in round-robin order\n", 1521 device_xname(&sc->sc_dev)); 1522 #endif 1523 goto again; 1524 } 1525 mbi = bha_nextmbi(sc, mbi); 1526 BHA_MBI_SYNC(sc, mbi, 1527 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1528 } 1529 #ifdef BHADIAGnot 1530 printf("%s: mbi interrupt with no full mailboxes\n", 1531 device_xname(&sc->sc_dev)); 1532 #endif 1533 return; 1534 } 1535 1536 again: 1537 do { 1538 ccb = bha_ccb_phys_kv(sc, phystol(mbi->ccb_addr)); 1539 if (ccb == NULL) { 1540 aprint_error_dev(&sc->sc_dev, "bad mbi ccb pointer 0x%08x; skipping\n", 1541 phystol(mbi->ccb_addr)); 1542 goto next; 1543 } 1544 1545 BHA_CCB_SYNC(sc, ccb, 1546 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1547 1548 #ifdef BHADEBUG 1549 if (bha_debug) { 1550 u_char *cp = ccb->scsi_cmd; 1551 printf("op=%x %x %x %x %x %x\n", 1552 cp[0], cp[1], cp[2], cp[3], cp[4], cp[5]); 1553 printf("comp_stat %x for mbi addr = %p, ", 1554 mbi->comp_stat, mbi); 1555 printf("ccb addr = %p\n", ccb); 1556 } 1557 #endif /* BHADEBUG */ 1558 1559 switch (mbi->comp_stat) { 1560 case BHA_MBI_OK: 1561 case BHA_MBI_ERROR: 1562 if ((ccb->flags & CCB_ABORT) != 0) { 1563 /* 1564 * If we already started an abort, wait for it 1565 * to complete before clearing the CCB. We 1566 * could instead just clear CCB_SENDING, but 1567 * what if the mailbox was already received? 1568 * The worst that happens here is that we clear 1569 * the CCB a bit later than we need to. BFD. 1570 */ 1571 goto next; 1572 } 1573 break; 1574 1575 case BHA_MBI_ABORT: 1576 case BHA_MBI_UNKNOWN: 1577 /* 1578 * Even if the CCB wasn't found, we clear it anyway. 1579 * See preceding comment. 
1580 */ 1581 break; 1582 1583 default: 1584 aprint_error_dev(&sc->sc_dev, "bad mbi comp_stat %02x; skipping\n", 1585 mbi->comp_stat); 1586 goto next; 1587 } 1588 1589 callout_stop(&ccb->xs->xs_callout); 1590 bha_done(sc, ccb); 1591 1592 next: 1593 mbi->comp_stat = BHA_MBI_FREE; 1594 BHA_CCB_SYNC(sc, ccb, 1595 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1596 1597 mbi = bha_nextmbi(sc, mbi); 1598 BHA_MBI_SYNC(sc, mbi, 1599 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1600 } while (mbi->comp_stat != BHA_MBI_FREE); 1601 1602 sc->sc_tmbi = mbi; 1603 } 1604 1605 /***************************************************************************** 1606 * Mailbox management functions. 1607 *****************************************************************************/ 1608 1609 /* 1610 * bha_create_mailbox: 1611 * 1612 * Create the mailbox structures. Helper function for bha_attach(). 1613 * 1614 * NOTE: The Buslogic hardware only gets one DMA address for the 1615 * mailbox! It expects: 1616 * 1617 * mailbox_out[mailbox_size] 1618 * mailbox_in[mailbox_size] 1619 */ 1620 static int 1621 bha_create_mailbox(struct bha_softc *sc) 1622 { 1623 bus_dma_segment_t seg; 1624 size_t size; 1625 int error, rseg; 1626 1627 size = (sizeof(struct bha_mbx_out) * sc->sc_mbox_count) + 1628 (sizeof(struct bha_mbx_in) * sc->sc_mbox_count); 1629 1630 error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg, 1631 1, &rseg, sc->sc_dmaflags); 1632 if (error) { 1633 aprint_error_dev(&sc->sc_dev, "unable to allocate mailboxes, error = %d\n", 1634 error); 1635 goto bad_0; 1636 } 1637 1638 error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size, 1639 (void **)&sc->sc_mbo, sc->sc_dmaflags | BUS_DMA_COHERENT); 1640 if (error) { 1641 aprint_error_dev(&sc->sc_dev, "unable to map mailboxes, error = %d\n", 1642 error); 1643 goto bad_1; 1644 } 1645 1646 memset(sc->sc_mbo, 0, size); 1647 1648 error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, 1649 sc->sc_dmaflags, &sc->sc_dmamap_mbox); 1650 if (error) { 1651 
aprint_error_dev(&sc->sc_dev, 1652 "unable to create mailbox DMA map, error = %d\n", 1653 error); 1654 goto bad_2; 1655 } 1656 1657 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_mbox, 1658 sc->sc_mbo, size, NULL, 0); 1659 if (error) { 1660 aprint_error_dev(&sc->sc_dev, "unable to load mailbox DMA map, error = %d\n", 1661 error); 1662 goto bad_3; 1663 } 1664 1665 sc->sc_mbi = (struct bha_mbx_in *)(sc->sc_mbo + sc->sc_mbox_count); 1666 1667 return (0); 1668 1669 bad_3: 1670 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap_mbox); 1671 bad_2: 1672 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_mbo, size); 1673 bad_1: 1674 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 1675 bad_0: 1676 return (error); 1677 } 1678 1679 /* 1680 * bha_collect_mbo: 1681 * 1682 * Garbage collect mailboxes that are no longer in use. 1683 */ 1684 static void 1685 bha_collect_mbo(struct bha_softc *sc) 1686 { 1687 struct bha_mbx_out *mbo; 1688 #ifdef BHADIAG 1689 struct bha_ccb *ccb; 1690 #endif 1691 1692 mbo = sc->sc_cmbo; 1693 1694 while (sc->sc_mbofull > 0) { 1695 BHA_MBO_SYNC(sc, mbo, 1696 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1697 if (mbo->cmd != BHA_MBO_FREE) 1698 break; 1699 1700 #ifdef BHADIAG 1701 ccb = bha_ccb_phys_kv(sc, phystol(mbo->ccb_addr)); 1702 ccb->flags &= ~CCB_SENDING; 1703 #endif 1704 1705 --sc->sc_mbofull; 1706 mbo = bha_nextmbo(sc, mbo); 1707 } 1708 1709 sc->sc_cmbo = mbo; 1710 } 1711 1712 /***************************************************************************** 1713 * CCB management functions 1714 *****************************************************************************/ 1715 1716 static inline void 1717 bha_reset_ccb(struct bha_ccb *ccb) 1718 { 1719 1720 ccb->flags = 0; 1721 } 1722 1723 /* 1724 * bha_create_ccbs: 1725 * 1726 * Create a set of CCBs. 1727 * 1728 * We determine the target CCB count, and then keep creating them 1729 * until we reach the target, or fail. CCBs that are allocated 1730 * but not "created" are left on the allocating list. 
1731 * 1732 * XXX AB_QUIET/AB_SILENT lossage here; this is called during 1733 * boot as well as at run-time. 1734 */ 1735 static void 1736 bha_create_ccbs(struct bha_softc *sc, int count) 1737 { 1738 struct bha_ccb_group *bcg; 1739 struct bha_ccb *ccb; 1740 bus_dma_segment_t seg; 1741 bus_dmamap_t ccbmap; 1742 int target, i, error, rseg; 1743 1744 /* 1745 * If the current CCB count is already the max number we're 1746 * allowed to have, bail out now. 1747 */ 1748 if (sc->sc_cur_ccbs == sc->sc_max_ccbs) 1749 return; 1750 1751 /* 1752 * Compute our target count, and clamp it down to the max 1753 * number we're allowed to have. 1754 */ 1755 target = sc->sc_cur_ccbs + count; 1756 if (target > sc->sc_max_ccbs) 1757 target = sc->sc_max_ccbs; 1758 1759 /* 1760 * If there are CCBs on the allocating list, don't allocate a 1761 * CCB group yet. 1762 */ 1763 if (TAILQ_FIRST(&sc->sc_allocating_ccbs) != NULL) 1764 goto have_allocating_ccbs; 1765 1766 allocate_group: 1767 error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, 1768 PAGE_SIZE, 0, &seg, 1, &rseg, sc->sc_dmaflags | BUS_DMA_NOWAIT); 1769 if (error) { 1770 aprint_error_dev(&sc->sc_dev, "unable to allocate CCB group, error = %d\n", 1771 error); 1772 goto bad_0; 1773 } 1774 1775 error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE, 1776 (void *)&bcg, 1777 sc->sc_dmaflags | BUS_DMA_NOWAIT | BUS_DMA_COHERENT); 1778 if (error) { 1779 aprint_error_dev(&sc->sc_dev, "unable to map CCB group, error = %d\n", 1780 error); 1781 goto bad_1; 1782 } 1783 1784 memset(bcg, 0, PAGE_SIZE); 1785 1786 error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1787 1, PAGE_SIZE, 0, sc->sc_dmaflags | BUS_DMA_NOWAIT, &ccbmap); 1788 if (error) { 1789 aprint_error_dev(&sc->sc_dev, "unable to create CCB group DMA map, error = %d\n", 1790 error); 1791 goto bad_2; 1792 } 1793 1794 error = bus_dmamap_load(sc->sc_dmat, ccbmap, bcg, PAGE_SIZE, NULL, 1795 sc->sc_dmaflags | BUS_DMA_NOWAIT); 1796 if (error) { 1797 aprint_error_dev(&sc->sc_dev, "unable to load 
CCB group DMA map, error = %d\n", 1798 error); 1799 goto bad_3; 1800 } 1801 1802 bcg->bcg_dmamap = ccbmap; 1803 1804 #ifdef DIAGNOSTIC 1805 if (BHA_CCB_GROUP(&bcg->bcg_ccbs[0]) != 1806 BHA_CCB_GROUP(&bcg->bcg_ccbs[bha_ccbs_per_group - 1])) 1807 panic("bha_create_ccbs: CCB group size botch"); 1808 #endif 1809 1810 /* 1811 * Add all of the CCBs in this group to the allocating list. 1812 */ 1813 for (i = 0; i < bha_ccbs_per_group; i++) { 1814 ccb = &bcg->bcg_ccbs[i]; 1815 TAILQ_INSERT_TAIL(&sc->sc_allocating_ccbs, ccb, chain); 1816 } 1817 1818 have_allocating_ccbs: 1819 /* 1820 * Loop over the allocating list until we reach our CCB target. 1821 * If we run out on the list, we'll allocate another group's 1822 * worth. 1823 */ 1824 while (sc->sc_cur_ccbs < target) { 1825 ccb = TAILQ_FIRST(&sc->sc_allocating_ccbs); 1826 if (ccb == NULL) 1827 goto allocate_group; 1828 if (bha_init_ccb(sc, ccb) != 0) { 1829 /* 1830 * We were unable to initialize the CCB. 1831 * This is likely due to a resource shortage, 1832 * so bail out now. 1833 */ 1834 return; 1835 } 1836 } 1837 1838 /* 1839 * If we got here, we've reached our target! 1840 */ 1841 return; 1842 1843 bad_3: 1844 bus_dmamap_destroy(sc->sc_dmat, ccbmap); 1845 bad_2: 1846 bus_dmamem_unmap(sc->sc_dmat, (void *)bcg, PAGE_SIZE); 1847 bad_1: 1848 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 1849 bad_0: 1850 return; 1851 } 1852 1853 /* 1854 * bha_init_ccb: 1855 * 1856 * Initialize a CCB; helper function for bha_create_ccbs(). 1857 */ 1858 static int 1859 bha_init_ccb(struct bha_softc *sc, struct bha_ccb *ccb) 1860 { 1861 struct bha_ccb_group *bcg = BHA_CCB_GROUP(ccb); 1862 int hashnum, error; 1863 1864 /* 1865 * Create the DMA map for this CCB. 1866 * 1867 * XXX ALLOCNOW is a hack to prevent bounce buffer shortages 1868 * XXX in the ISA case. A better solution is needed. 
1869 */ 1870 error = bus_dmamap_create(sc->sc_dmat, BHA_MAXXFER, BHA_NSEG, 1871 BHA_MAXXFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | sc->sc_dmaflags, 1872 &ccb->dmamap_xfer); 1873 if (error) { 1874 aprint_error_dev(&sc->sc_dev, "unable to create CCB DMA map, error = %d\n", 1875 error); 1876 return (error); 1877 } 1878 1879 TAILQ_REMOVE(&sc->sc_allocating_ccbs, ccb, chain); 1880 1881 /* 1882 * Put the CCB into the phystokv hash table. 1883 */ 1884 ccb->hashkey = bcg->bcg_dmamap->dm_segs[0].ds_addr + 1885 BHA_CCB_OFFSET(ccb); 1886 hashnum = CCB_HASH(ccb->hashkey); 1887 ccb->nexthash = sc->sc_ccbhash[hashnum]; 1888 sc->sc_ccbhash[hashnum] = ccb; 1889 bha_reset_ccb(ccb); 1890 1891 TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain); 1892 sc->sc_cur_ccbs++; 1893 1894 return (0); 1895 } 1896 1897 /* 1898 * bha_get_ccb: 1899 * 1900 * Get a CCB for the SCSI operation. If there are none left, 1901 * wait until one becomes available, if we can. 1902 */ 1903 static struct bha_ccb * 1904 bha_get_ccb(struct bha_softc *sc) 1905 { 1906 struct bha_ccb *ccb; 1907 int s; 1908 1909 s = splbio(); 1910 ccb = TAILQ_FIRST(&sc->sc_free_ccb); 1911 if (ccb != NULL) { 1912 TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain); 1913 ccb->flags |= CCB_ALLOC; 1914 } 1915 splx(s); 1916 return (ccb); 1917 } 1918 1919 /* 1920 * bha_free_ccb: 1921 * 1922 * Put a CCB back onto the free list. 1923 */ 1924 static void 1925 bha_free_ccb(struct bha_softc *sc, struct bha_ccb *ccb) 1926 { 1927 int s; 1928 1929 s = splbio(); 1930 bha_reset_ccb(ccb); 1931 TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain); 1932 splx(s); 1933 } 1934 1935 /* 1936 * bha_ccb_phys_kv: 1937 * 1938 * Given a CCB DMA address, locate the CCB in kernel virtual space. 
1939 */ 1940 static struct bha_ccb * 1941 bha_ccb_phys_kv(struct bha_softc *sc, bus_addr_t ccb_phys) 1942 { 1943 int hashnum = CCB_HASH(ccb_phys); 1944 struct bha_ccb *ccb = sc->sc_ccbhash[hashnum]; 1945 1946 while (ccb) { 1947 if (ccb->hashkey == ccb_phys) 1948 break; 1949 ccb = ccb->nexthash; 1950 } 1951 return (ccb); 1952 } 1953