1 /* $NetBSD: bha.c,v 1.80 2022/09/25 18:43:32 thorpej Exp $ */ 2 3 /*- 4 * Copyright (c) 1997, 1998, 1999 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace 9 * Simulation Facility, NASA Ames Research Center. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 /* 34 * Originally written by Julian Elischer (julian@tfs.com) 35 * for TRW Financial Systems for use under the MACH(2.5) operating system. 
36 * 37 * TRW Financial Systems, in accordance with their agreement with Carnegie 38 * Mellon University, makes this software available to CMU to distribute 39 * or use in any manner that they see fit as long as this message is kept with 40 * the software. For this reason TFS also grants any other persons or 41 * organisations permission to use or modify this software. 42 * 43 * TFS supplies this software to be publicly redistributed 44 * on the understanding that TFS is not responsible for the correct 45 * functioning of this software in any circumstances. 46 */ 47 48 #include <sys/cdefs.h> 49 __KERNEL_RCSID(0, "$NetBSD: bha.c,v 1.80 2022/09/25 18:43:32 thorpej Exp $"); 50 51 #include "opt_ddb.h" 52 53 #include <sys/param.h> 54 #include <sys/systm.h> 55 #include <sys/callout.h> 56 #include <sys/kernel.h> 57 #include <sys/errno.h> 58 #include <sys/ioctl.h> 59 #include <sys/device.h> 60 #include <sys/buf.h> 61 #include <sys/proc.h> 62 63 #include <sys/bus.h> 64 #include <sys/intr.h> 65 66 #include <dev/scsipi/scsi_all.h> 67 #include <dev/scsipi/scsipi_all.h> 68 #include <dev/scsipi/scsiconf.h> 69 70 #include <dev/ic/bhareg.h> 71 #include <dev/ic/bhavar.h> 72 73 #ifndef DDB 74 #define Debugger() panic("should call debugger here (bha.c)") 75 #endif /* ! 
DDB */ 76 77 #define BHA_MAXXFER ((BHA_NSEG - 1) << PGSHIFT) 78 79 #ifdef BHADEBUG 80 int bha_debug = 0; 81 #endif /* BHADEBUG */ 82 83 static int bha_cmd(bus_space_tag_t, bus_space_handle_t, const char *, int, 84 u_char *, int, u_char *); 85 86 static void bha_scsipi_request(struct scsipi_channel *, 87 scsipi_adapter_req_t, void *); 88 static void bha_minphys(struct buf *); 89 90 static void bha_get_xfer_mode(struct bha_softc *, 91 struct scsipi_xfer_mode *); 92 93 static void bha_done(struct bha_softc *, struct bha_ccb *); 94 static int bha_poll(struct bha_softc *, struct scsipi_xfer *, int); 95 static void bha_timeout(void *arg); 96 97 static int bha_init(struct bha_softc *); 98 99 static int bha_create_mailbox(struct bha_softc *); 100 static void bha_collect_mbo(struct bha_softc *); 101 102 static void bha_queue_ccb(struct bha_softc *, struct bha_ccb *); 103 static void bha_start_ccbs(struct bha_softc *); 104 static void bha_finish_ccbs(struct bha_softc *); 105 106 static struct bha_ccb *bha_ccb_phys_kv(struct bha_softc *, bus_addr_t); 107 static void bha_create_ccbs(struct bha_softc *, int); 108 static int bha_init_ccb(struct bha_softc *, struct bha_ccb *); 109 static struct bha_ccb *bha_get_ccb(struct bha_softc *); 110 static void bha_free_ccb(struct bha_softc *, struct bha_ccb *); 111 112 #define BHA_RESET_TIMEOUT 2000 /* time to wait for reset (mSec) */ 113 #define BHA_ABORT_TIMEOUT 2000 /* time to wait for abort (mSec) */ 114 115 /* 116 * Number of CCBs in an allocation group; must be computed at run-time. 
 */
static int bha_ccbs_per_group;

/*
 * bha_nextmbo:
 *
 *	Return the mailbox-out slot following `mbo', wrapping back to
 *	the first slot at the end of the sc_mbox_count-entry ring.
 */
static inline struct bha_mbx_out *
bha_nextmbo(struct bha_softc *sc, struct bha_mbx_out *mbo)
{

	if (mbo == &sc->sc_mbo[sc->sc_mbox_count - 1])
		return (&sc->sc_mbo[0]);
	return (mbo + 1);
}

/*
 * bha_nextmbi:
 *
 *	As bha_nextmbo(), but for the mailbox-in ring.
 */
static inline struct bha_mbx_in *
bha_nextmbi(struct bha_softc *sc, struct bha_mbx_in *mbi)
{
	if (mbi == &sc->sc_mbi[sc->sc_mbox_count - 1])
		return (&sc->sc_mbi[0]);
	return (mbi + 1);
}

/*
 * bha_attach:
 *
 *	Finish attaching a Buslogic controller, and configure children.
 *
 *	On any failure we simply return without attaching the SCSI
 *	channel; an error has already been printed.
 */
void
bha_attach(struct bha_softc *sc)
{
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	int initial_ccbs;

	/*
	 * Initialize the number of CCBs per group.
	 */
	if (bha_ccbs_per_group == 0)
		bha_ccbs_per_group = BHA_CCBS_PER_GROUP;

	/* Probe the board; bha_info() returns 0 on failure. */
	initial_ccbs = bha_info(sc);
	if (initial_ccbs == 0) {
		aprint_error_dev(sc->sc_dev, "unable to get adapter info\n");
		return;
	}

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = 1;
	/* adapt_openings initialized below */
	adapt->adapt_max_periph = sc->sc_mbox_count;
	adapt->adapt_request = bha_scsipi_request;
	adapt->adapt_minphys = bha_minphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = SCSIPI_CHAN_CANGROW;
	chan->chan_ntargets = (sc->sc_flags & BHAF_WIDE) ? 16 : 8;
	chan->chan_nluns = (sc->sc_flags & BHAF_WIDE_LUN) ? 32 : 8;
	chan->chan_id = sc->sc_scsi_id;

	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	TAILQ_INIT(&sc->sc_allocating_ccbs);

	if (bha_create_mailbox(sc) != 0)
		return;

	/* We need at least two CCBs to operate usefully. */
	bha_create_ccbs(sc, initial_ccbs);
	if (sc->sc_cur_ccbs < 2) {
		aprint_error_dev(sc->sc_dev, "not enough CCBs to run\n");
		return;
	}

	adapt->adapt_openings = sc->sc_cur_ccbs;

	if (bha_init(sc) != 0)
		return;

	(void) config_found(sc->sc_dev, &sc->sc_channel, scsiprint, CFARGS_NONE);
}

/*
 * bha_intr:
 *
 *	Interrupt service routine.  Returns 0 if the interrupt was not
 *	for us, 1 if it was handled.
 */
int
bha_intr(void *arg)
{
	struct bha_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_char sts;

#ifdef BHADEBUG
	printf("%s: bha_intr ", device_xname(sc->sc_dev));
#endif /* BHADEBUG */

	/*
	 * First acknowledge the interrupt; then, if it's not telling
	 * about a completed operation, just return.
	 */
	sts = bus_space_read_1(iot, ioh, BHA_INTR_PORT);
	if ((sts & BHA_INTR_ANYINTR) == 0)
		return (0);
	bus_space_write_1(iot, ioh, BHA_CTRL_PORT, BHA_CTRL_IRST);

#ifdef BHADIAG
	/* Make sure we clear CCB_SENDING before finishing a CCB. */
	bha_collect_mbo(sc);
#endif

	/* Mail box out empty? */
	if (sts & BHA_INTR_MBOA) {
		struct bha_toggle toggle;

		/*
		 * Disable the MBO-available interrupt until we need it
		 * again, then push out any CCBs waiting for a mailbox.
		 */
		toggle.cmd.opcode = BHA_MBO_INTR_EN;
		toggle.cmd.enable = 0;
		bha_cmd(iot, ioh, device_xname(sc->sc_dev),
		    sizeof(toggle.cmd), (u_char *)&toggle.cmd,
		    0, (u_char *)0);
		bha_start_ccbs(sc);
	}

	/* Mail box in full? */
	if (sts & BHA_INTR_MBIF)
		bha_finish_ccbs(sc);

	return (1);
}

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

/*
 * bha_scsipi_request:
 *
 *	Perform a request for the SCSIPI layer.
 */
static void
bha_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct bha_softc *sc = device_private(adapt->adapt_dev);
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	bus_dma_tag_t dmat = sc->sc_dmat;
	struct bha_ccb *ccb;
	int error, seg, flags, s;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		flags = xs->xs_control;

		SC_DEBUG(periph, SCSIPI_DB2, ("bha_scsipi_request\n"));

		/* Get a CCB to use. */
		ccb = bha_get_ccb(sc);
#ifdef DIAGNOSTIC
		/*
		 * This should never happen as we track the resources
		 * in the mid-layer.
		 */
		if (ccb == NULL) {
			scsipi_printaddr(periph);
			printf("unable to allocate ccb\n");
			panic("bha_scsipi_request");
		}
#endif

		ccb->xs = xs;
		ccb->timeout = xs->timeout;

		/*
		 * Put all the arguments for the xfer in the ccb
		 */
		if (flags & XS_CTL_RESET) {
			ccb->opcode = BHA_RESET_CCB;
			ccb->scsi_cmd_length = 0;
		} else {
			/* can't use S/G if zero length */
			if (xs->cmdlen > sizeof(ccb->scsi_cmd)) {
				printf("%s: cmdlen %d too large for CCB\n",
				    device_xname(sc->sc_dev), xs->cmdlen);
				xs->error = XS_DRIVER_STUFFUP;
				goto out_bad;
			}
			ccb->opcode = (xs->datalen ? BHA_INIT_SCAT_GATH_CCB
			    : BHA_INITIATOR_CCB);
			memcpy(&ccb->scsi_cmd, xs->cmd,
			    ccb->scsi_cmd_length = xs->cmdlen);
		}

		if (xs->datalen) {
			/*
			 * Map the DMA transfer.
			 */
#ifdef TFS
			if (flags & XS_CTL_DATA_UIO) {
				error = bus_dmamap_load_uio(dmat,
				    ccb->dmamap_xfer, (struct uio *)xs->data,
				    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
				     BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
				    ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
				     BUS_DMA_WRITE));
			} else
#endif /* TFS */
			{
				error = bus_dmamap_load(dmat,
				    ccb->dmamap_xfer, xs->data, xs->datalen,
				    NULL,
				    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
				     BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
				    ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
				     BUS_DMA_WRITE));
			}

			switch (error) {
			case 0:
				break;

			case ENOMEM:
			case EAGAIN:
				xs->error = XS_RESOURCE_SHORTAGE;
				goto out_bad;

			default:
				xs->error = XS_DRIVER_STUFFUP;
				aprint_error_dev(sc->sc_dev,
				    "error %d loading DMA map\n", error);
				/*
				 * NB: out_bad is also the target of gotos
				 * from outside this switch, above.
				 */
 out_bad:
				bha_free_ccb(sc, ccb);
				scsipi_done(xs);
				return;
			}

			bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
			    ccb->dmamap_xfer->dm_mapsize,
			    (flags & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
			    BUS_DMASYNC_PREWRITE);

			/*
			 * Load the hardware scatter/gather map with the
			 * contents of the DMA map.
			 */
			for (seg = 0; seg < ccb->dmamap_xfer->dm_nsegs; seg++) {
				ltophys(ccb->dmamap_xfer->dm_segs[seg].ds_addr,
				    ccb->scat_gath[seg].seg_addr);
				ltophys(ccb->dmamap_xfer->dm_segs[seg].ds_len,
				    ccb->scat_gath[seg].seg_len);
			}

			/*
			 * Point the CCB at its own S/G list; ccb->hashkey
			 * appears to be the CCB's bus address (presumably
			 * set up in bha_init_ccb(), not shown here).
			 */
			ltophys(ccb->hashkey + offsetof(struct bha_ccb,
			    scat_gath), ccb->data_addr);
			ltophys(ccb->dmamap_xfer->dm_nsegs *
			    sizeof(struct bha_scat_gath), ccb->data_length);
		} else {
			/*
			 * No data xfer, use non S/G values.
			 */
			ltophys(0, ccb->data_addr);
			ltophys(0, ccb->data_length);
		}

		if (XS_CTL_TAGTYPE(xs) != 0) {
			ccb->tag_enable = 1;
			ccb->tag_type = xs->xs_tag_type & 0x03;
		} else {
			ccb->tag_enable = 0;
			ccb->tag_type = 0;
		}

		ccb->data_out = 0;
		ccb->data_in = 0;
		ccb->target = periph->periph_target;
		ccb->lun = periph->periph_lun;
		/* Sense data lands directly in the CCB. */
		ltophys(ccb->hashkey + offsetof(struct bha_ccb, scsi_sense),
		    ccb->sense_ptr);
		ccb->req_sense_length = sizeof(ccb->scsi_sense);
		ccb->host_stat = 0x00;
		ccb->target_stat = 0x00;
		ccb->link_id = 0;
		ltophys(0, ccb->link_addr);

		BHA_CCB_SYNC(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Queueing must not race the interrupt handler. */
		s = splbio();
		bha_queue_ccb(sc, ccb);
		splx(s);

		SC_DEBUG(periph, SCSIPI_DB3, ("cmd_sent\n"));
		if ((flags & XS_CTL_POLL) == 0)
			return;

		/*
		 * If we can't use interrupts, poll on completion.  If the
		 * first poll times out we queue an abort (bha_timeout) and
		 * poll once more for the abort to complete.
		 */
		if (bha_poll(sc, xs, ccb->timeout)) {
			bha_timeout(ccb);
			if (bha_poll(sc, xs, ccb->timeout))
				bha_timeout(ccb);
		}
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		if (sc->sc_cur_ccbs == sc->sc_max_ccbs) {
			chan->chan_flags &= ~SCSIPI_CHAN_CANGROW;
			return;
		}
		/* `seg' reused here as the previous CCB count. */
		seg = sc->sc_cur_ccbs;
		bha_create_ccbs(sc, bha_ccbs_per_group);
		adapt->adapt_openings += sc->sc_cur_ccbs - seg;
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * Can't really do this on the Buslogic.  It has its
		 * own setup info.  But we do know how to query what
		 * the settings are.
		 */
		bha_get_xfer_mode(sc, (struct scsipi_xfer_mode *)arg);
		return;
	}
}

/*
 * bha_minphys:
 *
 *	Limit a transfer to our maximum transfer size.
 */
void
bha_minphys(struct buf *bp)
{

	if (bp->b_bcount > BHA_MAXXFER)
		bp->b_bcount = BHA_MAXXFER;
	minphys(bp);
}

/*****************************************************************************
 * SCSI job execution helper routines
 *****************************************************************************/

/*
 * bha_get_xfer_mode:
 *
 *	Negotiate the xfer mode for the specified periph, and report
 *	back the mode to the midlayer.
 *
 *	NOTE: we must be called at splbio().
 */
static void
bha_get_xfer_mode(struct bha_softc *sc, struct scsipi_xfer_mode *xm)
{
	struct bha_setup hwsetup;
	struct bha_period hwperiod;
	struct bha_sync *bs;
	int toff = xm->xm_target & 7, tmask = (1 << toff);
	int wide, period, offset, rlen;

	/*
	 * Issue an Inquire Setup Information.  We can extract
	 * sync and wide information from here.  Wide boards return
	 * an extra reply_w chunk, so ask for a larger reply.
	 */
	rlen = sizeof(hwsetup.reply) +
	    ((sc->sc_flags & BHAF_WIDE) ? sizeof(hwsetup.reply_w) : 0);
	hwsetup.cmd.opcode = BHA_INQUIRE_SETUP;
	hwsetup.cmd.len = rlen;
	bha_cmd(sc->sc_iot, sc->sc_ioh, device_xname(sc->sc_dev),
	    sizeof(hwsetup.cmd), (u_char *)&hwsetup.cmd,
	    rlen, (u_char *)&hwsetup.reply);

	xm->xm_mode = 0;
	xm->xm_period = 0;
	xm->xm_offset = 0;

	/*
	 * First check for wide.  On later boards (firmware >= 5.06L),
	 * we can check directly in the setup info if wide is currently
	 * active.
	 *
	 * On earlier boards, we have to make an educated guess.
	 */
	if (sc->sc_flags & BHAF_WIDE) {
		if (strcmp(sc->sc_firmware, "5.06L") >= 0) {
			/* Targets 8-15 live in the high_wide bitmap. */
			if (xm->xm_target > 7) {
				wide =
				    hwsetup.reply_w.high_wide_active & tmask;
			} else {
				wide =
				    hwsetup.reply_w.low_wide_active & tmask;
			}
			if (wide)
				xm->xm_mode |= PERIPH_CAP_WIDE16;
		} else {
			/* XXX Check `wide permitted' in the config info. */
			xm->xm_mode |= PERIPH_CAP_WIDE16;
		}
	}

	/*
	 * Now get basic sync info.
	 */
	bs = (xm->xm_target > 7) ?
	    &hwsetup.reply_w.sync_high[toff] :
	    &hwsetup.reply.sync_low[toff];

	if (bs->valid) {
		xm->xm_mode |= PERIPH_CAP_SYNC;
		/* Coarse period estimate from the setup info (ns). */
		period = (bs->period * 50) + 20;
		offset = bs->offset;

		/*
		 * On boards that can do Fast and Ultra, use the Inquire Period
		 * command to get the period.
		 */
		if (sc->sc_firmware[0] >= '3') {
			rlen = sizeof(hwperiod.reply) +
			    ((sc->sc_flags & BHAF_WIDE) ?
			     sizeof(hwperiod.reply_w) : 0);
			hwperiod.cmd.opcode = BHA_INQUIRE_PERIOD;
			hwperiod.cmd.len = rlen;
			bha_cmd(sc->sc_iot, sc->sc_ioh,
			    device_xname(sc->sc_dev), sizeof(hwperiod.cmd),
			    (u_char *)&hwperiod.cmd, rlen,
			    (u_char *)&hwperiod.reply);

			if (xm->xm_target > 7)
				period = hwperiod.reply_w.period[toff];
			else
				period = hwperiod.reply.period[toff];

			period *= 10;
		}

		xm->xm_period =
		    scsipi_sync_period_to_factor(period * 100);
		xm->xm_offset = offset;
	}

	/*
	 * Now check for tagged queueing support.
	 *
	 * XXX Check `tags permitted' in the config info.
	 */
	if (sc->sc_flags & BHAF_TAGGED_QUEUEING)
		xm->xm_mode |= PERIPH_CAP_TQING;

	/* Report the discovered mode back to the midlayer. */
	scsipi_async_event(&sc->sc_channel, ASYNC_EVENT_XFER_MODE, xm);
}

/*
 * bha_done:
 *
 *	A CCB has completed execution.  Pass the status back to the
 *	upper layer.
 */
static void
bha_done(struct bha_softc *sc, struct bha_ccb *ccb)
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	struct scsipi_xfer *xs = ccb->xs;

	SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("bha_done\n"));

#ifdef BHADIAG
	if (ccb->flags & CCB_SENDING) {
		printf("%s: exiting ccb still in transit!\n",
		    device_xname(sc->sc_dev));
		Debugger();
		return;
	}
#endif
	if ((ccb->flags & CCB_ALLOC) == 0) {
		aprint_error_dev(sc->sc_dev, "exiting ccb not allocated!\n");
		Debugger();
		return;
	}

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}

	/*
	 * Translate adapter status (host_stat) and SCSI status
	 * (target_stat) into the midlayer's xs->error codes.
	 */
	if (xs->error == XS_NOERROR) {
		if (ccb->host_stat != BHA_OK) {
			switch (ccb->host_stat) {
			case BHA_SEL_TIMEOUT:	/* No response */
				xs->error = XS_SELTIMEOUT;
				break;
			default:	/* Other scsi protocol messes */
				printf("%s: host_stat %x\n",
				    device_xname(sc->sc_dev), ccb->host_stat);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else if (ccb->target_stat != SCSI_OK) {
			switch (ccb->target_stat) {
			case SCSI_CHECK:
				/* Sense data was deposited in the CCB. */
				memcpy(&xs->sense.scsi_sense,
				    &ccb->scsi_sense,
				    sizeof(xs->sense.scsi_sense));
				xs->error = XS_SENSE;
				break;
			case SCSI_BUSY:
				xs->error = XS_BUSY;
				break;
			default:
				printf("%s: target_stat %x\n",
				    device_xname(sc->sc_dev), ccb->target_stat);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else
			xs->resid = 0;
	}

	bha_free_ccb(sc, ccb);
	scsipi_done(xs);
}

/*
 * bha_poll:
 *
 *	Poll for completion of the specified job.  Returns 0 when the
 *	transfer completed within `count' milliseconds, 1 on timeout.
 */
static int
bha_poll(struct bha_softc *sc, struct scsipi_xfer *xs, int count)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;

	/* timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		/*
		 * If we had interrupts enabled, would we
		 * have got an interrupt?
		 */
		if (bus_space_read_1(iot, ioh, BHA_INTR_PORT) &
		    BHA_INTR_ANYINTR)
			bha_intr(sc);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);	/* only happens in boot so ok */
		count--;
	}
	return (1);
}

/*
 * bha_timeout:
 *
 *	CCB timeout handler.  On the first timeout, queue an abort CCB;
 *	if the abort itself times out, we only complain (XXX should
 *	reset the adapter).
 */
static void
bha_timeout(void *arg)
{
	struct bha_ccb *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	struct bha_softc *sc =
	    device_private(periph->periph_channel->chan_adapter->adapt_dev);
	int s;

	scsipi_printaddr(periph);
	printf("timed out");

	s = splbio();

#ifdef BHADIAG
	/*
	 * If the ccb's mbx is not free, then the board has gone Far East?
	 */
	bha_collect_mbo(sc);
	if (ccb->flags & CCB_SENDING) {
		aprint_error_dev(sc->sc_dev, "not taking commands!\n");
		Debugger();
	}
#endif

	/*
	 * If it has been through before, then
	 * a previous abort has failed, don't
	 * try abort again
	 */
	if (ccb->flags & CCB_ABORT) {
		/* abort timed out */
		printf(" AGAIN\n");
		/* XXX Must reset! */
	} else {
		/* abort the operation that has timed out */
		printf("\n");
		ccb->xs->error = XS_TIMEOUT;
		ccb->timeout = BHA_ABORT_TIMEOUT;
		ccb->flags |= CCB_ABORT;
		bha_queue_ccb(sc, ccb);
	}

	splx(s);
}

/*****************************************************************************
 * Misc. subroutines.
 *****************************************************************************/

/*
 * bha_cmd:
 *
 *	Send a command to the BusLogic controller: write `icnt' bytes
 *	from `ibuf' to the command port, then read `ocnt' reply bytes
 *	into `obuf'.  Returns 0 on success, 1 on timeout/failure.
 *
 *	All handshaking is done by polling the status port; interrupts
 *	are acknowledged/cleared at the end.
 */
static int
bha_cmd(bus_space_tag_t iot, bus_space_handle_t ioh, const char *name,
    int icnt, u_char *ibuf, int ocnt, u_char *obuf)
{
	int i;
	int wait;
	u_char sts;
	u_char opcode = ibuf[0];

	/*
	 * Calculate a reasonable timeout for the command.  The device
	 * inquiry commands scan the bus and can take much longer.
	 */
	switch (opcode) {
	case BHA_INQUIRE_DEVICES:
	case BHA_INQUIRE_DEVICES_2:
		wait = 90 * 20000;
		break;
	default:
		wait = 1 * 20000;
		break;
	}

	/*
	 * Wait for the adapter to go idle, unless it's one of
	 * the commands which don't need this
	 */
	if (opcode != BHA_MBO_INTR_EN) {
		for (i = 20000; i; i--) {	/* 1 sec? */
			sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
			if (sts & BHA_STAT_IDLE)
				break;
			delay(50);
		}
		if (!i) {
			printf("%s: bha_cmd, host not idle(0x%x)\n",
			    name, sts);
			return (1);
		}
	}

	/*
	 * Now that it is idle, if we expect output, preflush the
	 * queue feeding to us.
	 */
	if (ocnt) {
		while ((bus_space_read_1(iot, ioh, BHA_STAT_PORT)) &
		    BHA_STAT_DF)
			(void)bus_space_read_1(iot, ioh, BHA_DATA_PORT);
	}

	/*
	 * Output the command and the number of arguments given
	 * for each byte, first check the port is empty.
	 */
	while (icnt--) {
		for (i = wait; i; i--) {
			sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
			if (!(sts & BHA_STAT_CDF))
				break;
			delay(50);
		}
		if (!i) {
			/* INQUIRE_REVISION is used as a probe; stay quiet. */
			if (opcode != BHA_INQUIRE_REVISION)
				printf("%s: bha_cmd, cmd/data port full\n",
				    name);
			goto bad;
		}
		bus_space_write_1(iot, ioh, BHA_CMD_PORT, *ibuf++);
	}

	/*
	 * If we expect input, loop that many times, each time,
	 * looking for the data register to have valid data
	 */
	while (ocnt--) {
		for (i = wait; i; i--) {
			sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
			if (sts & BHA_STAT_DF)
				break;
			delay(50);
		}
		if (!i) {
#ifdef BHADEBUG
			if (opcode != BHA_INQUIRE_REVISION)
				printf("%s: bha_cmd, cmd/data port empty %d\n",
				    name, ocnt);
#endif /* BHADEBUG */
			goto bad;
		}
		*obuf++ = bus_space_read_1(iot, ioh, BHA_DATA_PORT);
	}

	/*
	 * Wait for the board to report a finished instruction.
	 * We may get an extra interrupt for the HACC signal, but this is
	 * unimportant.
	 */
	if (opcode != BHA_MBO_INTR_EN && opcode != BHA_MODIFY_IOPORT) {
		for (i = 20000; i; i--) {	/* 1 sec? */
			sts = bus_space_read_1(iot, ioh, BHA_INTR_PORT);
			/* XXX Need to save this in the interrupt handler? */
			if (sts & BHA_INTR_HACC)
				break;
			delay(50);
		}
		if (!i) {
			printf("%s: bha_cmd, host not finished(0x%x)\n",
			    name, sts);
			return (1);
		}
	}
	/* Acknowledge/clear the interrupt we raised. */
	bus_space_write_1(iot, ioh, BHA_CTRL_PORT, BHA_CTRL_IRST);
	return (0);

bad:
	/* Soft-reset the adapter to recover from the stuck handshake. */
	bus_space_write_1(iot, ioh, BHA_CTRL_PORT, BHA_CTRL_SRST);
	return (1);
}

/*
 * bha_find:
 *
 *	Find the board.
 */
int
bha_find(bus_space_tag_t iot, bus_space_handle_t ioh)
{
	int i;
	u_char sts;
	struct bha_extended_inquire inquire;

	/* Check something is at the ports we need to access */
	sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
	if (sts == 0xFF)
		return (0);

	/*
	 * Reset board; if it doesn't respond, assume
	 * that it's not there.. good for the probe
	 */

	bus_space_write_1(iot, ioh, BHA_CTRL_PORT,
	    BHA_CTRL_HRST | BHA_CTRL_SRST);

	delay(100);
	for (i = BHA_RESET_TIMEOUT; i; i--) {
		sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
		if (sts == (BHA_STAT_IDLE | BHA_STAT_INIT))
			break;
		delay(1000);
	}
	if (!i) {
#ifdef BHADEBUG
		if (bha_debug)
			printf("bha_find: No answer from buslogic board\n");
#endif /* BHADEBUG */
		return (0);
	}

	/*
	 * The BusLogic cards implement an Adaptec 1542 (aha)-compatible
	 * interface. The native bha interface is not compatible with
	 * an aha. 1542. We need to ensure that we never match an
	 * Adaptec 1542. We must also avoid sending Adaptec-compatible
	 * commands to a real bha, lest it go into 1542 emulation mode.
	 * (On an indirect bus like ISA, we should always probe for BusLogic
	 * interfaces before Adaptec interfaces).
	 */

	/*
	 * Make sure we don't match an AHA-1542A or AHA-1542B, by checking
	 * for an extended-geometry register. The 1542[AB] don't have one.
	 */
	sts = bus_space_read_1(iot, ioh, BHA_EXTGEOM_PORT);
	if (sts == 0xFF)
		return (0);

	/*
	 * Check that we actually know how to use this board.
	 */
	delay(1000);
	inquire.cmd.opcode = BHA_INQUIRE_EXTENDED;
	inquire.cmd.len = sizeof(inquire.reply);
	i = bha_cmd(iot, ioh, "(bha_find)",
	    sizeof(inquire.cmd), (u_char *)&inquire.cmd,
	    sizeof(inquire.reply), (u_char *)&inquire.reply);

	/*
	 * Some 1542Cs (CP, perhaps not CF, may depend on firmware rev)
	 * have the extended-geometry register and also respond to
	 * BHA_INQUIRE_EXTENDED. Make sure we never match such cards,
	 * by checking the size of the reply is what a BusLogic card returns.
	 */
	if (i) {
#ifdef BHADEBUG
		printf("bha_find: board returned %d instead of %zu to %s\n",
		    i, sizeof(inquire.reply), "INQUIRE_EXTENDED");
#endif
		return (0);
	}

	/* OK, we know we've found a buslogic adaptor. */

	switch (inquire.reply.bus_type) {
	case BHA_BUS_TYPE_24BIT:
	case BHA_BUS_TYPE_32BIT:
		break;
	case BHA_BUS_TYPE_MCA:
		/* We don't grok MicroChannel (yet). */
		return (0);
	default:
		printf("bha_find: illegal bus type %c\n",
		    inquire.reply.bus_type);
		return (0);
	}

	return (1);
}


/*
 * bha_inquire_config:
 *
 *	Determine irq/drq.
968 */ 969 int 970 bha_inquire_config(bus_space_tag_t iot, bus_space_handle_t ioh, 971 struct bha_probe_data *sc) 972 { 973 int irq, drq; 974 struct bha_config config; 975 976 /* 977 * Assume we have a board at this stage setup DMA channel from 978 * jumpers and save int level 979 */ 980 delay(1000); 981 config.cmd.opcode = BHA_INQUIRE_CONFIG; 982 bha_cmd(iot, ioh, "(bha_inquire_config)", 983 sizeof(config.cmd), (u_char *)&config.cmd, 984 sizeof(config.reply), (u_char *)&config.reply); 985 switch (config.reply.chan) { 986 case EISADMA: 987 drq = -1; 988 break; 989 case CHAN0: 990 drq = 0; 991 break; 992 case CHAN5: 993 drq = 5; 994 break; 995 case CHAN6: 996 drq = 6; 997 break; 998 case CHAN7: 999 drq = 7; 1000 break; 1001 default: 1002 printf("bha: illegal drq setting %x\n", 1003 config.reply.chan); 1004 return (0); 1005 } 1006 1007 switch (config.reply.intr) { 1008 case INT9: 1009 irq = 9; 1010 break; 1011 case INT10: 1012 irq = 10; 1013 break; 1014 case INT11: 1015 irq = 11; 1016 break; 1017 case INT12: 1018 irq = 12; 1019 break; 1020 case INT14: 1021 irq = 14; 1022 break; 1023 case INT15: 1024 irq = 15; 1025 break; 1026 default: 1027 printf("bha: illegal irq setting %x\n", 1028 config.reply.intr); 1029 return (0); 1030 } 1031 1032 /* if we want to fill in softc, do so now */ 1033 if (sc != NULL) { 1034 sc->sc_irq = irq; 1035 sc->sc_drq = drq; 1036 } 1037 1038 return (1); 1039 } 1040 1041 int 1042 bha_probe_inquiry(bus_space_tag_t iot, bus_space_handle_t ioh, 1043 struct bha_probe_data *bpd) 1044 { 1045 return bha_find(iot, ioh) && bha_inquire_config(iot, ioh, bpd); 1046 } 1047 1048 /* 1049 * bha_disable_isacompat: 1050 * 1051 * Disable the ISA-compatibility ioports on PCI bha devices, 1052 * to ensure they're not autoconfigured a second time as an ISA bha. 
1053 */ 1054 int 1055 bha_disable_isacompat(struct bha_softc *sc) 1056 { 1057 struct bha_isadisable isa_disable; 1058 1059 isa_disable.cmd.opcode = BHA_MODIFY_IOPORT; 1060 isa_disable.cmd.modifier = BHA_IOMODIFY_DISABLE1; 1061 bha_cmd(sc->sc_iot, sc->sc_ioh, device_xname(sc->sc_dev), 1062 sizeof(isa_disable.cmd), (u_char*)&isa_disable.cmd, 1063 0, (u_char *)0); 1064 return (0); 1065 } 1066 1067 /* 1068 * bha_info: 1069 * 1070 * Get information about the board, and report it. We 1071 * return the initial number of CCBs, 0 if we failed. 1072 */ 1073 int 1074 bha_info(struct bha_softc *sc) 1075 { 1076 bus_space_tag_t iot = sc->sc_iot; 1077 bus_space_handle_t ioh = sc->sc_ioh; 1078 struct bha_extended_inquire inquire; 1079 struct bha_config config; 1080 struct bha_devices devices; 1081 struct bha_setup setup; 1082 struct bha_model model; 1083 struct bha_revision revision; 1084 struct bha_digit digit; 1085 int i, j, initial_ccbs, rlen; 1086 const char *name = device_xname(sc->sc_dev); 1087 char *p; 1088 1089 /* 1090 * Fetch the extended inquire information. 1091 */ 1092 inquire.cmd.opcode = BHA_INQUIRE_EXTENDED; 1093 inquire.cmd.len = sizeof(inquire.reply); 1094 bha_cmd(iot, ioh, name, 1095 sizeof(inquire.cmd), (u_char *)&inquire.cmd, 1096 sizeof(inquire.reply), (u_char *)&inquire.reply); 1097 1098 /* 1099 * Fetch the configuration information. 1100 */ 1101 config.cmd.opcode = BHA_INQUIRE_CONFIG; 1102 bha_cmd(iot, ioh, name, 1103 sizeof(config.cmd), (u_char *)&config.cmd, 1104 sizeof(config.reply), (u_char *)&config.reply); 1105 1106 sc->sc_scsi_id = config.reply.scsi_dev; 1107 1108 /* 1109 * Get the firmware revision. 
1110 */ 1111 p = sc->sc_firmware; 1112 revision.cmd.opcode = BHA_INQUIRE_REVISION; 1113 bha_cmd(iot, ioh, name, 1114 sizeof(revision.cmd), (u_char *)&revision.cmd, 1115 sizeof(revision.reply), (u_char *)&revision.reply); 1116 *p++ = revision.reply.firm_revision; 1117 *p++ = '.'; 1118 *p++ = revision.reply.firm_version; 1119 digit.cmd.opcode = BHA_INQUIRE_REVISION_3; 1120 bha_cmd(iot, ioh, name, 1121 sizeof(digit.cmd), (u_char *)&digit.cmd, 1122 sizeof(digit.reply), (u_char *)&digit.reply); 1123 *p++ = digit.reply.digit; 1124 if (revision.reply.firm_revision >= '3' || 1125 (revision.reply.firm_revision == '3' && 1126 revision.reply.firm_version >= '3')) { 1127 digit.cmd.opcode = BHA_INQUIRE_REVISION_4; 1128 bha_cmd(iot, ioh, name, 1129 sizeof(digit.cmd), (u_char *)&digit.cmd, 1130 sizeof(digit.reply), (u_char *)&digit.reply); 1131 *p++ = digit.reply.digit; 1132 } 1133 while (p > sc->sc_firmware && (p[-1] == ' ' || p[-1] == '\0')) 1134 p--; 1135 *p = '\0'; 1136 1137 /* 1138 * Get the model number. 1139 * 1140 * Some boards do not handle the Inquire Board Model Number 1141 * command correctly, or don't give correct information. 1142 * 1143 * So, we use the Firmware Revision and Extended Setup 1144 * information to fixup the model number in these cases. 
1145 * 1146 * The firmware version indicates: 1147 * 1148 * 5.xx BusLogic "W" Series Host Adapters 1149 * BT-948/958/958D 1150 * 1151 * 4.xx BusLogic "C" Series Host Adapters 1152 * BT-946C/956C/956CD/747C/757C/757CD/445C/545C/540CF 1153 * 1154 * 3.xx BusLogic "S" Series Host Adapters 1155 * BT-747S/747D/757S/757D/445S/545S/542D 1156 * BT-542B/742A (revision H) 1157 * 1158 * 2.xx BusLogic "A" Series Host Adapters 1159 * BT-542B/742A (revision G and below) 1160 * 1161 * 0.xx AMI FastDisk VLB/EISA BusLogic Clone Host Adapter 1162 */ 1163 if (inquire.reply.bus_type == BHA_BUS_TYPE_24BIT && 1164 sc->sc_firmware[0] < '3') 1165 snprintf(sc->sc_model, sizeof(sc->sc_model), "542B"); 1166 else if (inquire.reply.bus_type == BHA_BUS_TYPE_32BIT && 1167 sc->sc_firmware[0] == '2' && 1168 (sc->sc_firmware[2] == '1' || 1169 (sc->sc_firmware[2] == '2' && sc->sc_firmware[3] == '0'))) 1170 snprintf(sc->sc_model, sizeof(sc->sc_model), "742A"); 1171 else if (inquire.reply.bus_type == BHA_BUS_TYPE_32BIT && 1172 sc->sc_firmware[0] == '0') 1173 snprintf(sc->sc_model, sizeof(sc->sc_model), "747A"); 1174 else { 1175 p = sc->sc_model; 1176 model.cmd.opcode = BHA_INQUIRE_MODEL; 1177 model.cmd.len = sizeof(model.reply); 1178 bha_cmd(iot, ioh, name, 1179 sizeof(model.cmd), (u_char *)&model.cmd, 1180 sizeof(model.reply), (u_char *)&model.reply); 1181 *p++ = model.reply.id[0]; 1182 *p++ = model.reply.id[1]; 1183 *p++ = model.reply.id[2]; 1184 *p++ = model.reply.id[3]; 1185 while (p > sc->sc_model && (p[-1] == ' ' || p[-1] == '\0')) 1186 p--; 1187 *p++ = model.reply.version[0]; 1188 *p++ = model.reply.version[1]; 1189 while (p > sc->sc_model && (p[-1] == ' ' || p[-1] == '\0')) 1190 p--; 1191 *p = '\0'; 1192 } 1193 1194 /* Enable round-robin scheme - appeared at firmware rev. 3.31. */ 1195 if (strcmp(sc->sc_firmware, "3.31") >= 0) 1196 sc->sc_flags |= BHAF_STRICT_ROUND_ROBIN; 1197 1198 /* 1199 * Determine some characteristics about our bus. 
1200 */ 1201 if (inquire.reply.scsi_flags & BHA_SCSI_WIDE) 1202 sc->sc_flags |= BHAF_WIDE; 1203 if (inquire.reply.scsi_flags & BHA_SCSI_DIFFERENTIAL) 1204 sc->sc_flags |= BHAF_DIFFERENTIAL; 1205 if (inquire.reply.scsi_flags & BHA_SCSI_ULTRA) 1206 sc->sc_flags |= BHAF_ULTRA; 1207 1208 /* 1209 * Determine some characterists of the board. 1210 */ 1211 sc->sc_max_dmaseg = inquire.reply.sg_limit; 1212 1213 /* 1214 * Determine the maximum CCB count and whether or not 1215 * tagged queueing is available on this host adapter. 1216 * 1217 * Tagged queueing works on: 1218 * 1219 * "W" Series adapters 1220 * "C" Series adapters with firmware >= 4.22 1221 * "S" Series adapters with firmware >= 3.35 1222 * 1223 * The internal CCB counts are: 1224 * 1225 * 192 BT-948/958/958D 1226 * 100 BT-946C/956C/956CD/747C/757C/757CD/445C 1227 * 50 BT-545C/540CF 1228 * 30 BT-747S/747D/757S/757D/445S/545S/542D/542B/742A 1229 */ 1230 switch (sc->sc_firmware[0]) { 1231 case '5': 1232 sc->sc_max_ccbs = 192; 1233 sc->sc_flags |= BHAF_TAGGED_QUEUEING; 1234 break; 1235 1236 case '4': 1237 if (sc->sc_model[0] == '5') 1238 sc->sc_max_ccbs = 50; 1239 else 1240 sc->sc_max_ccbs = 100; 1241 if (strcmp(sc->sc_firmware, "4.22") >= 0) 1242 sc->sc_flags |= BHAF_TAGGED_QUEUEING; 1243 break; 1244 1245 case '3': 1246 if (strcmp(sc->sc_firmware, "3.35") >= 0) 1247 sc->sc_flags |= BHAF_TAGGED_QUEUEING; 1248 /* FALLTHROUGH */ 1249 1250 default: 1251 sc->sc_max_ccbs = 30; 1252 } 1253 1254 /* 1255 * Set the mailbox count to precisely the number of HW CCBs 1256 * available. A mailbox isn't required while a CCB is executing, 1257 * but this allows us to actually enqueue up to our resource 1258 * limit. 1259 * 1260 * This will keep the mailbox count small on boards which don't 1261 * have strict round-robin (they have to scan the entire set of 1262 * mailboxes each time they run a command). 1263 */ 1264 sc->sc_mbox_count = sc->sc_max_ccbs; 1265 1266 /* 1267 * Obtain setup information. 
1268 */ 1269 rlen = sizeof(setup.reply) + 1270 ((sc->sc_flags & BHAF_WIDE) ? sizeof(setup.reply_w) : 0); 1271 setup.cmd.opcode = BHA_INQUIRE_SETUP; 1272 setup.cmd.len = rlen; 1273 bha_cmd(iot, ioh, name, 1274 sizeof(setup.cmd), (u_char *)&setup.cmd, 1275 rlen, (u_char *)&setup.reply); 1276 1277 aprint_normal_dev(sc->sc_dev, "model BT-%s, firmware %s\n", 1278 sc->sc_model, sc->sc_firmware); 1279 1280 aprint_normal_dev(sc->sc_dev, "%d H/W CCBs", sc->sc_max_ccbs); 1281 if (setup.reply.sync_neg) 1282 aprint_normal(", sync"); 1283 if (setup.reply.parity) 1284 aprint_normal(", parity"); 1285 if (sc->sc_flags & BHAF_TAGGED_QUEUEING) 1286 aprint_normal(", tagged queueing"); 1287 if (sc->sc_flags & BHAF_WIDE_LUN) 1288 aprint_normal(", wide LUN support"); 1289 aprint_normal("\n"); 1290 1291 /* 1292 * Poll targets 0 - 7. 1293 */ 1294 devices.cmd.opcode = BHA_INQUIRE_DEVICES; 1295 bha_cmd(iot, ioh, name, 1296 sizeof(devices.cmd), (u_char *)&devices.cmd, 1297 sizeof(devices.reply), (u_char *)&devices.reply); 1298 1299 /* Count installed units. */ 1300 initial_ccbs = 0; 1301 for (i = 0; i < 8; i++) { 1302 for (j = 0; j < 8; j++) { 1303 if (((devices.reply.lun_map[i] >> j) & 1) == 1) 1304 initial_ccbs++; 1305 } 1306 } 1307 1308 /* 1309 * Poll targets 8 - 15 if we have a wide bus. 1310 */ 1311 if (sc->sc_flags & BHAF_WIDE) { 1312 devices.cmd.opcode = BHA_INQUIRE_DEVICES_2; 1313 bha_cmd(iot, ioh, name, 1314 sizeof(devices.cmd), (u_char *)&devices.cmd, 1315 sizeof(devices.reply), (u_char *)&devices.reply); 1316 1317 for (i = 0; i < 8; i++) { 1318 for (j = 0; j < 8; j++) { 1319 if (((devices.reply.lun_map[i] >> j) & 1) == 1) 1320 initial_ccbs++; 1321 } 1322 } 1323 } 1324 1325 /* 1326 * Double the initial CCB count, for good measure. 1327 */ 1328 initial_ccbs *= 2; 1329 1330 /* 1331 * Sanity check the initial CCB count; don't create more than 1332 * we can enqueue (sc_max_ccbs), and make sure there are some 1333 * at all. 
	 */
	if (initial_ccbs > sc->sc_max_ccbs)
		initial_ccbs = sc->sc_max_ccbs;
	if (initial_ccbs == 0)
		initial_ccbs = 2;	/* always provide at least a couple */

	return (initial_ccbs);
}

/*
 * bha_init:
 *
 *	Initialize the board.
 *
 *	Resets both mailbox rings to their "free" state, flushes them
 *	out to the device, enables strict round-robin scanning when the
 *	board supports it, and hands the mailbox DMA area to the board
 *	with the extended mailbox-init command.  Always returns 0.
 */
static int
bha_init(struct bha_softc *sc)
{
	const char *name = device_xname(sc->sc_dev);
	struct bha_toggle toggle;
	struct bha_mailbox mailbox;
	struct bha_mbx_out *mbo;
	struct bha_mbx_in *mbi;
	int i;

	/*
	 * Set up the mailbox. We always run the mailbox in round-robin.
	 */
	for (i = 0; i < sc->sc_mbox_count; i++) {
		mbo = &sc->sc_mbo[i];
		mbi = &sc->sc_mbi[i];

		/* Mark each slot free and push the update to the device. */
		mbo->cmd = BHA_MBO_FREE;
		BHA_MBO_SYNC(sc, mbo, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		mbi->comp_stat = BHA_MBI_FREE;
		BHA_MBI_SYNC(sc, mbi, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* Collection (cmbo) and transmit (tmbo/tmbi) cursors start at 0. */
	sc->sc_cmbo = sc->sc_tmbo = &sc->sc_mbo[0];
	sc->sc_tmbi = &sc->sc_mbi[0];

	sc->sc_mbofull = 0;

	/*
	 * If the board supports strict round-robin, enable that.
	 */
	if (sc->sc_flags & BHAF_STRICT_ROUND_ROBIN) {
		toggle.cmd.opcode = BHA_ROUND_ROBIN;
		toggle.cmd.enable = 1;
		bha_cmd(sc->sc_iot, sc->sc_ioh, name,
		    sizeof(toggle.cmd), (u_char *)&toggle.cmd,
		    0, NULL);
	}

	/*
	 * Give the mailbox to the board.
	 */
	mailbox.cmd.opcode = BHA_MBX_INIT_EXTENDED;
	mailbox.cmd.nmbx = sc->sc_mbox_count;
	ltophys(sc->sc_dmamap_mbox->dm_segs[0].ds_addr, mailbox.cmd.addr);
	bha_cmd(sc->sc_iot, sc->sc_ioh, name,
	    sizeof(mailbox.cmd), (u_char *)&mailbox.cmd,
	    0, (u_char *)0);

	return (0);
}

/*****************************************************************************
 * CCB execution engine
 *****************************************************************************/

/*
 * bha_queue_ccb:
 *
 *	Queue a CCB to be sent to the controller, and send it if possible.
 */
static void
bha_queue_ccb(struct bha_softc *sc, struct bha_ccb *ccb)
{

	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);
	bha_start_ccbs(sc);
}

/*
 * bha_start_ccbs:
 *
 *	Send as many CCBs as we have empty mailboxes for.
 */
static void
bha_start_ccbs(struct bha_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct bha_ccb_group *bcg;
	struct bha_mbx_out *mbo;
	struct bha_ccb *ccb;

	mbo = sc->sc_tmbo;

	while ((ccb = TAILQ_FIRST(&sc->sc_waiting_ccb)) != NULL) {
		if (sc->sc_mbofull >= sc->sc_mbox_count) {
#ifdef DIAGNOSTIC
			if (sc->sc_mbofull > sc->sc_mbox_count)
				panic("bha_start_ccbs: mbofull > mbox_count");
#endif
			/*
			 * No mailboxes available; attempt to collect ones
			 * that have already been used.
			 */
			bha_collect_mbo(sc);
			if (sc->sc_mbofull == sc->sc_mbox_count) {
				/*
				 * Still no more available; have the
				 * controller interrupt us when it
				 * frees one.
1450 */ 1451 struct bha_toggle toggle; 1452 1453 toggle.cmd.opcode = BHA_MBO_INTR_EN; 1454 toggle.cmd.enable = 1; 1455 bha_cmd(iot, ioh, device_xname(sc->sc_dev), 1456 sizeof(toggle.cmd), (u_char *)&toggle.cmd, 1457 0, (u_char *)0); 1458 break; 1459 } 1460 } 1461 1462 TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain); 1463 #ifdef BHADIAG 1464 ccb->flags |= CCB_SENDING; 1465 #endif 1466 1467 /* 1468 * Put the CCB in the mailbox. 1469 */ 1470 bcg = BHA_CCB_GROUP(ccb); 1471 ltophys(bcg->bcg_dmamap->dm_segs[0].ds_addr + 1472 BHA_CCB_OFFSET(ccb), mbo->ccb_addr); 1473 if (ccb->flags & CCB_ABORT) 1474 mbo->cmd = BHA_MBO_ABORT; 1475 else 1476 mbo->cmd = BHA_MBO_START; 1477 1478 BHA_MBO_SYNC(sc, mbo, 1479 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1480 1481 /* Tell the card to poll immediately. */ 1482 bus_space_write_1(iot, ioh, BHA_CMD_PORT, BHA_START_SCSI); 1483 1484 if ((ccb->xs->xs_control & XS_CTL_POLL) == 0) 1485 callout_reset(&ccb->xs->xs_callout, 1486 mstohz(ccb->timeout), bha_timeout, ccb); 1487 1488 ++sc->sc_mbofull; 1489 mbo = bha_nextmbo(sc, mbo); 1490 } 1491 1492 sc->sc_tmbo = mbo; 1493 } 1494 1495 /* 1496 * bha_finish_ccbs: 1497 * 1498 * Finalize the execution of CCBs in our incoming mailbox. 1499 */ 1500 static void 1501 bha_finish_ccbs(struct bha_softc *sc) 1502 { 1503 struct bha_mbx_in *mbi; 1504 struct bha_ccb *ccb; 1505 int i; 1506 1507 mbi = sc->sc_tmbi; 1508 1509 BHA_MBI_SYNC(sc, mbi, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1510 1511 if (mbi->comp_stat == BHA_MBI_FREE) { 1512 for (i = 0; i < sc->sc_mbox_count; i++) { 1513 if (mbi->comp_stat != BHA_MBI_FREE) { 1514 #ifdef BHADIAG 1515 /* 1516 * This can happen in normal operation if 1517 * we use all mailbox slots. 
1518 */ 1519 printf("%s: mbi not in round-robin order\n", 1520 device_xname(sc->sc_dev)); 1521 #endif 1522 goto again; 1523 } 1524 mbi = bha_nextmbi(sc, mbi); 1525 BHA_MBI_SYNC(sc, mbi, 1526 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1527 } 1528 #ifdef BHADIAGnot 1529 printf("%s: mbi interrupt with no full mailboxes\n", 1530 device_xname(sc->sc_dev)); 1531 #endif 1532 return; 1533 } 1534 1535 again: 1536 do { 1537 ccb = bha_ccb_phys_kv(sc, phystol(mbi->ccb_addr)); 1538 if (ccb == NULL) { 1539 aprint_error_dev(sc->sc_dev, "bad mbi ccb pointer 0x%08x; skipping\n", 1540 phystol(mbi->ccb_addr)); 1541 goto next; 1542 } 1543 1544 BHA_CCB_SYNC(sc, ccb, 1545 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1546 1547 #ifdef BHADEBUG 1548 if (bha_debug) { 1549 u_char *cp = ccb->scsi_cmd; 1550 printf("op=%x %x %x %x %x %x\n", 1551 cp[0], cp[1], cp[2], cp[3], cp[4], cp[5]); 1552 printf("comp_stat %x for mbi addr = %p, ", 1553 mbi->comp_stat, mbi); 1554 printf("ccb addr = %p\n", ccb); 1555 } 1556 #endif /* BHADEBUG */ 1557 1558 switch (mbi->comp_stat) { 1559 case BHA_MBI_OK: 1560 case BHA_MBI_ERROR: 1561 if ((ccb->flags & CCB_ABORT) != 0) { 1562 /* 1563 * If we already started an abort, wait for it 1564 * to complete before clearing the CCB. We 1565 * could instead just clear CCB_SENDING, but 1566 * what if the mailbox was already received? 1567 * The worst that happens here is that we clear 1568 * the CCB a bit later than we need to. BFD. 1569 */ 1570 goto next; 1571 } 1572 break; 1573 1574 case BHA_MBI_ABORT: 1575 case BHA_MBI_UNKNOWN: 1576 case BHA_MBI_BADCCB: 1577 /* 1578 * Even if the CCB wasn't found, we clear it anyway. 1579 * See preceding comment. 
1580 */ 1581 break; 1582 1583 default: 1584 aprint_error_dev(sc->sc_dev, "bad mbi comp_stat %02x; skipping\n", 1585 mbi->comp_stat); 1586 goto next; 1587 } 1588 1589 callout_stop(&ccb->xs->xs_callout); 1590 bha_done(sc, ccb); 1591 1592 next: 1593 mbi->comp_stat = BHA_MBI_FREE; 1594 BHA_CCB_SYNC(sc, ccb, 1595 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1596 1597 mbi = bha_nextmbi(sc, mbi); 1598 BHA_MBI_SYNC(sc, mbi, 1599 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1600 } while (mbi->comp_stat != BHA_MBI_FREE); 1601 1602 sc->sc_tmbi = mbi; 1603 } 1604 1605 /***************************************************************************** 1606 * Mailbox management functions. 1607 *****************************************************************************/ 1608 1609 /* 1610 * bha_create_mailbox: 1611 * 1612 * Create the mailbox structures. Helper function for bha_attach(). 1613 * 1614 * NOTE: The Buslogic hardware only gets one DMA address for the 1615 * mailbox! It expects: 1616 * 1617 * mailbox_out[mailbox_size] 1618 * mailbox_in[mailbox_size] 1619 */ 1620 static int 1621 bha_create_mailbox(struct bha_softc *sc) 1622 { 1623 bus_dma_segment_t seg; 1624 size_t size; 1625 int error, rseg; 1626 1627 size = (sizeof(struct bha_mbx_out) * sc->sc_mbox_count) + 1628 (sizeof(struct bha_mbx_in) * sc->sc_mbox_count); 1629 1630 error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg, 1631 1, &rseg, sc->sc_dmaflags); 1632 if (error) { 1633 aprint_error_dev(sc->sc_dev, 1634 "unable to allocate mailboxes, error = %d\n", error); 1635 goto bad_0; 1636 } 1637 1638 error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size, 1639 (void **)&sc->sc_mbo, sc->sc_dmaflags | BUS_DMA_COHERENT); 1640 if (error) { 1641 aprint_error_dev(sc->sc_dev, 1642 "unable to map mailboxes, error = %d\n", error); 1643 goto bad_1; 1644 } 1645 1646 memset(sc->sc_mbo, 0, size); 1647 1648 error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, 1649 sc->sc_dmaflags, &sc->sc_dmamap_mbox); 1650 if (error) { 1651 
aprint_error_dev(sc->sc_dev, 1652 "unable to create mailbox DMA map, error = %d\n", error); 1653 goto bad_2; 1654 } 1655 1656 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_mbox, 1657 sc->sc_mbo, size, NULL, 0); 1658 if (error) { 1659 aprint_error_dev(sc->sc_dev, 1660 "unable to load mailbox DMA map, error = %d\n", error); 1661 goto bad_3; 1662 } 1663 1664 sc->sc_mbi = (struct bha_mbx_in *)(sc->sc_mbo + sc->sc_mbox_count); 1665 1666 return (0); 1667 1668 bad_3: 1669 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap_mbox); 1670 bad_2: 1671 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_mbo, size); 1672 bad_1: 1673 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 1674 bad_0: 1675 return (error); 1676 } 1677 1678 /* 1679 * bha_collect_mbo: 1680 * 1681 * Garbage collect mailboxes that are no longer in use. 1682 */ 1683 static void 1684 bha_collect_mbo(struct bha_softc *sc) 1685 { 1686 struct bha_mbx_out *mbo; 1687 #ifdef BHADIAG 1688 struct bha_ccb *ccb; 1689 #endif 1690 1691 mbo = sc->sc_cmbo; 1692 1693 while (sc->sc_mbofull > 0) { 1694 BHA_MBO_SYNC(sc, mbo, 1695 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1696 if (mbo->cmd != BHA_MBO_FREE) 1697 break; 1698 1699 #ifdef BHADIAG 1700 ccb = bha_ccb_phys_kv(sc, phystol(mbo->ccb_addr)); 1701 ccb->flags &= ~CCB_SENDING; 1702 #endif 1703 1704 --sc->sc_mbofull; 1705 mbo = bha_nextmbo(sc, mbo); 1706 } 1707 1708 sc->sc_cmbo = mbo; 1709 } 1710 1711 /***************************************************************************** 1712 * CCB management functions 1713 *****************************************************************************/ 1714 1715 static inline void 1716 bha_reset_ccb(struct bha_ccb *ccb) 1717 { 1718 1719 ccb->flags = 0; 1720 } 1721 1722 /* 1723 * bha_create_ccbs: 1724 * 1725 * Create a set of CCBs. 1726 * 1727 * We determine the target CCB count, and then keep creating them 1728 * until we reach the target, or fail. CCBs that are allocated 1729 * but not "created" are left on the allocating list. 
 *
 *	XXX AB_QUIET/AB_SILENT lossage here; this is called during
 *	boot as well as at run-time.
 */
static void
bha_create_ccbs(struct bha_softc *sc, int count)
{
	struct bha_ccb_group *bcg;
	struct bha_ccb *ccb;
	bus_dma_segment_t seg;
	bus_dmamap_t ccbmap;
	int target, i, error, rseg;

	/*
	 * If the current CCB count is already the max number we're
	 * allowed to have, bail out now.
	 */
	if (sc->sc_cur_ccbs == sc->sc_max_ccbs)
		return;

	/*
	 * Compute our target count, and clamp it down to the max
	 * number we're allowed to have.
	 */
	target = sc->sc_cur_ccbs + count;
	if (target > sc->sc_max_ccbs)
		target = sc->sc_max_ccbs;

	/*
	 * If there are CCBs on the allocating list, don't allocate a
	 * CCB group yet.
	 */
	if (TAILQ_FIRST(&sc->sc_allocating_ccbs) != NULL)
		goto have_allocating_ccbs;

 allocate_group:
	/* Each CCB group occupies exactly one DMA-able page. */
	error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE,
	    PAGE_SIZE, 0, &seg, 1, &rseg, sc->sc_dmaflags | BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate CCB group, error = %d\n", error);
		goto bad_0;
	}

	/*
	 * NOTE(review): cast is (void *) here but the sibling
	 * bha_create_mailbox() uses (void **) for the same argument;
	 * both compile, but consider making them consistent.
	 */
	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
	    (void *)&bcg,
	    sc->sc_dmaflags | BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map CCB group, error = %d\n", error);
		goto bad_1;
	}

	memset(bcg, 0, PAGE_SIZE);

	error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE,
	    1, PAGE_SIZE, 0, sc->sc_dmaflags | BUS_DMA_NOWAIT, &ccbmap);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create CCB group DMA map, error = %d\n", error);
		goto bad_2;
	}

	error = bus_dmamap_load(sc->sc_dmat, ccbmap, bcg, PAGE_SIZE, NULL,
	    sc->sc_dmaflags | BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load CCB group DMA map, error = %d\n", error);
		goto bad_3;
	}

	bcg->bcg_dmamap = ccbmap;

#ifdef DIAGNOSTIC
	/* All CCBs of a group must resolve back to the same group page. */
	if (BHA_CCB_GROUP(&bcg->bcg_ccbs[0]) !=
	    BHA_CCB_GROUP(&bcg->bcg_ccbs[bha_ccbs_per_group - 1]))
		panic("bha_create_ccbs: CCB group size botch");
#endif

	/*
	 * Add all of the CCBs in this group to the allocating list.
	 */
	for (i = 0; i < bha_ccbs_per_group; i++) {
		ccb = &bcg->bcg_ccbs[i];
		TAILQ_INSERT_TAIL(&sc->sc_allocating_ccbs, ccb, chain);
	}

 have_allocating_ccbs:
	/*
	 * Loop over the allocating list until we reach our CCB target.
	 * If we run out on the list, we'll allocate another group's
	 * worth.
	 */
	while (sc->sc_cur_ccbs < target) {
		ccb = TAILQ_FIRST(&sc->sc_allocating_ccbs);
		if (ccb == NULL)
			goto allocate_group;
		if (bha_init_ccb(sc, ccb) != 0) {
			/*
			 * We were unable to initialize the CCB.
			 * This is likely due to a resource shortage,
			 * so bail out now.
			 */
			return;
		}
	}

	/*
	 * If we got here, we've reached our target!
	 */
	return;

 bad_3:
	bus_dmamap_destroy(sc->sc_dmat, ccbmap);
 bad_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)bcg, PAGE_SIZE);
 bad_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 bad_0:
	return;
}

/*
 * bha_init_ccb:
 *
 *	Initialize a CCB; helper function for bha_create_ccbs().
 *
 *	Creates the CCB's transfer DMA map, moves the CCB from the
 *	allocating list to the free list, and enters it into the
 *	phys-to-kv hash table.  Returns 0 on success or the
 *	bus_dmamap_create() error (CCB stays on the allocating list).
 */
static int
bha_init_ccb(struct bha_softc *sc, struct bha_ccb *ccb)
{
	struct bha_ccb_group *bcg = BHA_CCB_GROUP(ccb);
	int hashnum, error;

	/*
	 * Create the DMA map for this CCB.
	 *
	 * XXX ALLOCNOW is a hack to prevent bounce buffer shortages
	 * XXX in the ISA case.  A better solution is needed.
1868 */ 1869 error = bus_dmamap_create(sc->sc_dmat, BHA_MAXXFER, BHA_NSEG, 1870 BHA_MAXXFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | sc->sc_dmaflags, 1871 &ccb->dmamap_xfer); 1872 if (error) { 1873 aprint_error_dev(sc->sc_dev, 1874 "unable to create CCB DMA map, error = %d\n", error); 1875 return (error); 1876 } 1877 1878 TAILQ_REMOVE(&sc->sc_allocating_ccbs, ccb, chain); 1879 1880 /* 1881 * Put the CCB into the phystokv hash table. 1882 */ 1883 ccb->hashkey = bcg->bcg_dmamap->dm_segs[0].ds_addr + 1884 BHA_CCB_OFFSET(ccb); 1885 hashnum = CCB_HASH(ccb->hashkey); 1886 ccb->nexthash = sc->sc_ccbhash[hashnum]; 1887 sc->sc_ccbhash[hashnum] = ccb; 1888 bha_reset_ccb(ccb); 1889 1890 TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain); 1891 sc->sc_cur_ccbs++; 1892 1893 return (0); 1894 } 1895 1896 /* 1897 * bha_get_ccb: 1898 * 1899 * Get a CCB for the SCSI operation. If there are none left, 1900 * wait until one becomes available, if we can. 1901 */ 1902 static struct bha_ccb * 1903 bha_get_ccb(struct bha_softc *sc) 1904 { 1905 struct bha_ccb *ccb; 1906 int s; 1907 1908 s = splbio(); 1909 ccb = TAILQ_FIRST(&sc->sc_free_ccb); 1910 if (ccb != NULL) { 1911 TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain); 1912 ccb->flags |= CCB_ALLOC; 1913 } 1914 splx(s); 1915 return (ccb); 1916 } 1917 1918 /* 1919 * bha_free_ccb: 1920 * 1921 * Put a CCB back onto the free list. 1922 */ 1923 static void 1924 bha_free_ccb(struct bha_softc *sc, struct bha_ccb *ccb) 1925 { 1926 int s; 1927 1928 s = splbio(); 1929 bha_reset_ccb(ccb); 1930 TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain); 1931 splx(s); 1932 } 1933 1934 /* 1935 * bha_ccb_phys_kv: 1936 * 1937 * Given a CCB DMA address, locate the CCB in kernel virtual space. 
1938 */ 1939 static struct bha_ccb * 1940 bha_ccb_phys_kv(struct bha_softc *sc, bus_addr_t ccb_phys) 1941 { 1942 int hashnum = CCB_HASH(ccb_phys); 1943 struct bha_ccb *ccb = sc->sc_ccbhash[hashnum]; 1944 1945 while (ccb) { 1946 if (ccb->hashkey == ccb_phys) 1947 break; 1948 ccb = ccb->nexthash; 1949 } 1950 return (ccb); 1951 } 1952