/*	$NetBSD: bha.c,v 1.59 2004/08/24 00:53:29 thorpej Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Originally written by Julian Elischer (julian@tfs.com)
 * for TRW Financial Systems for use under the MACH(2.5) operating system.
 *
 * TRW Financial Systems, in accordance with their agreement with Carnegie
 * Mellon University, makes this software available to CMU to distribute
 * or use in any manner that they see fit as long as this message is kept with
 * the software. For this reason TFS also grants any other persons or
 * organisations permission to use or modify this software.
 *
 * TFS supplies this software to be publicly redistributed
 * on the understanding that TFS is not responsible for the correct
 * functioning of this software in any circumstances.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bha.c,v 1.59 2004/08/24 00:53:29 thorpej Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/bhareg.h>
#include <dev/ic/bhavar.h>

#ifndef DDB
#define Debugger() panic("should call debugger here (bha.c)")
#endif /* ! DDB */

#define	BHA_MAXXFER	((BHA_NSEG - 1) << PGSHIFT)

#ifdef BHADEBUG
int bha_debug = 0;
#endif /* BHADEBUG */

static int bha_cmd(bus_space_tag_t, bus_space_handle_t, const char *, int,
    u_char *, int, u_char *);

static void bha_scsipi_request(struct scsipi_channel *,
    scsipi_adapter_req_t, void *);
static void bha_minphys(struct buf *);

static void bha_get_xfer_mode(struct bha_softc *,
    struct scsipi_xfer_mode *);

static void bha_done(struct bha_softc *, struct bha_ccb *);
static int bha_poll(struct bha_softc *, struct scsipi_xfer *, int);
static void bha_timeout(void *arg);

static int bha_init(struct bha_softc *);

static int bha_create_mailbox(struct bha_softc *);
static void bha_collect_mbo(struct bha_softc *);

static void bha_queue_ccb(struct bha_softc *, struct bha_ccb *);
static void bha_start_ccbs(struct bha_softc *);
static void bha_finish_ccbs(struct bha_softc *);

static struct bha_ccb *bha_ccb_phys_kv(struct bha_softc *, bus_addr_t);
static void bha_create_ccbs(struct bha_softc *, int);
static int bha_init_ccb(struct bha_softc *, struct bha_ccb *);
static struct bha_ccb *bha_get_ccb(struct bha_softc *);
static void bha_free_ccb(struct bha_softc *, struct bha_ccb *);

#define	BHA_RESET_TIMEOUT	2000	/* time to wait for reset (mSec) */
#define	BHA_ABORT_TIMEOUT	2000	/* time to wait for abort (mSec) */

/*
 * Number of CCBs in an allocation group; must be computed at run-time.
 */
static int bha_ccbs_per_group;

static __inline struct bha_mbx_out *
bha_nextmbo(struct bha_softc *sc, struct bha_mbx_out *mbo)
{

	if (mbo == &sc->sc_mbo[sc->sc_mbox_count - 1])
		return (&sc->sc_mbo[0]);
	return (mbo + 1);
}

static __inline struct bha_mbx_in *
bha_nextmbi(struct bha_softc *sc, struct bha_mbx_in *mbi)
{
	if (mbi == &sc->sc_mbi[sc->sc_mbox_count - 1])
		return (&sc->sc_mbi[0]);
	return (mbi + 1);
}

/*
 * bha_attach:
 *
 *	Finish attaching a Buslogic controller, and configure children.
 */
void
bha_attach(struct bha_softc *sc)
{
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	int initial_ccbs;

	/*
	 * Initialize the number of CCBs per group.
	 */
	if (bha_ccbs_per_group == 0)
		bha_ccbs_per_group = BHA_CCBS_PER_GROUP;

	initial_ccbs = bha_info(sc);
	if (initial_ccbs == 0) {
		aprint_error("%s: unable to get adapter info\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &sc->sc_dev;
	adapt->adapt_nchannels = 1;
	/* adapt_openings initialized below */
	adapt->adapt_max_periph = sc->sc_mbox_count;
	adapt->adapt_request = bha_scsipi_request;
	adapt->adapt_minphys = bha_minphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = SCSIPI_CHAN_CANGROW;
	chan->chan_ntargets = (sc->sc_flags & BHAF_WIDE) ? 16 : 8;
	chan->chan_nluns = (sc->sc_flags & BHAF_WIDE_LUN) ? 32 : 8;
	chan->chan_id = sc->sc_scsi_id;

	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	TAILQ_INIT(&sc->sc_allocating_ccbs);

	if (bha_create_mailbox(sc) != 0)
		return;

	bha_create_ccbs(sc, initial_ccbs);
	if (sc->sc_cur_ccbs < 2) {
		aprint_error("%s: not enough CCBs to run\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	adapt->adapt_openings = sc->sc_cur_ccbs;

	if (bha_init(sc) != 0)
		return;

	(void) config_found(&sc->sc_dev, &sc->sc_channel, scsiprint);
}

/*
 * bha_intr:
 *
 *	Interrupt service routine.
 */
int
bha_intr(void *arg)
{
	struct bha_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_char sts;

#ifdef BHADEBUG
	printf("%s: bha_intr ", sc->sc_dev.dv_xname);
#endif /* BHADEBUG */

	/*
	 * First acknowledge the interrupt.  Then, if it's not telling about
	 * a completed operation, just return.
	 */
	sts = bus_space_read_1(iot, ioh, BHA_INTR_PORT);
	if ((sts & BHA_INTR_ANYINTR) == 0)
		return (0);
	bus_space_write_1(iot, ioh, BHA_CTRL_PORT, BHA_CTRL_IRST);

#ifdef BHADIAG
	/* Make sure we clear CCB_SENDING before finishing a CCB. */
	bha_collect_mbo(sc);
#endif

	/* Mail box out empty? */
	if (sts & BHA_INTR_MBOA) {
		struct bha_toggle toggle;

		toggle.cmd.opcode = BHA_MBO_INTR_EN;
		toggle.cmd.enable = 0;
		bha_cmd(iot, ioh, sc->sc_dev.dv_xname,
		    sizeof(toggle.cmd), (u_char *)&toggle.cmd,
		    0, (u_char *)0);
		bha_start_ccbs(sc);
	}

	/* Mail box in full? */
	if (sts & BHA_INTR_MBIF)
		bha_finish_ccbs(sc);

	return (1);
}

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

/*
 * bha_scsipi_request:
 *
 *	Perform a request for the SCSIPI layer.
 */
static void
bha_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct bha_softc *sc = (void *)adapt->adapt_dev;
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	bus_dma_tag_t dmat = sc->sc_dmat;
	struct bha_ccb *ccb;
	int error, seg, flags, s;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		flags = xs->xs_control;

		SC_DEBUG(periph, SCSIPI_DB2, ("bha_scsipi_request\n"));

		/* Get a CCB to use. */
		ccb = bha_get_ccb(sc);
#ifdef DIAGNOSTIC
		/*
		 * This should never happen as we track the resources
		 * in the mid-layer.
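		 * (adapt_openings is set to the number of CCBs we have
		 * created, so the mid-layer should never hand us more
		 * commands than we have CCBs for.)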
		 */
		if (ccb == NULL) {
			scsipi_printaddr(periph);
			printf("unable to allocate ccb\n");
			panic("bha_scsipi_request");
		}
#endif

		ccb->xs = xs;
		ccb->timeout = xs->timeout;

		/*
		 * Put all the arguments for the xfer in the ccb
		 */
		if (flags & XS_CTL_RESET) {
			ccb->opcode = BHA_RESET_CCB;
			ccb->scsi_cmd_length = 0;
		} else {
			/* can't use S/G if zero length */
			ccb->opcode = (xs->datalen ? BHA_INIT_SCAT_GATH_CCB
			    : BHA_INITIATOR_CCB);
			memcpy(&ccb->scsi_cmd, xs->cmd,
			    ccb->scsi_cmd_length = xs->cmdlen);
		}

		if (xs->datalen) {
			/*
			 * Map the DMA transfer.
			 */
#ifdef TFS
			if (flags & XS_CTL_DATA_UIO) {
				error = bus_dmamap_load_uio(dmat,
				    ccb->dmamap_xfer, (struct uio *)xs->data,
				    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
				     BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
				    ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
				     BUS_DMA_WRITE));
			} else
#endif /* TFS */
			{
				error = bus_dmamap_load(dmat,
				    ccb->dmamap_xfer, xs->data, xs->datalen,
				    NULL,
				    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
				     BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
				    ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
				     BUS_DMA_WRITE));
			}

			switch (error) {
			case 0:
				break;

			case ENOMEM:
			case EAGAIN:
				xs->error = XS_RESOURCE_SHORTAGE;
				goto out_bad;

			default:
				xs->error = XS_DRIVER_STUFFUP;
				printf("%s: error %d loading DMA map\n",
				    sc->sc_dev.dv_xname, error);
 out_bad:
				bha_free_ccb(sc, ccb);
				scsipi_done(xs);
				return;
			}

			bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
			    ccb->dmamap_xfer->dm_mapsize,
			    (flags & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
			    BUS_DMASYNC_PREWRITE);

			/*
			 * Load the hardware scatter/gather map with the
			 * contents of the DMA map.
			 */
			for (seg = 0; seg < ccb->dmamap_xfer->dm_nsegs; seg++) {
				ltophys(ccb->dmamap_xfer->dm_segs[seg].ds_addr,
				    ccb->scat_gath[seg].seg_addr);
				ltophys(ccb->dmamap_xfer->dm_segs[seg].ds_len,
				    ccb->scat_gath[seg].seg_len);
			}

			ltophys(ccb->hashkey + offsetof(struct bha_ccb,
			    scat_gath), ccb->data_addr);
			ltophys(ccb->dmamap_xfer->dm_nsegs *
			    sizeof(struct bha_scat_gath), ccb->data_length);
		} else {
			/*
			 * No data xfer, use non S/G values.
			 */
			ltophys(0, ccb->data_addr);
			ltophys(0, ccb->data_length);
		}

		if (XS_CTL_TAGTYPE(xs) != 0) {
			ccb->tag_enable = 1;
			ccb->tag_type = xs->xs_tag_type & 0x03;
		} else {
			ccb->tag_enable = 0;
			ccb->tag_type = 0;
		}

		ccb->data_out = 0;
		ccb->data_in = 0;
		ccb->target = periph->periph_target;
		ccb->lun = periph->periph_lun;
		ltophys(ccb->hashkey + offsetof(struct bha_ccb, scsi_sense),
		    ccb->sense_ptr);
		ccb->req_sense_length = sizeof(ccb->scsi_sense);
		ccb->host_stat = 0x00;
		ccb->target_stat = 0x00;
		ccb->link_id = 0;
		ltophys(0, ccb->link_addr);

		BHA_CCB_SYNC(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		s = splbio();
		bha_queue_ccb(sc, ccb);
		splx(s);

		SC_DEBUG(periph, SCSIPI_DB3, ("cmd_sent\n"));
		if ((flags & XS_CTL_POLL) == 0)
			return;

		/*
		 * If we can't use interrupts, poll on completion
		 */
		if (bha_poll(sc, xs, ccb->timeout)) {
			bha_timeout(ccb);
			if (bha_poll(sc, xs, ccb->timeout))
				bha_timeout(ccb);
		}
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		if (sc->sc_cur_ccbs == sc->sc_max_ccbs) {
			chan->chan_flags &= ~SCSIPI_CHAN_CANGROW;
			return;
		}
		seg = sc->sc_cur_ccbs;
		bha_create_ccbs(sc, bha_ccbs_per_group);
		adapt->adapt_openings += sc->sc_cur_ccbs - seg;
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * Can't really do this on the Buslogic.  It has its
		 * own setup info.  But we do know how to query what
		 * the settings are.
		 */
		bha_get_xfer_mode(sc, (struct scsipi_xfer_mode *)arg);
		return;
	}
}

/*
 * bha_minphys:
 *
 *	Limit a transfer to our maximum transfer size.
 */
void
bha_minphys(struct buf *bp)
{

	if (bp->b_bcount > BHA_MAXXFER)
		bp->b_bcount = BHA_MAXXFER;
	minphys(bp);
}

/*****************************************************************************
 * SCSI job execution helper routines
 *****************************************************************************/

/*
 * bha_get_xfer_mode:
 *
 *	Negotiate the xfer mode for the specified periph, and report
 *	back the mode to the midlayer.
 *
 *	NOTE: we must be called at splbio().
 */
static void
bha_get_xfer_mode(struct bha_softc *sc, struct scsipi_xfer_mode *xm)
{
	struct bha_setup hwsetup;
	struct bha_period hwperiod;
	struct bha_sync *bs;
	int toff = xm->xm_target & 7, tmask = (1 << toff);
	int wide, period, offset, rlen;

	/*
	 * Issue an Inquire Setup Information.  We can extract
	 * sync and wide information from here.
	 */
	rlen = sizeof(hwsetup.reply) +
	    ((sc->sc_flags & BHAF_WIDE) ? sizeof(hwsetup.reply_w) : 0);
	hwsetup.cmd.opcode = BHA_INQUIRE_SETUP;
	hwsetup.cmd.len = rlen;
	bha_cmd(sc->sc_iot, sc->sc_ioh, sc->sc_dev.dv_xname,
	    sizeof(hwsetup.cmd), (u_char *)&hwsetup.cmd,
	    rlen, (u_char *)&hwsetup.reply);

	xm->xm_mode = 0;
	xm->xm_period = 0;
	xm->xm_offset = 0;

	/*
	 * First check for wide.  On later boards, we can check
	 * directly in the setup info if wide is currently active.
	 *
	 * On earlier boards, we have to make an educated guess.
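	 * (If the board itself is wide-capable, assume the target is
	 * currently running wide as well.)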
	 */
	if (sc->sc_flags & BHAF_WIDE) {
		if (strcmp(sc->sc_firmware, "5.06L") >= 0) {
			if (xm->xm_target > 7) {
				wide =
				    hwsetup.reply_w.high_wide_active & tmask;
			} else {
				wide =
				    hwsetup.reply_w.low_wide_active & tmask;
			}
			if (wide)
				xm->xm_mode |= PERIPH_CAP_WIDE16;
		} else {
			/* XXX Check `wide permitted' in the config info. */
			xm->xm_mode |= PERIPH_CAP_WIDE16;
		}
	}

	/*
	 * Now get basic sync info.
	 */
	bs = (xm->xm_target > 7) ?
	    &hwsetup.reply_w.sync_high[toff] :
	    &hwsetup.reply.sync_low[toff];

	if (bs->valid) {
		xm->xm_mode |= PERIPH_CAP_SYNC;
		period = (bs->period * 50) + 20;
		offset = bs->offset;

		/*
		 * On boards that can do Fast and Ultra, use the Inquire Period
		 * command to get the period.
		 */
		if (sc->sc_firmware[0] >= '3') {
			rlen = sizeof(hwperiod.reply) +
			    ((sc->sc_flags & BHAF_WIDE) ?
			    sizeof(hwperiod.reply_w) : 0);
			hwperiod.cmd.opcode = BHA_INQUIRE_PERIOD;
			hwperiod.cmd.len = rlen;
			bha_cmd(sc->sc_iot, sc->sc_ioh, sc->sc_dev.dv_xname,
			    sizeof(hwperiod.cmd), (u_char *)&hwperiod.cmd,
			    rlen, (u_char *)&hwperiod.reply);

			if (xm->xm_target > 7)
				period = hwperiod.reply_w.period[toff];
			else
				period = hwperiod.reply.period[toff];

			period *= 10;
		}

		xm->xm_period =
		    scsipi_sync_period_to_factor(period * 100);
		xm->xm_offset = offset;
	}

	/*
	 * Now check for tagged queueing support.
	 *
	 * XXX Check `tags permitted' in the config info.
	 */
	if (sc->sc_flags & BHAF_TAGGED_QUEUEING)
		xm->xm_mode |= PERIPH_CAP_TQING;

	scsipi_async_event(&sc->sc_channel, ASYNC_EVENT_XFER_MODE, xm);
}

/*
 * bha_done:
 *
 *	A CCB has completed execution.  Pass the status back to the
 *	upper layer.
 */
static void
bha_done(struct bha_softc *sc, struct bha_ccb *ccb)
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	struct scsipi_xfer *xs = ccb->xs;

	SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("bha_done\n"));

#ifdef BHADIAG
	if (ccb->flags & CCB_SENDING) {
		printf("%s: exiting ccb still in transit!\n",
		    sc->sc_dev.dv_xname);
		Debugger();
		return;
	}
#endif
	if ((ccb->flags & CCB_ALLOC) == 0) {
		printf("%s: exiting ccb not allocated!\n",
		    sc->sc_dev.dv_xname);
		Debugger();
		return;
	}

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}

	if (xs->error == XS_NOERROR) {
		if (ccb->host_stat != BHA_OK) {
			switch (ccb->host_stat) {
			case BHA_SEL_TIMEOUT:	/* No response */
				xs->error = XS_SELTIMEOUT;
				break;
			default:	/* Other scsi protocol messes */
				printf("%s: host_stat %x\n",
				    sc->sc_dev.dv_xname, ccb->host_stat);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else if (ccb->target_stat != SCSI_OK) {
			switch (ccb->target_stat) {
			case SCSI_CHECK:
				memcpy(&xs->sense.scsi_sense,
				    &ccb->scsi_sense,
				    sizeof(xs->sense.scsi_sense));
				xs->error = XS_SENSE;
				break;
			case SCSI_BUSY:
				xs->error = XS_BUSY;
				break;
			default:
				printf("%s: target_stat %x\n",
				    sc->sc_dev.dv_xname, ccb->target_stat);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else
			xs->resid = 0;
	}

	bha_free_ccb(sc, ccb);
	scsipi_done(xs);
}

/*
 * bha_poll:
 *
 *	Poll for completion of the specified job.
 */
static int
bha_poll(struct bha_softc *sc, struct scsipi_xfer *xs, int count)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;

	/* timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		/*
		 * If we had interrupts enabled, would we
		 * have got an interrupt?
		 */
		if (bus_space_read_1(iot, ioh, BHA_INTR_PORT) &
		    BHA_INTR_ANYINTR)
			bha_intr(sc);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);	/* only happens in boot so ok */
		count--;
	}
	return (1);
}

/*
 * bha_timeout:
 *
 *	CCB timeout handler.
 */
static void
bha_timeout(void *arg)
{
	struct bha_ccb *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	struct bha_softc *sc =
	    (void *)periph->periph_channel->chan_adapter->adapt_dev;
	int s;

	scsipi_printaddr(periph);
	printf("timed out");

	s = splbio();

#ifdef BHADIAG
	/*
	 * If the ccb's mbx is not free, then the board has gone Far East?
	 */
	bha_collect_mbo(sc);
	if (ccb->flags & CCB_SENDING) {
		printf("%s: not taking commands!\n", sc->sc_dev.dv_xname);
		Debugger();
	}
#endif

	/*
	 * If it has been through before, then
	 * a previous abort has failed, don't
	 * try abort again
	 */
	if (ccb->flags & CCB_ABORT) {
		/* abort timed out */
		printf(" AGAIN\n");
		/* XXX Must reset! */
	} else {
		/* abort the operation that has timed out */
		printf("\n");
		ccb->xs->error = XS_TIMEOUT;
		ccb->timeout = BHA_ABORT_TIMEOUT;
		ccb->flags |= CCB_ABORT;
		bha_queue_ccb(sc, ccb);
	}

	splx(s);
}

/*****************************************************************************
 * Misc. subroutines.
 *****************************************************************************/

/*
 * bha_cmd:
 *
 *	Send a command to the Buslogic controller.
 */
static int
bha_cmd(bus_space_tag_t iot, bus_space_handle_t ioh, const char *name, int icnt,
    u_char *ibuf, int ocnt, u_char *obuf)
{
	int i;
	int wait;
	u_char sts;
	u_char opcode = ibuf[0];

	/*
	 * Calculate a reasonable timeout for the command.
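	 * The wait count is in units of 50-microsecond polls of the
	 * status port; device-inquiry commands are given much longer
	 * than ordinary adapter commands.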
	 */
	switch (opcode) {
	case BHA_INQUIRE_DEVICES:
	case BHA_INQUIRE_DEVICES_2:
		wait = 90 * 20000;
		break;
	default:
		wait = 1 * 20000;
		break;
	}

	/*
	 * Wait for the adapter to go idle, unless it's one of
	 * the commands which don't need this
	 */
	if (opcode != BHA_MBO_INTR_EN) {
		for (i = 20000; i; i--) {	/* 1 sec? */
			sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
			if (sts & BHA_STAT_IDLE)
				break;
			delay(50);
		}
		if (!i) {
			printf("%s: bha_cmd, host not idle(0x%x)\n",
			    name, sts);
			return (1);
		}
	}

	/*
	 * Now that it is idle, if we expect output, preflush the
	 * queue feeding to us.
	 */
	if (ocnt) {
		while ((bus_space_read_1(iot, ioh, BHA_STAT_PORT)) &
		    BHA_STAT_DF)
			bus_space_read_1(iot, ioh, BHA_DATA_PORT);
	}

	/*
	 * Output the command and the number of arguments given
	 * for each byte; first check the port is empty.
	 */
	while (icnt--) {
		for (i = wait; i; i--) {
			sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
			if (!(sts & BHA_STAT_CDF))
				break;
			delay(50);
		}
		if (!i) {
			if (opcode != BHA_INQUIRE_REVISION)
				printf("%s: bha_cmd, cmd/data port full\n",
				    name);
			goto bad;
		}
		bus_space_write_1(iot, ioh, BHA_CMD_PORT, *ibuf++);
	}

	/*
	 * If we expect input, loop that many times, each time,
	 * looking for the data register to have valid data
	 */
	while (ocnt--) {
		for (i = wait; i; i--) {
			sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
			if (sts & BHA_STAT_DF)
				break;
			delay(50);
		}
		if (!i) {
#ifdef BHADEBUG
			if (opcode != BHA_INQUIRE_REVISION)
				printf("%s: bha_cmd, cmd/data port empty %d\n",
				    name, ocnt);
#endif /* BHADEBUG */
			goto bad;
		}
		*obuf++ = bus_space_read_1(iot, ioh, BHA_DATA_PORT);
	}

	/*
	 * Wait for the board to report a finished instruction.
	 * We may get an extra interrupt for the HACC signal, but this is
	 * unimportant.
	 */
	if (opcode != BHA_MBO_INTR_EN && opcode != BHA_MODIFY_IOPORT) {
		for (i = 20000; i; i--) {	/* 1 sec? */
			sts = bus_space_read_1(iot, ioh, BHA_INTR_PORT);
			/* XXX Need to save this in the interrupt handler? */
			if (sts & BHA_INTR_HACC)
				break;
			delay(50);
		}
		if (!i) {
			printf("%s: bha_cmd, host not finished(0x%x)\n",
			    name, sts);
			return (1);
		}
	}
	bus_space_write_1(iot, ioh, BHA_CTRL_PORT, BHA_CTRL_IRST);
	return (0);

bad:
	bus_space_write_1(iot, ioh, BHA_CTRL_PORT, BHA_CTRL_SRST);
	return (1);
}

/*
 * bha_find:
 *
 *	Find the board.
 */
int
bha_find(bus_space_tag_t iot, bus_space_handle_t ioh)
{
	int i;
	u_char sts;
	struct bha_extended_inquire inquire;

	/* Check something is at the ports we need to access */
	sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
	if (sts == 0xFF)
		return (0);

	/*
	 * Reset board.  If it doesn't respond, assume
	 * that it's not there; good for the probe.
	 */

	bus_space_write_1(iot, ioh, BHA_CTRL_PORT,
	    BHA_CTRL_HRST | BHA_CTRL_SRST);

	delay(100);
	for (i = BHA_RESET_TIMEOUT; i; i--) {
		sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
		if (sts == (BHA_STAT_IDLE | BHA_STAT_INIT))
			break;
		delay(1000);
	}
	if (!i) {
#ifdef BHADEBUG
		if (bha_debug)
			printf("bha_find: No answer from buslogic board\n");
#endif /* BHADEBUG */
		return (0);
	}

	/*
	 * The BusLogic cards implement an Adaptec 1542 (aha)-compatible
	 * interface. The native bha interface is not compatible with
	 * an aha 1542.  We need to ensure that we never match an
	 * Adaptec 1542.  We must also avoid sending Adaptec-compatible
	 * commands to a real bha, lest it go into 1542 emulation mode.
	 * (On an indirect bus like ISA, we should always probe for BusLogic
	 * interfaces before Adaptec interfaces).
	 */

	/*
	 * Make sure we don't match an AHA-1542A or AHA-1542B, by checking
	 * for an extended-geometry register.  The 1542[AB] don't have one.
	 */
	sts = bus_space_read_1(iot, ioh, BHA_EXTGEOM_PORT);
	if (sts == 0xFF)
		return (0);

	/*
	 * Check that we actually know how to use this board.
	 */
	delay(1000);
	inquire.cmd.opcode = BHA_INQUIRE_EXTENDED;
	inquire.cmd.len = sizeof(inquire.reply);
	i = bha_cmd(iot, ioh, "(bha_find)",
	    sizeof(inquire.cmd), (u_char *)&inquire.cmd,
	    sizeof(inquire.reply), (u_char *)&inquire.reply);

	/*
	 * Some 1542Cs (CP, perhaps not CF, may depend on firmware rev)
	 * have the extended-geometry register and also respond to
	 * BHA_INQUIRE_EXTENDED.  Make sure we never match such cards,
	 * by checking the size of the reply is what a BusLogic card returns.
	 */
	if (i) {
#ifdef BHADEBUG
		printf("bha_find: board returned %d instead of %d to %s\n",
		    i, sizeof(inquire.reply), "INQUIRE_EXTENDED");
#endif
		return (0);
	}

	/* OK, we know we've found a buslogic adaptor. */

	switch (inquire.reply.bus_type) {
	case BHA_BUS_TYPE_24BIT:
	case BHA_BUS_TYPE_32BIT:
		break;
	case BHA_BUS_TYPE_MCA:
		/* We don't grok MicroChannel (yet). */
		return (0);
	default:
		printf("bha_find: illegal bus type %c\n",
		    inquire.reply.bus_type);
		return (0);
	}

	return (1);
}


/*
 * bha_inquire_config:
 *
 *	Determine irq/drq.
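 *	The values come from the board's Inquire Configuration reply.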
 */
int
bha_inquire_config(bus_space_tag_t iot, bus_space_handle_t ioh,
    struct bha_probe_data *sc)
{
	int irq, drq;
	struct bha_config config;

	/*
	 * Assume we have a board at this stage; setup DMA channel from
	 * jumpers and save int level.
	 */
	delay(1000);
	config.cmd.opcode = BHA_INQUIRE_CONFIG;
	bha_cmd(iot, ioh, "(bha_inquire_config)",
	    sizeof(config.cmd), (u_char *)&config.cmd,
	    sizeof(config.reply), (u_char *)&config.reply);
	switch (config.reply.chan) {
	case EISADMA:
		drq = -1;
		break;
	case CHAN0:
		drq = 0;
		break;
	case CHAN5:
		drq = 5;
		break;
	case CHAN6:
		drq = 6;
		break;
	case CHAN7:
		drq = 7;
		break;
	default:
		printf("bha: illegal drq setting %x\n",
		    config.reply.chan);
		return (0);
	}

	switch (config.reply.intr) {
	case INT9:
		irq = 9;
		break;
	case INT10:
		irq = 10;
		break;
	case INT11:
		irq = 11;
		break;
	case INT12:
		irq = 12;
		break;
	case INT14:
		irq = 14;
		break;
	case INT15:
		irq = 15;
		break;
	default:
		printf("bha: illegal irq setting %x\n",
		    config.reply.intr);
		return (0);
	}

	/* if we want to fill in softc, do so now */
	if (sc != NULL) {
		sc->sc_irq = irq;
		sc->sc_drq = drq;
	}

	return (1);
}

int
bha_probe_inquiry(bus_space_tag_t iot, bus_space_handle_t ioh,
    struct bha_probe_data *bpd)
{
	return bha_find(iot, ioh) && bha_inquire_config(iot, ioh, bpd);
}

/*
 * bha_disable_isacompat:
 *
 *	Disable the ISA-compatibility ioports on PCI bha devices,
 *	to ensure they're not autoconfigured a second time as an ISA bha.
 */
int
bha_disable_isacompat(struct bha_softc *sc)
{
	struct bha_isadisable isa_disable;

	isa_disable.cmd.opcode = BHA_MODIFY_IOPORT;
	isa_disable.cmd.modifier = BHA_IOMODIFY_DISABLE1;
	bha_cmd(sc->sc_iot, sc->sc_ioh, sc->sc_dev.dv_xname,
	    sizeof(isa_disable.cmd), (u_char*)&isa_disable.cmd,
	    0, (u_char *)0);
	return (0);
}

/*
 * bha_info:
 *
 *	Get information about the board, and report it.  We
 *	return the initial number of CCBs, 0 if we failed.
 */
int
bha_info(struct bha_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct bha_extended_inquire inquire;
	struct bha_config config;
	struct bha_devices devices;
	struct bha_setup setup;
	struct bha_model model;
	struct bha_revision revision;
	struct bha_digit digit;
	int i, j, initial_ccbs, rlen;
	char *name = sc->sc_dev.dv_xname;
	char *p;

	/*
	 * Fetch the extended inquire information.
	 */
	inquire.cmd.opcode = BHA_INQUIRE_EXTENDED;
	inquire.cmd.len = sizeof(inquire.reply);
	bha_cmd(iot, ioh, name,
	    sizeof(inquire.cmd), (u_char *)&inquire.cmd,
	    sizeof(inquire.reply), (u_char *)&inquire.reply);

	/*
	 * Fetch the configuration information.
	 */
	config.cmd.opcode = BHA_INQUIRE_CONFIG;
	bha_cmd(iot, ioh, name,
	    sizeof(config.cmd), (u_char *)&config.cmd,
	    sizeof(config.reply), (u_char *)&config.reply);

	sc->sc_scsi_id = config.reply.scsi_dev;

	/*
	 * Get the firmware revision.
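	 * The string is assembled as <major>.<minor> plus one or two
	 * extra revision characters, with trailing blanks and NULs
	 * trimmed off.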
	 */
	p = sc->sc_firmware;
	revision.cmd.opcode = BHA_INQUIRE_REVISION;
	bha_cmd(iot, ioh, name,
	    sizeof(revision.cmd), (u_char *)&revision.cmd,
	    sizeof(revision.reply), (u_char *)&revision.reply);
	*p++ = revision.reply.firm_revision;
	*p++ = '.';
	*p++ = revision.reply.firm_version;
	digit.cmd.opcode = BHA_INQUIRE_REVISION_3;
	bha_cmd(iot, ioh, name,
	    sizeof(digit.cmd), (u_char *)&digit.cmd,
	    sizeof(digit.reply), (u_char *)&digit.reply);
	*p++ = digit.reply.digit;
	if (revision.reply.firm_revision >= '3' ||
	    (revision.reply.firm_revision == '3' &&
	     revision.reply.firm_version >= '3')) {
		digit.cmd.opcode = BHA_INQUIRE_REVISION_4;
		bha_cmd(iot, ioh, name,
		    sizeof(digit.cmd), (u_char *)&digit.cmd,
		    sizeof(digit.reply), (u_char *)&digit.reply);
		*p++ = digit.reply.digit;
	}
	while (p > sc->sc_firmware && (p[-1] == ' ' || p[-1] == '\0'))
		p--;
	*p = '\0';

	/*
	 * Get the model number.
	 *
	 * Some boards do not handle the Inquire Board Model Number
	 * command correctly, or don't give correct information.
	 *
	 * So, we use the Firmware Revision and Extended Setup
	 * information to fixup the model number in these cases.
	 *
	 * The firmware version indicates:
	 *
	 *	5.xx	BusLogic "W" Series Host Adapters
	 *		BT-948/958/958D
	 *
	 *	4.xx	BusLogic "C" Series Host Adapters
	 *		BT-946C/956C/956CD/747C/757C/757CD/445C/545C/540CF
	 *
	 *	3.xx	BusLogic "S" Series Host Adapters
	 *		BT-747S/747D/757S/757D/445S/545S/542D
	 *		BT-542B/742A (revision H)
	 *
	 *	2.xx	BusLogic "A" Series Host Adapters
	 *		BT-542B/742A (revision G and below)
	 *
	 *	0.xx	AMI FastDisk VLB/EISA BusLogic Clone Host Adapter
	 */
	if (inquire.reply.bus_type == BHA_BUS_TYPE_24BIT &&
	    sc->sc_firmware[0] < '3')
		snprintf(sc->sc_model, sizeof(sc->sc_model), "542B");
	else if (inquire.reply.bus_type == BHA_BUS_TYPE_32BIT &&
	    sc->sc_firmware[0] == '2' &&
	    (sc->sc_firmware[2] == '1' ||
	     (sc->sc_firmware[2] == '2' && sc->sc_firmware[3] == '0')))
		snprintf(sc->sc_model, sizeof(sc->sc_model), "742A");
	else if (inquire.reply.bus_type == BHA_BUS_TYPE_32BIT &&
	    sc->sc_firmware[0] == '0')
		snprintf(sc->sc_model, sizeof(sc->sc_model), "747A");
	else {
		p = sc->sc_model;
		model.cmd.opcode = BHA_INQUIRE_MODEL;
		model.cmd.len = sizeof(model.reply);
		bha_cmd(iot, ioh, name,
		    sizeof(model.cmd), (u_char *)&model.cmd,
		    sizeof(model.reply), (u_char *)&model.reply);
		*p++ = model.reply.id[0];
		*p++ = model.reply.id[1];
		*p++ = model.reply.id[2];
		*p++ = model.reply.id[3];
		while (p > sc->sc_model && (p[-1] == ' ' || p[-1] == '\0'))
			p--;
		*p++ = model.reply.version[0];
		*p++ = model.reply.version[1];
		while (p > sc->sc_model && (p[-1] == ' ' || p[-1] == '\0'))
			p--;
		*p = '\0';
	}

	/* Enable round-robin scheme - appeared at firmware rev. 3.31. */
	if (strcmp(sc->sc_firmware, "3.31") >= 0)
		sc->sc_flags |= BHAF_STRICT_ROUND_ROBIN;

	/*
	 * Determine some characteristics about our bus.
	 */
	if (inquire.reply.scsi_flags & BHA_SCSI_WIDE)
		sc->sc_flags |= BHAF_WIDE;
	if (inquire.reply.scsi_flags & BHA_SCSI_DIFFERENTIAL)
		sc->sc_flags |= BHAF_DIFFERENTIAL;
	if (inquire.reply.scsi_flags & BHA_SCSI_ULTRA)
		sc->sc_flags |= BHAF_ULTRA;

	/*
	 * Determine some characteristics of the board.
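	 * (Currently just the scatter/gather segment limit it reports.)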
	 */
	sc->sc_max_dmaseg = inquire.reply.sg_limit;

	/*
	 * Determine the maximum CCB count and whether or not
	 * tagged queueing is available on this host adapter.
	 *
	 * Tagged queueing works on:
	 *
	 *	"W" Series adapters
	 *	"C" Series adapters with firmware >= 4.22
	 *	"S" Series adapters with firmware >= 3.35
	 *
	 * The internal CCB counts are:
	 *
	 *	192	BT-948/958/958D
	 *	100	BT-946C/956C/956CD/747C/757C/757CD/445C
	 *	 50	BT-545C/540CF
	 *	 30	BT-747S/747D/757S/757D/445S/545S/542D/542B/742A
	 */
	switch (sc->sc_firmware[0]) {
	case '5':
		sc->sc_max_ccbs = 192;
		sc->sc_flags |= BHAF_TAGGED_QUEUEING;
		break;

	case '4':
		if (sc->sc_model[0] == '5')
			sc->sc_max_ccbs = 50;
		else
			sc->sc_max_ccbs = 100;
		if (strcmp(sc->sc_firmware, "4.22") >= 0)
			sc->sc_flags |= BHAF_TAGGED_QUEUEING;
		break;

	case '3':
		if (strcmp(sc->sc_firmware, "3.35") >= 0)
			sc->sc_flags |= BHAF_TAGGED_QUEUEING;
		/* FALLTHROUGH */

	default:
		sc->sc_max_ccbs = 30;
	}

	/*
	 * Set the mailbox count to precisely the number of HW CCBs
	 * available.  A mailbox isn't required while a CCB is executing,
	 * but this allows us to actually enqueue up to our resource
	 * limit.
	 *
	 * This will keep the mailbox count small on boards which don't
	 * have strict round-robin (they have to scan the entire set of
	 * mailboxes each time they run a command).
	 */
	sc->sc_mbox_count = sc->sc_max_ccbs;

	/*
	 * Obtain setup information.
	 */
	rlen = sizeof(setup.reply) +
	    ((sc->sc_flags & BHAF_WIDE) ? sizeof(setup.reply_w) : 0);
	setup.cmd.opcode = BHA_INQUIRE_SETUP;
	setup.cmd.len = rlen;
	bha_cmd(iot, ioh, name,
	    sizeof(setup.cmd), (u_char *)&setup.cmd,
	    rlen, (u_char *)&setup.reply);

	aprint_normal("%s: model BT-%s, firmware %s\n", sc->sc_dev.dv_xname,
	    sc->sc_model, sc->sc_firmware);

	aprint_normal("%s: %d H/W CCBs", sc->sc_dev.dv_xname, sc->sc_max_ccbs);
	if (setup.reply.sync_neg)
		aprint_normal(", sync");
	if (setup.reply.parity)
		aprint_normal(", parity");
	if (sc->sc_flags & BHAF_TAGGED_QUEUEING)
		aprint_normal(", tagged queueing");
	if (sc->sc_flags & BHAF_WIDE_LUN)
		aprint_normal(", wide LUN support");
	aprint_normal("\n");

	/*
	 * Poll targets 0 - 7.
	 */
	devices.cmd.opcode = BHA_INQUIRE_DEVICES;
	bha_cmd(iot, ioh, name,
	    sizeof(devices.cmd), (u_char *)&devices.cmd,
	    sizeof(devices.reply), (u_char *)&devices.reply);

	/* Count installed units. */
	initial_ccbs = 0;
	for (i = 0; i < 8; i++) {
		for (j = 0; j < 8; j++) {
			if (((devices.reply.lun_map[i] >> j) & 1) == 1)
				initial_ccbs++;
		}
	}

	/*
	 * Poll targets 8 - 15 if we have a wide bus.
	 */
	if (sc->sc_flags & BHAF_WIDE) {
		devices.cmd.opcode = BHA_INQUIRE_DEVICES_2;
		bha_cmd(iot, ioh, name,
		    sizeof(devices.cmd), (u_char *)&devices.cmd,
		    sizeof(devices.reply), (u_char *)&devices.reply);

		for (i = 0; i < 8; i++) {
			for (j = 0; j < 8; j++) {
				if (((devices.reply.lun_map[i] >> j) & 1) == 1)
					initial_ccbs++;
			}
		}
	}

	/*
	 * Double the initial CCB count, for good measure.
	 */
	initial_ccbs *= 2;

	/*
	 * Sanity check the initial CCB count; don't create more than
	 * we can enqueue (sc_max_ccbs), and make sure there are some
	 * at all.
	 */
	if (initial_ccbs > sc->sc_max_ccbs)
		initial_ccbs = sc->sc_max_ccbs;
	if (initial_ccbs == 0)
		initial_ccbs = 2;

	return (initial_ccbs);
}

/*
 * bha_init:
 *
 *	Initialize the board.
 */
static int
bha_init(struct bha_softc *sc)
{
	char *name = sc->sc_dev.dv_xname;
	struct bha_toggle toggle;
	struct bha_mailbox mailbox;
	struct bha_mbx_out *mbo;
	struct bha_mbx_in *mbi;
	int i;

	/*
	 * Set up the mailbox.  We always run the mailbox in round-robin.
	 */
	for (i = 0; i < sc->sc_mbox_count; i++) {
		mbo = &sc->sc_mbo[i];
		mbi = &sc->sc_mbi[i];

		mbo->cmd = BHA_MBO_FREE;
		BHA_MBO_SYNC(sc, mbo, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		mbi->comp_stat = BHA_MBI_FREE;
		BHA_MBI_SYNC(sc, mbi, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	sc->sc_cmbo = sc->sc_tmbo = &sc->sc_mbo[0];
	sc->sc_tmbi = &sc->sc_mbi[0];

	sc->sc_mbofull = 0;

	/*
	 * If the board supports strict round-robin, enable that.
	 */
	if (sc->sc_flags & BHAF_STRICT_ROUND_ROBIN) {
		toggle.cmd.opcode = BHA_ROUND_ROBIN;
		toggle.cmd.enable = 1;
		bha_cmd(sc->sc_iot, sc->sc_ioh, name,
		    sizeof(toggle.cmd), (u_char *)&toggle.cmd,
		    0, NULL);
	}

	/*
	 * Give the mailbox to the board.
	 */
	mailbox.cmd.opcode = BHA_MBX_INIT_EXTENDED;
	mailbox.cmd.nmbx = sc->sc_mbox_count;
	ltophys(sc->sc_dmamap_mbox->dm_segs[0].ds_addr, mailbox.cmd.addr);
	bha_cmd(sc->sc_iot, sc->sc_ioh, name,
	    sizeof(mailbox.cmd), (u_char *)&mailbox.cmd,
	    0, (u_char *)0);

	return (0);
}

/*****************************************************************************
 * CCB execution engine
 *****************************************************************************/

/*
 * bha_queue_ccb:
 *
 *	Queue a CCB to be sent to the controller, and send it if possible.
 */
static void
bha_queue_ccb(struct bha_softc *sc, struct bha_ccb *ccb)
{

	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);
	bha_start_ccbs(sc);
}

/*
 * bha_start_ccbs:
 *
 *	Send as many CCBs as we have empty mailboxes for.
 */
static void
bha_start_ccbs(struct bha_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct bha_ccb_group *bcg;
	struct bha_mbx_out *mbo;
	struct bha_ccb *ccb;

	mbo = sc->sc_tmbo;

	while ((ccb = TAILQ_FIRST(&sc->sc_waiting_ccb)) != NULL) {
		if (sc->sc_mbofull >= sc->sc_mbox_count) {
#ifdef DIAGNOSTIC
			if (sc->sc_mbofull > sc->sc_mbox_count)
				panic("bha_start_ccbs: mbofull > mbox_count");
#endif
			/*
			 * No mailboxes available; attempt to collect ones
			 * that have already been used.
			 */
			bha_collect_mbo(sc);
			if (sc->sc_mbofull == sc->sc_mbox_count) {
				/*
				 * Still no more available; have the
				 * controller interrupt us when it
				 * frees one.
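				 * bha_intr() will then restart CCB
				 * submission via bha_start_ccbs().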
				 */
				struct bha_toggle toggle;

				toggle.cmd.opcode = BHA_MBO_INTR_EN;
				toggle.cmd.enable = 1;
				bha_cmd(iot, ioh, sc->sc_dev.dv_xname,
				    sizeof(toggle.cmd), (u_char *)&toggle.cmd,
				    0, (u_char *)0);
				break;
			}
		}

		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
#ifdef BHADIAG
		ccb->flags |= CCB_SENDING;
#endif

		/*
		 * Put the CCB in the mailbox.
		 */
		bcg = BHA_CCB_GROUP(ccb);
		ltophys(bcg->bcg_dmamap->dm_segs[0].ds_addr +
		    BHA_CCB_OFFSET(ccb), mbo->ccb_addr);
		if (ccb->flags & CCB_ABORT)
			mbo->cmd = BHA_MBO_ABORT;
		else
			mbo->cmd = BHA_MBO_START;

		BHA_MBO_SYNC(sc, mbo,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Tell the card to poll immediately. */
		bus_space_write_1(iot, ioh, BHA_CMD_PORT, BHA_START_SCSI);

		if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
			callout_reset(&ccb->xs->xs_callout,
			    mstohz(ccb->timeout), bha_timeout, ccb);

		++sc->sc_mbofull;
		mbo = bha_nextmbo(sc, mbo);
	}

	sc->sc_tmbo = mbo;
}

/*
 * bha_finish_ccbs:
 *
 *	Finalize the execution of CCBs in our incoming mailbox.
 */
static void
bha_finish_ccbs(struct bha_softc *sc)
{
	struct bha_mbx_in *mbi;
	struct bha_ccb *ccb;
	int i;

	mbi = sc->sc_tmbi;

	BHA_MBI_SYNC(sc, mbi, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	if (mbi->comp_stat == BHA_MBI_FREE) {
		for (i = 0; i < sc->sc_mbox_count; i++) {
			if (mbi->comp_stat != BHA_MBI_FREE) {
#ifdef BHADIAG
				/*
				 * This can happen in normal operation if
				 * we use all mailbox slots.
				 */
				printf("%s: mbi not in round-robin order\n",
				    sc->sc_dev.dv_xname);
#endif
				goto again;
			}
			mbi = bha_nextmbi(sc, mbi);
			BHA_MBI_SYNC(sc, mbi,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		}
#ifdef BHADIAGnot
		printf("%s: mbi interrupt with no full mailboxes\n",
		    sc->sc_dev.dv_xname);
#endif
		return;
	}

 again:
	do {
		ccb = bha_ccb_phys_kv(sc, phystol(mbi->ccb_addr));
		if (ccb == NULL) {
			printf("%s: bad mbi ccb pointer 0x%08x; skipping\n",
			    sc->sc_dev.dv_xname, phystol(mbi->ccb_addr));
			goto next;
		}

		BHA_CCB_SYNC(sc, ccb,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

#ifdef BHADEBUG
		if (bha_debug) {
			struct scsi_generic *cmd = &ccb->scsi_cmd;
			printf("op=%x %x %x %x %x %x\n",
			    cmd->opcode, cmd->bytes[0], cmd->bytes[1],
			    cmd->bytes[2], cmd->bytes[3], cmd->bytes[4]);
			printf("comp_stat %x for mbi addr = 0x%p, ",
			    mbi->comp_stat, mbi);
			printf("ccb addr = %p\n", ccb);
		}
#endif /* BHADEBUG */

		switch (mbi->comp_stat) {
		case BHA_MBI_OK:
		case BHA_MBI_ERROR:
			if ((ccb->flags & CCB_ABORT) != 0) {
				/*
				 * If we already started an abort, wait for it
				 * to complete before clearing the CCB.  We
				 * could instead just clear CCB_SENDING, but
				 * what if the mailbox was already received?
				 * The worst that happens here is that we clear
				 * the CCB a bit later than we need to.  BFD.
				 */
				goto next;
			}
			break;

		case BHA_MBI_ABORT:
		case BHA_MBI_UNKNOWN:
			/*
			 * Even if the CCB wasn't found, we clear it anyway.
			 * See preceding comment.
			 */
			break;

		default:
			printf("%s: bad mbi comp_stat %02x; skipping\n",
			    sc->sc_dev.dv_xname, mbi->comp_stat);
			goto next;
		}

		callout_stop(&ccb->xs->xs_callout);
		bha_done(sc, ccb);

 next:
		mbi->comp_stat = BHA_MBI_FREE;
		BHA_CCB_SYNC(sc, ccb,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		mbi = bha_nextmbi(sc, mbi);
		BHA_MBI_SYNC(sc, mbi,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	} while (mbi->comp_stat != BHA_MBI_FREE);

	sc->sc_tmbi = mbi;
}

/*****************************************************************************
 * Mailbox management functions.
 *****************************************************************************/

/*
 * bha_create_mailbox:
 *
 *	Create the mailbox structures.  Helper function for bha_attach().
 *
 *	NOTE: The Buslogic hardware only gets one DMA address for the
 *	mailbox!  It expects:
 *
 *		mailbox_out[mailbox_size]
 *		mailbox_in[mailbox_size]
 */
static int
bha_create_mailbox(struct bha_softc *sc)
{
	bus_dma_segment_t seg;
	size_t size;
	int error, rseg;

	size = (sizeof(struct bha_mbx_out) * sc->sc_mbox_count) +
	    (sizeof(struct bha_mbx_in) * sc->sc_mbox_count);

	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg,
	    1, &rseg, sc->sc_dmaflags);
	if (error) {
		aprint_error("%s: unable to allocate mailboxes, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto bad_0;
	}

	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
	    (caddr_t *)&sc->sc_mbo, sc->sc_dmaflags | BUS_DMA_COHERENT);
	if (error) {
		aprint_error("%s: unable to map mailboxes, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto bad_1;
	}

	memset(sc->sc_mbo, 0, size);

	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    sc->sc_dmaflags, &sc->sc_dmamap_mbox);
	if (error) {
		aprint_error(
		    "%s: unable to create mailbox DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto bad_2;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_mbox,
	    sc->sc_mbo, size, NULL, 0);
	if (error) {
		aprint_error("%s: unable to load mailbox DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto bad_3;
	}

	sc->sc_mbi = (struct bha_mbx_in *)(sc->sc_mbo + sc->sc_mbox_count);

	return (0);

 bad_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap_mbox);
 bad_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_mbo, size);
 bad_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 bad_0:
	return (error);
}

/*
 * bha_collect_mbo:
 *
 *	Garbage collect mailboxes that are no longer in use.
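 *	Collection starts at sc_cmbo (the oldest outstanding mailbox)
 *	and stops at the first mailbox the controller has not yet
 *	marked free.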
 */
static void
bha_collect_mbo(struct bha_softc *sc)
{
	struct bha_mbx_out *mbo;
#ifdef BHADIAG
	struct bha_ccb *ccb;
#endif

	mbo = sc->sc_cmbo;

	while (sc->sc_mbofull > 0) {
		BHA_MBO_SYNC(sc, mbo,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		if (mbo->cmd != BHA_MBO_FREE)
			break;

#ifdef BHADIAG
		ccb = bha_ccb_phys_kv(sc, phystol(mbo->ccb_addr));
		ccb->flags &= ~CCB_SENDING;
#endif

		--sc->sc_mbofull;
		mbo = bha_nextmbo(sc, mbo);
	}

	sc->sc_cmbo = mbo;
}

/*****************************************************************************
 * CCB management functions
 *****************************************************************************/

static __inline void
bha_reset_ccb(struct bha_ccb *ccb)
{

	ccb->flags = 0;
}

/*
 * bha_create_ccbs:
 *
 *	Create a set of CCBs.
 *
 *	We determine the target CCB count, and then keep creating them
 *	until we reach the target, or fail.  CCBs that are allocated
 *	but not "created" are left on the allocating list.
 *
 *	XXX AB_QUIET/AB_SILENT lossage here; this is called during
 *	boot as well as at run-time.
 */
static void
bha_create_ccbs(struct bha_softc *sc, int count)
{
	struct bha_ccb_group *bcg;
	struct bha_ccb *ccb;
	bus_dma_segment_t seg;
	bus_dmamap_t ccbmap;
	int target, i, error, rseg;

	/*
	 * If the current CCB count is already the max number we're
	 * allowed to have, bail out now.
	 */
	if (sc->sc_cur_ccbs == sc->sc_max_ccbs)
		return;

	/*
	 * Compute our target count, and clamp it down to the max
	 * number we're allowed to have.
	 */
	target = sc->sc_cur_ccbs + count;
	if (target > sc->sc_max_ccbs)
		target = sc->sc_max_ccbs;

	/*
	 * If there are CCBs on the allocating list, don't allocate a
	 * CCB group yet.
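	 * Use up those leftovers from a previous call first.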
	 */
	if (TAILQ_FIRST(&sc->sc_allocating_ccbs) != NULL)
		goto have_allocating_ccbs;

 allocate_group:
	error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE,
	    PAGE_SIZE, 0, &seg, 1, &rseg, sc->sc_dmaflags | BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: unable to allocate CCB group, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto bad_0;
	}

	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
	    (void *)&bcg,
	    sc->sc_dmaflags | BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		printf("%s: unable to map CCB group, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto bad_1;
	}

	memset(bcg, 0, PAGE_SIZE);

	error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE,
	    1, PAGE_SIZE, 0, sc->sc_dmaflags | BUS_DMA_NOWAIT, &ccbmap);
	if (error) {
		printf("%s: unable to create CCB group DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto bad_2;
	}

	error = bus_dmamap_load(sc->sc_dmat, ccbmap, bcg, PAGE_SIZE, NULL,
	    sc->sc_dmaflags | BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: unable to load CCB group DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto bad_3;
	}

	bcg->bcg_dmamap = ccbmap;

#ifdef DIAGNOSTIC
	if (BHA_CCB_GROUP(&bcg->bcg_ccbs[0]) !=
	    BHA_CCB_GROUP(&bcg->bcg_ccbs[bha_ccbs_per_group - 1]))
		panic("bha_create_ccbs: CCB group size botch");
#endif

	/*
	 * Add all of the CCBs in this group to the allocating list.
	 */
	for (i = 0; i < bha_ccbs_per_group; i++) {
		ccb = &bcg->bcg_ccbs[i];
		TAILQ_INSERT_TAIL(&sc->sc_allocating_ccbs, ccb, chain);
	}

 have_allocating_ccbs:
	/*
	 * Loop over the allocating list until we reach our CCB target.
	 * If we run out on the list, we'll allocate another group's
	 * worth.
	 */
	while (sc->sc_cur_ccbs < target) {
		ccb = TAILQ_FIRST(&sc->sc_allocating_ccbs);
		if (ccb == NULL)
			goto allocate_group;
		if (bha_init_ccb(sc, ccb) != 0) {
			/*
			 * We were unable to initialize the CCB.
			 * This is likely due to a resource shortage,
			 * so bail out now.
			 */
			return;
		}
	}

	/*
	 * If we got here, we've reached our target!
	 */
	return;

 bad_3:
	bus_dmamap_destroy(sc->sc_dmat, ccbmap);
 bad_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)bcg, PAGE_SIZE);
 bad_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 bad_0:
	return;
}

/*
 * bha_init_ccb:
 *
 *	Initialize a CCB; helper function for bha_create_ccbs().
 */
static int
bha_init_ccb(struct bha_softc *sc, struct bha_ccb *ccb)
{
	struct bha_ccb_group *bcg = BHA_CCB_GROUP(ccb);
	int hashnum, error;

	/*
	 * Create the DMA map for this CCB.
	 *
	 * XXX ALLOCNOW is a hack to prevent bounce buffer shortages
	 * XXX in the ISA case.  A better solution is needed.
	 */
	error = bus_dmamap_create(sc->sc_dmat, BHA_MAXXFER, BHA_NSEG,
	    BHA_MAXXFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | sc->sc_dmaflags,
	    &ccb->dmamap_xfer);
	if (error) {
		printf("%s: unable to create CCB DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	TAILQ_REMOVE(&sc->sc_allocating_ccbs, ccb, chain);

	/*
	 * Put the CCB into the phystokv hash table.
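	 * The hash key is the CCB's bus (DMA) address, which is what
	 * the controller hands back in incoming mailboxes; see
	 * bha_ccb_phys_kv().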
	 */
	ccb->hashkey = bcg->bcg_dmamap->dm_segs[0].ds_addr +
	    BHA_CCB_OFFSET(ccb);
	hashnum = CCB_HASH(ccb->hashkey);
	ccb->nexthash = sc->sc_ccbhash[hashnum];
	sc->sc_ccbhash[hashnum] = ccb;
	bha_reset_ccb(ccb);

	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
	sc->sc_cur_ccbs++;

	return (0);
}

/*
 * bha_get_ccb:
 *
 *	Get a CCB for the SCSI operation.  If there are none left,
 *	wait until one becomes available, if we can.
 */
static struct bha_ccb *
bha_get_ccb(struct bha_softc *sc)
{
	struct bha_ccb *ccb;
	int s;

	s = splbio();
	ccb = TAILQ_FIRST(&sc->sc_free_ccb);
	if (ccb != NULL) {
		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
		ccb->flags |= CCB_ALLOC;
	}
	splx(s);
	return (ccb);
}

/*
 * bha_free_ccb:
 *
 *	Put a CCB back onto the free list.
 */
static void
bha_free_ccb(struct bha_softc *sc, struct bha_ccb *ccb)
{
	int s;

	s = splbio();
	bha_reset_ccb(ccb);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
	splx(s);
}

/*
 * bha_ccb_phys_kv:
 *
 *	Given a CCB DMA address, locate the CCB in kernel virtual space.
 */
static struct bha_ccb *
bha_ccb_phys_kv(struct bha_softc *sc, bus_addr_t ccb_phys)
{
	int hashnum = CCB_HASH(ccb_phys);
	struct bha_ccb *ccb = sc->sc_ccbhash[hashnum];

	while (ccb) {
		if (ccb->hashkey == ccb_phys)
			break;
		ccb = ccb->nexthash;
	}
	return (ccb);
}